diff --git a/data/css/style.css b/data/css/style.css index adf2bdeb..6c80315c 100755 --- a/data/css/style.css +++ b/data/css/style.css @@ -246,7 +246,6 @@ table#searchresults_table td#comicyear { vertical-align: middle; text-align: lef table#searchresults_table td#issues { vertical-align: middle; text-align: center; min-width: 50px; } div.progress-container { border: 1px solid #ccc; width: 100px; height: 14px; margin: 2px 5px 2px 0; padding: 1px; float: left; background: white; } -div.progress-container > div { background-color: #a3e532; height: 14px; } .havetracks { font-size: 13px; margin-left: 36px; padding-bottom: 3px; vertical-align: middle; } footer { margin: 20px auto 20px auto; } diff --git a/data/interfaces/default/base.html b/data/interfaces/default/base.html index bc8b2ced..5149f6f6 100755 --- a/data/interfaces/default/base.html +++ b/data/interfaces/default/base.html @@ -86,6 +86,10 @@ (${version.MYLAR_VERSION}) %endif +
+@mylarcomics + +
Back to top diff --git a/data/interfaces/default/css/style.css b/data/interfaces/default/css/style.css index a8ea6038..511587ed 100755 --- a/data/interfaces/default/css/style.css +++ b/data/interfaces/default/css/style.css @@ -879,6 +879,38 @@ div#artistheader h2 a { font-weight: bold; font-family: "Trebuchet MS", Helvetica, Arial, sans-serif; } +#weekly_pull th#publisher { + min-width: 150px; + text-align: left; +} +#weekly_pull th#comicname { + min-width: 250px; + text-align: left; +} +#weekly_pull th#comicnumber, +#weekly_pull th#status, +#weekly_pull th#series { + min-width: 50px; + text-align: center; +} +#weekly_pull td#comicname { + min-width: 275px; + text-align: left; + vertical-align: middle; + font-size: 12px; +} +#weekly_pull td#status, +#weekly_pull td#series, +#weekly_pull td#comicnumber { + min-width: 50px; + text-align: left; + vertical-align: middle; +} +#weekly_pull td#publisher { + min-width: 150px; + text-align: left; + vertical-align: middle; +} #manage_comic th#name { min-width: 275px; text-align: left; diff --git a/data/interfaces/default/idirectory.html b/data/interfaces/default/idirectory.html new file mode 100755 index 00000000..be14e9fd --- /dev/null +++ b/data/interfaces/default/idirectory.html @@ -0,0 +1,85 @@ +<%inherit file="base.html" /> +<%! + import mylar + from mylar.helpers import checked +%> +<%def name="headerIncludes()"> +
+
+ Manage Comics +
+
+ + +<%def name="body()"> +
+

Manage

+
+
+ +
+
+
+ Scan Comic Library +

Where do you keep your comics?

+

You can put in any directory, and it will scan for comic files in that folder + (including all subdirectories).
For example: '/Users/name/Comics'

+

+ It may take a while depending on how many files you have. You can navigate away from the page
+ as soon as you click 'Save changes' +

+
+

THIS IS CURRENTLY DISABLED UNTIL WORKING..

+
+ + %if mylar.COMIC_DIR: + + %else: + + %endif +
+
+ +
+
+ +
+
+ + Leaving this unchecked will not move anything, but will mark the issues as Archived +
+
+
+ + Rename files to configuration settings +
+
+
+ + +
+
+
+ +
+ +<%def name="javascriptIncludes()"> + + diff --git a/data/interfaces/default/importresults.html b/data/interfaces/default/importresults.html new file mode 100644 index 00000000..352a59fc --- /dev/null +++ b/data/interfaces/default/importresults.html @@ -0,0 +1,77 @@ +<%inherit file="base.html" /> <%! + import mylar + from mylar.helpers import checked +%> +<%def name="headerIncludes()"> +
+
+ Some Button +
+
+ + +<%def name="body()"> +
+

Borg Importing Results

+
+
+ +
+ + + + + + +
+
+
+
+
+ + + + + + + + + + + + %if results: + %for result in results: + + + + + + + + %endfor + %else: + + + + %endif + + + +
Comic NameYearStatusImport DateOptions
${result['ComicName']}${result['ComicYear']}${result['Status']}${result['ImportDate']}Add Series
There are no results to display
+
+
+ +<%def name="javascriptIncludes()"> + + + diff --git a/data/interfaces/default/searchfix.html b/data/interfaces/default/searchfix.html index bda98a09..db6aafa5 100755 --- a/data/interfaces/default/searchfix.html +++ b/data/interfaces/default/searchfix.html @@ -97,7 +97,6 @@ function initThisPage() { jQuery( "#tabs" ).tabs(); initActions(); - initConfigCheckbox("#addtocsv"); }; $(document).ready(function() { initThisPage(); diff --git a/data/interfaces/default/weeklypull.html b/data/interfaces/default/weeklypull.html index b29013a8..2a21f683 100755 --- a/data/interfaces/default/weeklypull.html +++ b/data/interfaces/default/weeklypull.html @@ -8,11 +8,6 @@
Refresh Pull-list - %if pullfilter is False: - Filter Non-Comics - %else: - Show All Comics - %endif
« Back to overview @@ -23,30 +18,15 @@

Weekly Pull list for : ${pulldate}

-
- -
Mark selected issues as - - -
+
+ - - %if pullfilter is False: - - %endif - @@ -64,60 +44,21 @@ %if pullfilter is True: %if str(weekly['ISSUE']).isdigit() > 0: - - - - - + + + - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %else: - [retry][new] + add series %endif %endif - %elif pullfilter is False: - - - - %if str(weekly['ISSUE']).isdigit() > 0: - - %else: - - %endif - %if str(weekly['ISSUE']).isdigit() > 0: - - %else: - - %endif - %if str(weekly['ISSUE']).isdigit() > 0: - - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %else: - [retry][new] - %endif - %else: - %endfor
Publisher COMIC NumberTypeStatusSeries
${weekly['PUBLISHER']}${weekly['COMIC']} - ${weekly['ISSUE']}${weekly['STATUS']} + ${weekly['PUBLISHER']}${weekly['COMIC']}${weekly['ISSUE']}${weekly['STATUS']} %if weekly['STATUS'] == 'Skipped': - [want] - add series${weekly['PUBLISHER']}${weekly['COMIC']}${weekly['ISSUE']} ${weekly['ISSUE']}${weekly['STATUS']} - %if weekly['STATUS'] == 'Skipped': - [want] - add series${weekly['STATUS']} - %if weekly['STATUS'] == 'Skipped': - [want] - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %endif - %endif %endif
- +
<%def name="headIncludes()"> @@ -129,42 +70,34 @@ + $(document).ready(function() { + initThisPage(); + }); + diff --git a/mylar/__init__.py b/mylar/__init__.py index 4b3671fa..db86e426 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -87,6 +87,7 @@ ADD_COMICS = False COMIC_DIR = None LIBRARYSCAN = False IMP_MOVE = False +IMP_RENAME = False SEARCH_INTERVAL = 360 NZB_STARTUP_SEARCH = False @@ -216,7 +217,7 @@ def initialize(): HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \ CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \ DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \ - LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, \ + LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, \ NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\ NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\ RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \ @@ -269,6 +270,7 @@ def initialize(): ADD_COMICS = bool(check_setting_int(CFG, 'General', 'add_comics', 0)) COMIC_DIR = check_setting_str(CFG, 'General', 'comic_dir', '') IMP_MOVE = bool(check_setting_int(CFG, 'General', 'imp_move', 0)) + IMP_RENAME = bool(check_setting_int(CFG, 'General', 'imp_rename', 0)) DOWNLOAD_SCAN_INTERVAL = check_setting_int(CFG, 'General', 'download_scan_interval', 5) INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default') AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0)) @@ -520,6 +522,7 @@ def 
config_write(): new_config['General']['add_comics'] = int(ADD_COMICS) new_config['General']['comic_dir'] = COMIC_DIR new_config['General']['imp_move'] = int(IMP_MOVE) + new_config['General']['imp_rename'] = int(IMP_RENAME) new_config['General']['download_scan_interval'] = DOWNLOAD_SCAN_INTERVAL new_config['General']['interface'] = INTERFACE new_config['General']['autowant_all'] = int(AUTOWANT_ALL) @@ -639,7 +642,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)') # c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)') - + c.execute('CREATE TABLE IF NOT EXISTS importresults (ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT)') conn.commit c.close #new diff --git a/mylar/helpers.py b/mylar/helpers.py index 6443fad4..b0ca8ac1 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -183,3 +183,132 @@ def decimal_issue(iss): issdec = int(iss_decval.rstrip('0')) * 10 deciss = (int(iss_b4dec) * 1000) + issdec return deciss + +def rename_param(comicid, comicname, comicyear, issue, issueid=None): + myDB = db.DBConnection() + if issueid is None: + chkissue = myDB.action("SELECT * from issues WHERE ComicID=? 
AND Issue_Number=?", [comicid, issue]).fetchone() + if chkissue is None: + logger.error("Invalid Issue_Number - please validate.") + return + else: + issueid = chkissue['IssueID'] + + #use issueid to get publisher, series, year, issue number + issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone() + #comicid = issuenzb['ComicID'] + issuenum = issuenzb['Issue_Number'] + #issueno = str(issuenum).split('.')[0] + + iss_find = issuenum.find('.') + iss_b4dec = issuenum[:iss_find] + iss_decval = issuenum[iss_find+1:] + if int(iss_decval) == 0: + iss = iss_b4dec + issdec = int(iss_decval) + issueno = str(iss) + logger.fdebug("Issue Number: " + str(issueno)) + else: + if len(iss_decval) == 1: + iss = iss_b4dec + "." + iss_decval + issdec = int(iss_decval) * 10 + else: + iss = iss_b4dec + "." + iss_decval.rstrip('0') + issdec = int(iss_decval.rstrip('0')) * 10 + issueno = iss_b4dec + logger.fdebug("Issue Number: " + str(iss)) + + # issue zero-suppression here + if mylar.ZERO_LEVEL == "0": + zeroadd = "" + else: + if mylar.ZERO_LEVEL_N == "none": zeroadd = "" + elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0" + elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00" + + logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N)) + + if str(len(issueno)) > 1: + if int(issueno) < 10: + logger.fdebug("issue detected less than 10") + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(zeroadd) + str(iss) + else: + prettycomiss = str(zeroadd) + str(int(issueno)) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". 
Issue will be set as : " + str(prettycomiss)) + elif int(issueno) >= 10 and int(issueno) < 100: + logger.fdebug("issue detected greater than 10, but less than 100") + if mylar.ZERO_LEVEL_N == "none": + zeroadd = "" + else: + zeroadd = "0" + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(zeroadd) + str(iss) + else: + prettycomiss = str(zeroadd) + str(int(issueno)) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss)) + else: + logger.fdebug("issue detected greater than 100") + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(issueno) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss)) + else: + prettycomiss = str(issueno) + logger.fdebug("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss)) + + logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss)) + issueyear = issuenzb['IssueDate'][:4] + logger.fdebug("Issue Year : " + str(issueyear)) + comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() + publisher = comicnzb['ComicPublisher'] + logger.fdebug("Publisher: " + str(publisher)) + series = comicnzb['ComicName'] + logger.fdebug("Series: " + str(series)) + seriesyear = comicnzb['ComicYear'] + logger.fdebug("Year: " + str(seriesyear)) + comlocation = comicnzb['ComicLocation'] + logger.fdebug("Comic Location: " + str(comlocation)) + + file_values = {'$Series': series, + '$Issue': prettycomiss, + '$Year': issueyear, + '$series': series.lower(), + '$Publisher': publisher, + '$publisher': publisher.lower(), + '$Volume': seriesyear + } + + extensions = ('.cbr', '.cbz') + + if mylar.FILE_FORMAT == '': + self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG) + logger.fdebug("Rename Files isn't enabled - keeping original filename.") + #check if extension is in nzb_name - will screw up otherwise + if 
ofilename.lower().endswith(extensions): + nfilename = ofilename[:-4] + else: + nfilename = ofilename + else: + nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values) + if mylar.REPLACE_SPACES: + #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot + nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR) + nfilename = re.sub('[\,\:]', '', nfilename) + logger.fdebug("New Filename: " + str(nfilename)) + + if mylar.LOWERCASE_FILENAMES: + dst = (comlocation + "/" + nfilename + ext).lower() + else: + dst = comlocation + "/" + nfilename + ext.lower() + logger.fdebug("Source: " + str(src)) + logger.fdebug("Destination: " + str(dst)) + + rename_this = { "destination_dir" : dst, + "nfilename" : nfilename, + "issueid" : issueid, + "comicid" : comicid } + + return rename_this diff --git a/mylar/librarysync.py b/mylar/librarysync.py new file mode 100644 index 00000000..e0aa767b --- /dev/null +++ b/mylar/librarysync.py @@ -0,0 +1,444 @@ +# This file is part of Mylar. +# +# Mylar is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Mylar is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Mylar. If not, see . 
+ +from __future__ import with_statement + +import os +import glob +import re +import shutil + +import mylar +from mylar import db, logger, helpers, importer, updater + +# You can scan a single directory and append it to the current library by specifying append=True +def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None): + + if cron and not mylar.LIBRARYSCAN: + return + + if not dir: + dir = mylar.COMIC_DIR + + # If we're appending a dir, it's coming from the post processor which is + # already bytestring + if not append: + dir = dir.encode(mylar.SYS_ENCODING) + + if not os.path.isdir(dir): + logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace')) + return + + + logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace')) + + basedir = dir + + watchmatch = {} + comic_list = [] + comiccnt = 0 + extensions = ('cbr','cbz') + for r,d,f in os.walk(dir): + #for directory in d[:]: + # if directory.startswith("."): + # d.remove(directory) + for files in f: + if any(files.lower().endswith('.' + x.lower()) for x in extensions): + comic = files + comicpath = os.path.join(r, files) + comicsize = os.path.getsize(comicpath) + print "Comic: " + comic + print "Comic Path: " + comicpath + print "Comic Size: " + str(comicsize) + + # We need the unicode path to use for logging, inserting into database + unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace') + + comiccnt+=1 + comic_dict = { 'ComicFilename': comic, + 'ComicLocation': comicpath, + 'ComicSize': comicsize, + 'Unicode_ComicLocation': unicode_comic_path } + comic_list.append(comic_dict) + + logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now") + + myDB = db.DBConnection() + + #let's load in the watchlist to see if we have any matches. 
+ logger.info("loading in the watchlist to see if a series is being watched already...") + watchlist = myDB.action("SELECT * from comics") + ComicName = [] + ComicYear = [] + ComicPublisher = [] + ComicTotal = [] + ComicID = [] + ComicLocation = [] + + AltName = [] + watchcnt = 0 + + watch_kchoice = [] + watchchoice = {} + import_by_comicids = [] + import_comicids = {} + + for watch in watchlist: + # let's clean up the name, just in case for comparison purposes... + watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['ComicName'])) + #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip() + alt_chk = "no" # alt-checker flag (default to no) + + # account for alternate names as well + if watch['AlternateSearch'] is not None and watch['AlternateSearch'] is not 'None': + altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['AlternateSearch'])) + #altcomic = re.sub('\s+', ' ', str(altcomic)).strip() + AltName.append(altcomic) + alt_chk = "yes" # alt-checker flag + + ComicName.append(watchcomic) + ComicYear.append(watch['ComicYear']) + ComicPublisher.append(watch['ComicPublisher']) + ComicTotal.append(watch['Total']) + ComicID.append(watch['ComicID']) + ComicLocation.append(watch['ComicLocation']) + watchcnt+=1 + + logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.") + + ripperlist=['digital-', + 'empire', + 'dcp'] + + watchfound = 0 + + for i in comic_list: + #print i['ComicFilename'] + + comfilename = i['ComicFilename'] + comlocation = i['ComicLocation'] + #let's clean up the filename for matching purposes + + cfilename = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(comfilename)) + #cfilename = re.sub('\s+', ' ', str(cfilename)).strip() + + cm_cn = 0 + + #we need to track the counter to make sure we are comparing the right array parts + #this takes care of the brackets :) + m = re.findall('[^()]+', cfilename) + lenm = len(m) + print ("there are " + str(lenm) + " words.") + cnt = 0 + 
yearmatch = "false" + foundonwatch = "False" + + while (cnt < lenm): + if m[cnt] is None: break + if m[cnt] == ' ': + pass + else: + logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt])) + if cnt == 0: + comic_andiss = m[cnt] + logger.fdebug("Comic: " + str(comic_andiss)) + if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': + logger.fdebug("year detected: " + str(m[cnt])) + result_comyear = m[cnt] + yearmatch = "true" + # if str(comyear) in result_comyear: + # logger.fdebug(str(comyear) + " - right years match baby!") + # yearmatch = "true" + # else: + # logger.fdebug(str(comyear) + " - not right - years do not match") + # yearmatch = "false" + #let's do this hear and save a few extra loops ;) + if 'digital' in m[cnt] and len(m[cnt]) == 7: + logger.fdebug("digital edition detected") + pass + if ' of ' in m[cnt]: + logger.fdebug("mini-series detected : " + str(m[cnt])) + result_of = m[cnt] + if 'cover' in m[cnt]: + logger.fdebug("covers detected: " + str(m[cnt])) + result_comcovers = m[cnt] + for ripper in ripperlist: + if ripper in m[cnt]: + logger.fdebug("Scanner detected: " + str(m[cnt])) + result_comscanner = m[cnt] + cnt+=1 + + if yearmatch == "false": + logger.fdebug("failed to match...skipping.") + break + splitit = [] + watchcomic_split = [] + logger.fdebug("filename comic and issue: " + str(cfilename)) + #changed this from '' to ' ' + comic_iss_b4 = re.sub('[\-\:\,]', ' ', str(comic_andiss)) + comic_iss = comic_iss_b4.replace('.',' ') + logger.fdebug("adjusted comic and issue: " + str(comic_iss)) + splitit = comic_iss.split(None) + logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss)) + #bmm = re.findall('v\d', comic_iss) + #if len(bmm) > 0: splitst = len(splitit) - 2 + #else: splitst = len(splitit) - 1 + #----- + #here we cycle through the Watchlist looking for a match. 
+ while (cm_cn < watchcnt): + #setup the watchlist + comname = ComicName[cm_cn] + print ("watch_comic:" + str(comname)) + comyear = ComicYear[cm_cn] + compub = ComicPublisher[cm_cn] + comtotal = ComicTotal[cm_cn] + comicid = ComicID[cm_cn] + watch_location = ComicLocation[cm_cn] + + if splitit[(len(splitit)-1)].isdigit(): + #compares - if the last digit and second last digit are #'s seperated by spaces assume decimal + comic_iss = splitit[(len(splitit)-1)] + splitst = len(splitit) - 1 + if splitit[(len(splitit)-2)].isdigit(): + # for series that have a digit at the end, it screws up the logistics. + i = 1 + chg_comic = splitit[0] + while (i < (len(splitit)-1)): + chg_comic = chg_comic + " " + splitit[i] + i+=1 + logger.fdebug("chg_comic:" + str(chg_comic)) + if chg_comic.upper() == comname.upper(): + logger.fdebug("series contains numerics...adjusting..") + else: + changeup = "." + splitit[(len(splitit)-1)] + logger.fdebug("changeup to decimal: " + str(changeup)) + comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss + splitst = len(splitit) - 2 + else: + # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb + logger.fdebug("invalid naming format of filename detected - cannot properly determine issue") + continue + + # make sure that things like - in watchcomic are accounted for when comparing to nzb. 
+ watchcomic_split = helpers.cleanName(str(comname)) + watchcomic_split = re.sub('[\-\:\,\.]', ' ', watchcomic_split).split(None) + + logger.fdebug(str(splitit) + " file series word count: " + str(splitst)) + logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split))) + if (splitst) != len(watchcomic_split): + logger.fdebug("incorrect comic lengths...not a match") + if str(splitit[0]).lower() == "the": + logger.fdebug("THE word detected...attempting to adjust pattern matching") + splitit[0] = splitit[4:] + else: + logger.fdebug("length match..proceeding") + n = 0 + scount = 0 + logger.fdebug("search-length: " + str(splitst)) + logger.fdebug("Watchlist-length: " + str(len(watchcomic_split))) + while ( n <= (splitst)-1 ): + logger.fdebug("splitit: " + str(splitit[n])) + if n < (splitst) and n < len(watchcomic_split): + logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n])) + if '+' in watchcomic_split[n]: + watchcomic_split[n] = re.sub('+', '', str(watchcomic_split[n])) + if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]): + logger.fdebug("word matched on : " + str(splitit[n])) + scount+=1 + #elif ':' in splitit[n] or '-' in splitit[n]: + # splitrep = splitit[n].replace('-', '') + # print ("non-character keyword...skipped on " + splitit[n]) + elif str(splitit[n].lower()).startswith('v'): + logger.fdebug("possible versioning..checking") + #we hit a versioning # - account for it + if splitit[n][1:].isdigit(): + comicversion = str(splitit[n]) + logger.fdebug("version found: " + str(comicversion)) + else: + logger.fdebug("Comic / Issue section") + if splitit[n].isdigit(): + logger.fdebug("issue detected") + #comiss = splitit[n] + comicNAMER = n - 1 + com_NAME = splitit[0] + cmnam = 1 + while (cmnam <= comicNAMER): + com_NAME = str(com_NAME) + " " + str(splitit[cmnam]) + cmnam+=1 + logger.fdebug("comic: " + str(com_NAME)) + else: + 
logger.fdebug("non-match for: "+ str(splitit[n])) + pass + n+=1 + #set the match threshold to 80% (for now) + # if it's less than 80% consider it a non-match and discard. + #splitit has to splitit-1 because last position is issue. + wordcnt = int(scount) + logger.fdebug("scount:" + str(wordcnt)) + totalcnt = int(splitst) + logger.fdebug("splitit-len:" + str(totalcnt)) + spercent = (wordcnt/totalcnt) * 100 + logger.fdebug("we got " + str(spercent) + " percent.") + if int(spercent) >= 80: + logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!") + logger.fdebug("this should be a match!") + if '.' in comic_iss: + comisschk_find = comic_iss.find('.') + comisschk_b4dec = comic_iss[:comisschk_find] + comisschk_decval = comic_iss[comisschk_find+1:] + logger.fdebug("Found IssueNumber: " + str(comic_iss)) + logger.fdebug("..before decimal: " + str(comisschk_b4dec)) + logger.fdebug("...after decimal: " + str(comisschk_decval)) + #--let's make sure we don't wipe out decimal issues ;) + if int(comisschk_decval) == 0: + ciss = comisschk_b4dec + cintdec = int(comisschk_decval) + else: + if len(comisschk_decval) == 1: + ciss = comisschk_b4dec + "." + comisschk_decval + cintdec = int(comisschk_decval) * 10 + else: + ciss = comisschk_b4dec + "." 
+ comisschk_decval.rstrip('0') + cintdec = int(comisschk_decval.rstrip('0')) * 10 + comintIss = (int(comisschk_b4dec) * 1000) + cintdec + else: + comintIss = int(comic_iss) * 1000 + logger.fdebug("issue we found for is : " + str(comic_iss)) + + #issue comparison now as well + logger.info(u"Found " + str(comname) + " (" + str(comyear) + ") issue: " + str(comic_iss)) + watchfound+=1 +# updater.forceRescan(ComicID=comicid) +# if not any(d.get('ComicID', None) == str(comicid) for d in watch_kchoice): + watch_kchoice.append({ + "ComicID": str(comicid), + "ComicName": str(comname), + "ComicYear": str(comyear), + "ComicIssue": str(int(comic_iss)), + "ComicLocation": str(watch_location), + "OriginalLocation" : str(comlocation), + "OriginalFilename" : str(comfilename) + }) + foundonwatch = "True" + break + elif int(spercent) < 80: + logger.fdebug("failure - we only got " + str(spercent) + "% right!") + cm_cn+=1 + + if foundonwatch == "False": + #---if it's not a match - send it to the importer. + n = 0 + csplit = comic_andiss.split(None) + while ( n <= (len(csplit)-1) ): + if csplit[n].isdigit(): + logger.fdebug("issue detected") + #comiss = splitit[n] + comicNAMER = n - 1 + com_NAME = csplit[0] + cmnam = 1 + while (cmnam <= comicNAMER): + com_NAME = str(com_NAME) + " " + str(csplit[cmnam]) + cmnam+=1 + logger.fdebug("comic: " + str(com_NAME)) + n+=1 + + print ("adding " + str(com_NAME) + " to the import-queue!") + import_by_comicids.append({ + "comicname" : com_NAME, + "comicyear" : result_comyear, + "comfilename" : comfilename, + "comlocation" : comlocation + }) + + if len(watch_kchoice) > 0: + watchchoice['watchlist'] = watch_kchoice + print ("watchchoice: " + str(watchchoice)) + + logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.") + wat = 0 + comicids = [] + + if watchfound > 0: + if mylar.IMP_MOVE: + logger.info("You checked off Move Files...so that's what I'm going to do") + #check to see if Move 
Files is enabled. + #if not being moved, set the archive bit. + print("Moving files into appropriate directory") + while (wat < watchfound): + watch_the_list = watchchoice['watchlist'][wat] + watch_comlocation = watch_the_list['ComicLocation'] + watch_comicid = watch_the_list['ComicID'] + watch_comicname = watch_the_list['ComicName'] + watch_comicyear = watch_the_list['ComicYear'] + watch_comiciss = watch_the_list['ComicIssue'] + print ("ComicLocation: " + str(watch_comlocation)) + orig_comlocation = watch_the_list['OriginalLocation'] + orig_filename = watch_the_list['OriginalFilename'] + print ("Orig. Location: " + str(orig_comlocation)) + print ("Orig. Filename: " + str(orig_filename)) + #before moving check to see if Rename to Mylar structure is enabled. + if mylar.IMP_RENAME: + print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT)) + renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss) + nfilename = renameit['nfilename'] + + dst_path = os.path.join(watch_comlocation,nfilename) + if str(watch_comicid) not in comicids: + comicids.append(watch_comicid) + else: + print("Renaming files not enabled, keeping original filename(s)") + dst_path = os.path.join(watch_comlocation,orig_filename) + + #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext))) + #src = os.path.join(, str(nfilename + ext)) + print ("I'm going to move " + str(orig_comlocation) + " to .." 
+ str(dst_path)) + try: + shutil.move(orig_comlocation, dst_path) + except (OSError, IOError): + logger.info("Failed to move directory - check directories and manually re-run.") + wat+=1 + else: + # if move files isn't enabled, let's set all found comics to Archive status :) + while (wat < watchfound): + watch_the_list = watchchoice['watchlist'][wat] + watch_comicid = watch_the_list['ComicID'] + watch_issue = watch_the_list['ComicIssue'] + print ("ComicID: " + str(watch_comicid)) + print ("Issue#: " + str(watch_issue)) + issuechk = myDB.action("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone() + if issuechk is None: + print ("no matching issues for this comic#") + else: + print("...Existing status: " + str(issuechk['Status'])) + control = {"IssueID": issuechk['IssueID']} + values = { "Status": "Archived"} + print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ") + myDB.upsert("issues", values, control) + if str(watch_comicid) not in comicids: + comicids.append(watch_comicid) + wat+=1 + if comicids is None: pass + else: + c_upd = len(comicids) + c = 0 + while (c < c_upd ): + print ("Rescanning.. 
" + str(c)) + updater.forceRescan(c) + if not len(import_by_comicids): + return "Completed" + if len(import_by_comicids) > 0: + import_comicids['comic_info'] = import_by_comicids + print ("import comicids: " + str(import_by_comicids)) + return import_comicids diff --git a/mylar/search.py b/mylar/search.py index 0c8941b9..f2c8a98d 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -648,6 +648,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is nzbxlink_st = linkapi.find("*|*") linkapi = linkapi[:(nzbxlink_st + 3)] + str(nzbname) logger.fdebug("new linkapi (this should =nzbname) :" + str(linkapi)) + +# #test nzb.get +# if mylar.NZBGET: +# from xmlrpclib import ServerProxy +# if mylar.NZBGET_HOST[:4] = 'http': +# tmpapi = "http://" +# nzbget_host = mylar.NZBGET_HOST[7] +# elif mylar.NZBGET_HOST[:5] = 'https': +# tmpapi = "https://" +# nzbget_host = mylar.NZBGET_HOST[8] +# tmpapi = tmpapi + str(mylar.NZBGET_USERNAME) + ":" + str(mylar.NZBGET_PASSWORD) +# tmpapi = tmpapi + "@" + nzbget_host + ":" + str(mylar.NZBGET_PORT) + "/xmlrpc" +# server = ServerProxy(tmpapi) +# send_to_nzbget = server.appendurl(nzbname, mylar.NZBGET_CATEGORY, mylar.NZBGET_PRIORITY, True, str(linkapi)) +# if send_to_nzbget is True: +# logger.info("Successfully sent nzb to NZBGet!") +# else: +# logger.info("Unable to send nzb to NZBGet - check your configs.") +# #end nzb.get test + # let's build the send-to-SAB string now: tmpapi = str(mylar.SAB_HOST) logger.fdebug("send-to-SAB host string: " + str(tmpapi)) diff --git a/mylar/webserve.py b/mylar/webserve.py index 92dda2ec..2bd5648a 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -28,10 +28,11 @@ import time import threading import csv import platform +import Queue import mylar -from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version +from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, 
weeklypull, PostProcessor, version, librarysync #from mylar.helpers import checked, radio, today import lib.simplejson as simplejson @@ -97,6 +98,7 @@ class WebInterface(object): searchresults = mb.findComic(name, mode, issue=None) elif type == 'comic' and mode == 'want': searchresults = mb.findComic(name, mode, issue) + searchresults = sorted(searchresults, key=itemgetter('comicyear','issues'), reverse=True) #print ("Results: " + str(searchresults)) return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type) @@ -403,16 +405,26 @@ class WebInterface(object): def pullist(self): myDB = db.DBConnection() + weeklyresults = [] popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'") if popit: - weeklyresults = myDB.select("SELECT * from weekly") + w_results = myDB.select("SELECT PUBLISHER, ISSUE, COMIC, STATUS from weekly") + for weekly in w_results: + if weekly['ISSUE'].isdigit(): + weeklyresults.append({ + "PUBLISHER" : weekly['PUBLISHER'], + "ISSUE" : weekly['ISSUE'], + "COMIC" : weekly['COMIC'], + "STATUS" : weekly['STATUS'] + }) + weeklyresults = sorted(weeklyresults, key=itemgetter('PUBLISHER','COMIC'), reverse=False) pulldate = myDB.action("SELECT * from weekly").fetchone() if pulldate is None: return self.manualpull() #raise cherrypy.HTTPRedirect("home") else: return self.manualpull() - return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE']) + return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'], pullfilter=True) pullist.exposed = True def filterpull(self): @@ -421,7 +433,7 @@ class WebInterface(object): pulldate = myDB.action("SELECT * from weekly").fetchone() if pulldate is None: raise cherrypy.HTTPRedirect("home") - return serve_template(templatename="weeklypull.html", title="Weekly Pull", 
def ManualRename(self):
    # placeholder stub for the manual-rename action; not implemented yet
    print ("hello")
ManualRename.exposed = True

#for testing.
def idirectory(self):
    """Render the 'Import a Directory' page (library-import UI)."""
    return serve_template(templatename="idirectory.html", title="Import a Directory")
idirectory.exposed = True

def comicScan(self, path, scan=0, redirect=None, autoadd=0, libraryscan=0, imp_move=0, imp_rename=0):
    """Persist library-scan settings and optionally run a scan of `path`.

    When `scan` is truthy, runs librarysync.libraryScan(); any comics it
    finds are upserted into the importresults table with status
    'Not Imported'. Always finishes by redirecting the browser to
    `redirect` (or home).
    """
    mylar.LIBRARYSCAN = libraryscan
    mylar.ADD_COMICS = autoadd
    mylar.COMIC_DIR = path
    mylar.IMP_MOVE = imp_move
    mylar.IMP_RENAME = imp_rename
    mylar.config_write()
    if scan:
        # soma stays None if the scan raised, so we don't hit a NameError
        # below when deciding what to do with the results.
        soma = None
        try:
            soma = librarysync.libraryScan()
        except Exception as e:
            logger.error('Unable to complete the scan: %s' % e)
        if soma == "Completed":
            print ("successfully completed import.")
        elif soma is not None:
            logger.info(u"Starting mass importing...")
            #this is what it should do...
            #store soma (the list of comic_details from importing) into sql table so import can be whenever
            #display webpage showing results
            #allow user to select comic to add (one at a time)
            #call addComic off of the webpage to initiate the add.
            #return to result page to finish or continue adding.
            #....
            #threading.Thread(target=self.searchit).start()
            #threadthis = threadit.ThreadUrl()
            #result = threadthis.main(soma)
            myDB = db.DBConnection()
            # soma is a dict whose 'comic_info' key holds the list of
            # imported comics - iterate that list, not the dict itself
            # (len(soma) is always 1, which previously capped the loop).
            for soma_sl in soma['comic_info']:
                print ("cname: " + soma_sl['comicname'])
                controlValue = {"ComicName": soma_sl['comicname']}
                newValue = {"ComicYear": soma_sl['comicyear'],
                            "Status": "Not Imported",
                            "ImportDate": helpers.today()}
                myDB.upsert("importresults", newValue, controlValue)
            self.importResults()
    if redirect:
        raise cherrypy.HTTPRedirect(redirect)
    else:
        raise cherrypy.HTTPRedirect("home")
comicScan.exposed = True

def importResults(self):
    """Show everything gathered by a previous comicScan() run."""
    myDB = db.DBConnection()
    results = myDB.select("SELECT * FROM importresults")
    return serve_template(templatename="importresults.html", title="Import Results", results=results)
importResults.exposed = True
logger.fdebug("ComicNM: " + str(comicnm)) if 'THE' in str(watchcomic): modcomicnm = re.sub('THE', '', comicnm) + #thnx to A+X for this... + if '+' in str(watchcomic): + if 'plus' in str(comicnm).lower(): + modcomicnm = re.sub('plus', '+', comicnm) if str(comicnm) == str(watchcomic).upper() or str(modcomicnm) == str(watchcomic).upper(): logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper()) #pass