diff --git a/data/css/style.css b/data/css/style.css index adf2bdeb..6c80315c 100755 --- a/data/css/style.css +++ b/data/css/style.css @@ -246,7 +246,6 @@ table#searchresults_table td#comicyear { vertical-align: middle; text-align: lef table#searchresults_table td#issues { vertical-align: middle; text-align: center; min-width: 50px; } div.progress-container { border: 1px solid #ccc; width: 100px; height: 14px; margin: 2px 5px 2px 0; padding: 1px; float: left; background: white; } -div.progress-container > div { background-color: #a3e532; height: 14px; } .havetracks { font-size: 13px; margin-left: 36px; padding-bottom: 3px; vertical-align: middle; } footer { margin: 20px auto 20px auto; } diff --git a/data/interfaces/default/album.html b/data/interfaces/default/album.html deleted file mode 100755 index 0ebc283b..00000000 --- a/data/interfaces/default/album.html +++ /dev/null @@ -1,163 +0,0 @@ -<%inherit file="base.html" /> -<%! - from mylar import db, helpers - myDB = db.DBConnection() -%> - -<%def name="headerIncludes()"> -
-
- Delete Album - %if album['Status'] == 'Skipped': - Mark Album as Wanted - %elif album['Status'] == 'Wanted': - Force Check - Mark Album as Skipped - %else: - Retry Download - Try New Version - %endif -
-
- « Back to ${album['ArtistName']} - - -<%def name="body()"> -
-
-
- -
- -

${album['AlbumTitle']}

-

${album['ArtistName']}

- <% - totalduration = myDB.action("SELECT SUM(TrackDuration) FROM tracks WHERE AlbumID=?", [album['AlbumID']]).fetchone()[0] - totaltracks = len(myDB.select("SELECT TrackTitle from tracks WHERE AlbumID=?", [album['AlbumID']])) - try: - albumduration = helpers.convert_milliseconds(totalduration) - except: - albumduration = 'n/a' - - %> -
-
-
    -
  • Tracks: ${totaltracks}
  • -
  • Duration: ${albumduration}
  • -
-
- -
-
- - - - - - - - - - - - - %for track in tracks: - <% - if track['Location']: - grade = 'A' - location = track['Location'] - else: - grade = 'X' - location = '' - - if track['BitRate']: - bitrate = str(track['BitRate']/1000) + ' kbps' - else: - bitrate = '' - - try: - trackduration = helpers.convert_milliseconds(track['TrackDuration']) - except: - trackduration = 'n/a' - - if not track['Format']: - format = 'Unknown' - else: - format = track['Format'] - %> - - - - - - - - - %endfor - <% - unmatched = myDB.select('SELECT * from have WHERE ArtistName LIKE ? AND AlbumTitle LIKE ?', [album['ArtistName'], album['AlbumTitle']]) - %> - %if unmatched: - %for track in unmatched: - <% - duration = helpers.convert_seconds(float(track['TrackLength'])) - %> - - - - - - - - - %endfor - %endif - -
# | Track Title | Duration | Local File | Bit Rate | Format
${track['TrackNumber']} | ${track['TrackTitle']} | ${trackduration} | ${location} | ${bitrate} | ${format}
${track['TrackNumber']} | ${track['TrackTitle']} | ${duration} | ${track['Location']} | ${int(track['BitRate'])/1000} kbps | ${track['Format']}
-
-
- - -<%def name="headIncludes()"> - - - -<%def name="javascriptIncludes()"> - - - diff --git a/data/interfaces/default/artist-descincluded.html b/data/interfaces/default/artist-descincluded.html deleted file mode 100755 index 4d03b279..00000000 --- a/data/interfaces/default/artist-descincluded.html +++ /dev/null @@ -1,194 +0,0 @@ -<%inherit file="base.html"/> -<%! - from mylar import db - import mylar -%> - -<%def name="headerIncludes()"> -
-
- Edit Comic - Refresh Comic - Delete Comic - %if comic['Status'] == 'Paused': - Resume Comic - %else: - Pause Comic - %endif -
-
- « Back to overview - - -<%def name="body()"> -
-
- -
-

- %if comic['Status'] == 'Loading': - loading - %endif - ${comic['ComicName']} (${comic['ComicYear']}) - %if comic['Status'] == 'Loading': -

(Comic information for this comic is currently being loaded)

- %endif - -
- - Publisher: ${comic['ComicPublisher']}
- Comics in Series: ${comic['Total']} issues (${comic['ComicPublished']})
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %elif comic['ComicPublisher'] == 'Image': - Image - %endif - - Status: ${comic['Status']} - %if comic['Status'] == 'Active': - - %endif -
- Comic Directory Location:
   ${comic['ComicLocation']} -

- Quality Control: - - - - - - -
Version: v5
Scanner: GreenGiant (Fallback ON)
Type: NOADS (Fallback ON) | over-rides default settings*
Pixel Quality: None (Fallback OFF)
-
-
-
- -
Mark selected issues as - - -
- - - - - - - - - - - - - %for issue in issues: - <% - if issue['Status'] == 'Skipped': - grade = 'Z' - elif issue['Status'] == 'Wanted': - grade = 'X' - elif issue['Status'] == 'Snatched': - grade = 'C' - else: - grade = 'A' - %> - - - - - - - - - %endfor - -
Number | Name | Date | Type | Status
${issue['Issue_Number']}${issue['IssueName']}${issue['IssueDate']}${issue['Type']}${issue['Status']} - %if issue['Status'] == 'Skipped': - [want] - %elif (issue['Status'] == 'Wanted'): - [skip] - %else: - [retry][new] - %endif -
-
- - -<%def name="headIncludes()"> - - %if comic['Status'] == 'Loading': - - %endif - - -<%def name="javascriptIncludes()"> - - - - diff --git a/data/interfaces/default/artist.html b/data/interfaces/default/artist.html deleted file mode 100755 index 1d4706e4..00000000 --- a/data/interfaces/default/artist.html +++ /dev/null @@ -1,262 +0,0 @@ -<%inherit file="base.html"/> -<%! - from mylar import db - import mylar -%> - -<%def name="headerIncludes()"> -
-
- Edit Comic - Refresh Comic - Delete Comic - %if comic['Status'] == 'Paused': - Resume Comic - %else: - Pause Comic - %endif -
-
- « Back to overview - - -<%def name="body()"> -
- - -
- - - - - - -
-
-
- -
-
-
-
-

- %if comic['Status'] == 'Loading': - loading - %endif -
- ${comic['ComicName']} (${comic['ComicYear']}) - %if comic['Status'] == 'Loading': -

(Comic information for this comic is currently being loaded)

- %endif -
-

-
- -
-
- -
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %endif -
- -
-
- - -
-
-
-
- -
- - - - -
-
- Quality Control -
- -
-
- -
-
- -
-
- -
-
-
-
- -
- - -
- -
Mark selected issues as - - -
- - - - - - - - - - - - - %for issue in issues: - <% - if issue['Status'] == 'Skipped': - grade = 'Z' - elif issue['Status'] == 'Wanted': - grade = 'X' - elif issue['Status'] == 'Snatched': - grade = 'C' - else: - grade = 'A' - %> - - - - - - - - - %endfor - -
Number | Name | Date | Type | Status
${issue['Issue_Number']}${issue['IssueName']}${issue['IssueDate']}${issue['Type']}${issue['Status']} - %if issue['Status'] == 'Skipped': - [want] - %elif (issue['Status'] == 'Wanted'): - [skip] - %else: - [retry][new] - %endif -
-
- - -<%def name="headIncludes()"> - - %if comic['Status'] == 'Loading': - - %endif - - -<%def name="javascriptIncludes()"> - - - - diff --git a/data/interfaces/default/artistredone.html b/data/interfaces/default/artistredone.html index 13198915..ba1ca9d7 100755 --- a/data/interfaces/default/artistredone.html +++ b/data/interfaces/default/artistredone.html @@ -220,11 +220,12 @@ selected issues + - + @@ -246,7 +247,7 @@ - + - - + + diff --git a/data/interfaces/default/weeklypull.html b/data/interfaces/default/weeklypull.html index b29013a8..2a21f683 100755 --- a/data/interfaces/default/weeklypull.html +++ b/data/interfaces/default/weeklypull.html @@ -8,11 +8,6 @@
Refresh Pull-list - %if pullfilter is False: - Filter Non-Comics - %else: - Show All Comics - %endif
« Back to overview @@ -23,30 +18,15 @@

Weekly Pull list for : ${pulldate}

- - -
Mark selected issues as - - -
+
+
Number Name Date
${issue['Issue_Number']}${issue['IssueName']}${issue['IssueName']} ${issue['IssueDate']} ${issue['Status']} %if issue['Status'] == 'Skipped': @@ -297,45 +298,36 @@ } }; - function initThisPage() - { + function initThisPage(){ $(function() { $( "#tabs" ).tabs(); }); initActions(); - $('#issue_table').dataTable({ - "bDestroy": true, - "aoColumns": [ - null, - null, - null, - null, - null - - ], - "aoColumnDefs": [ - { 'bSortable': false, 'aTargets': [ 0,1 ] } - ], - "oLanguage": { - "sLengthMenu":"Show _MENU_ issues per page", - "sEmptyTable": "No issue information available", - "sInfo":"Showing _TOTAL_ issues", - "sInfoEmpty":"Showing 0 to 0 of 0 issues", - "sInfoFiltered":"(filtered from _MAX_ total issues)", - "sSearch": ""}, - "bPaginate": false, - "aaSorting": [[1, 'desc'],[3,'desc']] - - }); - resetFilters("issues"); - setTimeout(function(){ - initFancybox(); - },1500) + $('#issue_table').dataTable( + { + "bDestroy": true, + "aoColumnDefs": [ + { 'bSortable': false, 'aTargets': [ 0, 2 ] } + ], + "oLanguage": { + "sLengthMenu":"Show _MENU_ issues per page", + "sEmptyTable": "No issue information available", + "sInfo":"Showing _TOTAL_ issues", + "sInfoEmpty":"Showing 0 to 0 of 0 issues", + "sInfoFiltered":"(filtered from _MAX_ total issues)", + "sSearch": ""}, + "iDisplayLength": 25, + "sPaginationType": "full_numbers", + "aaSorting": [[1, 'desc'],[3,'desc']] + }); + resetFilters("issues"); + setTimeout(function(){ + initFancybox(); + },1500) } - $(document).ready(function() { initThisPage(); }); diff --git a/data/interfaces/default/artistredone.html.save b/data/interfaces/default/artistredone.html.save deleted file mode 100755 index a9668e2e..00000000 --- a/data/interfaces/default/artistredone.html.save +++ /dev/null @@ -1,318 +0,0 @@ -<%inherit file="base.html"/> -<%! - from mylar import db - import mylar -%> - -<%def name="headerIncludes()"> -
-
- Refresh Comic - Delete Comic - %if comic['Status'] == 'Paused': - Resume Comic - %else: - Pause Comic - %endif - Shut Down - Restart - - -
-
- -<%def name="body()"> - <% - totaltracks = comic['Total'] - havetracks = comic['Have'] - if not havetracks: - havetracks = 0 - try: - percent = (havetracks*100.0)/totaltracks - if percent > 100: - percent = 100 - except (ZeroDivisionError, TypeError): - percent = 0 - totaltracks = '?' - %> - - -
-

- %if comic['Status'] == 'Loading': - loading - %endif -
- ${comic['ComicName']} (${comic['ComicYear']}) - %if comic['Status'] == 'Loading': -

(Comic information for this comic is currently being loaded)

- %endif -
-

- -
-
- -
- -
- - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %if comic['ComicPublisher'] == 'DC Comics': - DC - - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %endif -
-
- -
-
- -
-
- -
-
- -
-
-
- - -
- - - - - - -
-
-
- -
-
-
-
-
- - - the directory where all the comics are for this comic -
-
- - - if the comic is v5 or whatever, enter 'v5' here -
-
- - - list preference of scanner -
-
- - - c2c / noads -
-
- - - resolution of scan (ie.1440px) -
- - -
-
-
- - - - -
Mark selected issues as - - -
- - - - - - - - - - - - - %for issue in issues: - <% - if issue['Status'] == 'Skipped': - grade = 'Z' - elif issue['Status'] == 'Wanted': - grade = 'X' - elif issue['Status'] == 'Snatched': - grade = 'C' - else: - grade = 'A' - %> - - - - - - - - - %endfor - -
Number | Name | Date | Type | Status
${issue['Issue_Number']}${issue['IssueName']}${issue['IssueDate']}${issue['Type']}${issue['Status']} - %if issue['Status'] == 'Skipped': - [want] - %elif (issue['Status'] == 'Wanted'): - [skip] - %else: - [retry][new] - %endif -
- - - - -<%def name="headIncludes()"> - - %if comic['Status'] == 'Loading': - - %endif - - - -<%def name="javascriptIncludes()"> - - - - diff --git a/data/interfaces/default/base.html b/data/interfaces/default/base.html index bc8b2ced..5149f6f6 100755 --- a/data/interfaces/default/base.html +++ b/data/interfaces/default/base.html @@ -86,6 +86,10 @@ (${version.MYLAR_VERSION}) %endif +
+ + +
Back to top diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index cd0da1b2..0c82e5bb 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -354,9 +354,6 @@
Post-Processing -
- -
@@ -385,6 +382,9 @@
Renaming options +
+ +
@@ -423,7 +423,7 @@
-
+
+ +
diff --git a/data/interfaces/default/css/style.css b/data/interfaces/default/css/style.css index a8ea6038..511587ed 100755 --- a/data/interfaces/default/css/style.css +++ b/data/interfaces/default/css/style.css @@ -879,6 +879,38 @@ div#artistheader h2 a { font-weight: bold; font-family: "Trebuchet MS", Helvetica, Arial, sans-serif; } +#weekly_pull th#publisher { + min-width: 150px; + text-align: left; +} +#weekly_pull th#comicname { + min-width: 250px; + text-align: left; +} +#weekly_pull th#comicnumber, +#weekly_pull th#status, +#weekly_pull th#series { + min-width: 50px; + text-align: center; +} +#weekly_pull td#comicname { + min-width: 275px; + text-align: left; + vertical-align: middle; + font-size: 12px; +} +#weekly_pull td#status, +#weekly_pull td#series, +#weekly_pull td#comicnumber { + min-width: 50px; + text-align: left; + vertical-align: middle; +} +#weekly_pull td#publisher { + min-width: 150px; + text-align: left; + vertical-align: middle; +} #manage_comic th#name { min-width: 275px; text-align: left; diff --git a/data/interfaces/default/editcomic.html b/data/interfaces/default/editcomic.html deleted file mode 100755 index 36567d70..00000000 --- a/data/interfaces/default/editcomic.html +++ /dev/null @@ -1,137 +0,0 @@ -<%inherit file="base.html"/> -<%! - import mylar -%> - -<%def name="headerIncludes()"> -
- -
- -<%def name="body()"> - - - -
-

${comic['ComicName']} - Settings

-
-
- -
- -
- - - - - - - -
-
- Basic -
- - - the directory where all the comics are for this comic -
-
-
-
-
- -
-
-
-
- -
- - - - - -
-
- Quality -
- - - if the comic is v5 or whatever, enter 'v5' here -
-
- - -
-
- - -
-
- - -
-
-
- -
-

Web Interface changes require a restart to take effect

-
- -
- - -<%def name="javascriptIncludes()"> - - diff --git a/data/interfaces/default/extras.html b/data/interfaces/default/extras.html deleted file mode 100755 index 2c25d357..00000000 --- a/data/interfaces/default/extras.html +++ /dev/null @@ -1,15 +0,0 @@ -<%inherit file="base.html" /> -<%def name="body()"> -
-

Artists You Might Like

-
-
-
- -
-
- diff --git a/data/interfaces/default/idirectory.html b/data/interfaces/default/idirectory.html new file mode 100755 index 00000000..be14e9fd --- /dev/null +++ b/data/interfaces/default/idirectory.html @@ -0,0 +1,85 @@ +<%inherit file="base.html" /> +<%! + import mylar + from mylar.helpers import checked +%> +<%def name="headerIncludes()"> +
+ +
+ + +<%def name="body()"> +
+

Manage

+
+
+ +
+ +
+ Scan Comic Library +

Where do you keep your comics?

+

You can put in any directory, and it will scan for comic files in that folder + (including all subdirectories).
For example: '/Users/name/Comics'

+

+ It may take a while depending on how many files you have. You can navigate away from the page
+ as soon as you click 'Save changes' +
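The scan this form kicks off is implemented by the new mylar/librarysync.py module added further down in this diff: it walks the chosen directory with os.walk and keeps anything ending in .cbr or .cbz. A rough standalone sketch of that walk follows; the function name find_comics is illustrative only, while the extension filter and the per-file dictionary keys mirror libraryScan().

import os

def find_comics(basedir, extensions=('.cbr', '.cbz')):
    # Walk basedir recursively and keep anything that looks like a comic archive,
    # mirroring the extension check and comic_dict layout in librarysync.libraryScan().
    comic_list = []
    for root, dirs, files in os.walk(basedir):
        for name in files:
            if name.lower().endswith(extensions):
                path = os.path.join(root, name)
                comic_list.append({'ComicFilename': name,
                                   'ComicLocation': path,
                                   'ComicSize': os.path.getsize(path)})
    return comic_list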

+
+

THIS IS CURRENTLY DISABLED UNTIL WORKING..

+
+ + %if mylar.COMIC_DIR: + + %else: + + %endif +
+
+ +
+
+ +
+
+ + Leaving this unchecked will not move anything, but will mark the issues as Archived +
+
+
+ + Rename files to configuration settings +
+
+
+ + +
+ +
+ +
+ +<%def name="javascriptIncludes()"> + + diff --git a/data/interfaces/default/importresults.html b/data/interfaces/default/importresults.html new file mode 100644 index 00000000..352a59fc --- /dev/null +++ b/data/interfaces/default/importresults.html @@ -0,0 +1,77 @@ +<%inherit file="base.html" /> <%! + import mylar + from mylar.helpers import checked +%> +<%def name="headerIncludes()"> +
+ +
+ + +<%def name="body()"> +
+

Borg Importing Results

+
+
+ +
+ + + + + + +
+
+
+
+
+ + + + + + + + + + + + %if results: + %for result in results: + + + + + + + + %endfor + %else: + + + + %endif + + + +
Comic Name | Year | Status | Import Date | Options
${result['ComicName']} | ${result['ComicYear']} | ${result['Status']} | ${result['ImportDate']} | Add Series
There are no results to display
+
+
+ +<%def name="javascriptIncludes()"> + + + diff --git a/data/interfaces/default/index.html b/data/interfaces/default/index.html index 5435d9ee..ba13b994 100755 --- a/data/interfaces/default/index.html +++ b/data/interfaces/default/index.html @@ -1,9 +1,11 @@ <%inherit file="base.html"/> <%! - from mylar import helpers + from mylar import helpers, db %> <%def name="body()"> +
+ @@ -18,10 +20,28 @@ %for comic in comics: <% - totaltracks = comic['Total'] - havetracks = comic['Have'] - if not havetracks: - havetracks = 0 + myDB = db.DBConnection() + issue = myDB.select("SELECT * FROM issues WHERE ComicID=?", [comic['ComicID']]) + wantedc = myDB.action("SELECT COUNT(*) as count FROM issues WHERE ComicID=? AND Status='Wanted'", [comic['ComicID']]).fetchone() + archedc = myDB.action("SELECT COUNT(*) as count FROM issues WHERE ComicID=? AND Status='Archived'", [comic['ComicID']]).fetchone() + totaltracks = comic['Total'] + havetracks = comic['Have'] + wants = wantedc[0] + arcs = archedc[0] + if not havetracks: + havetracks = 0 + if not wants: + wants = 0 + if not arcs: + arcs = 0 + try: + wantpercent = (wants*100.0)/totaltracks + if wantpercent > 100: + wantpercent = 100 + except (ZeroDivisionError, TypeError): + wantpercent = 0 + wants = '?' + try: percent = (havetracks*100.0)/totaltracks if percent > 100: @@ -34,7 +54,7 @@ if comic['Status'] == 'Paused': grade = 'X' elif comic['Status'] == 'Loading': - grade = 'L' + grade = 'L' elif comic['Status'] == 'Error': grade = 'Z' else: @@ -47,11 +67,12 @@ - - + + %endfor
${comic['ComicYear']} ${comic['Status']} # ${comic['LatestIssue']} (${comic['LatestDate']})
${havetracks}/${totaltracks}
${havetracks}/${totaltracks}
+
<%def name="headIncludes()"> @@ -62,40 +83,33 @@ diff --git a/data/interfaces/default/searchfix.html b/data/interfaces/default/searchfix.html index bda98a09..f78f5d6a 100755 --- a/data/interfaces/default/searchfix.html +++ b/data/interfaces/default/searchfix.html @@ -82,7 +82,7 @@
- +

${result['name']}${result['publisher']}${result['comicyear']}${result['publisher']}${result['comicyear']} ${result['issues']} Add this Comic
- - %if pullfilter is False: - - %endif - @@ -64,60 +44,21 @@ %if pullfilter is True: %if str(weekly['ISSUE']).isdigit() > 0: - - - - - + + + - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %else: - [retry][new] + add series %endif %endif - %elif pullfilter is False: - - - - %if str(weekly['ISSUE']).isdigit() > 0: - - %else: - - %endif - %if str(weekly['ISSUE']).isdigit() > 0: - - %else: - - %endif - %if str(weekly['ISSUE']).isdigit() > 0: - - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %else: - [retry][new] - %endif - %else: - %endfor
Publisher COMIC NumberTypeStatusSeries
${weekly['PUBLISHER']}${weekly['COMIC']} - ${weekly['ISSUE']}${weekly['STATUS']} + ${weekly['PUBLISHER']}${weekly['COMIC']}${weekly['ISSUE']}${weekly['STATUS']} %if weekly['STATUS'] == 'Skipped': - [want] - add series${weekly['PUBLISHER']}${weekly['COMIC']}${weekly['ISSUE']} ${weekly['ISSUE']}${weekly['STATUS']} - %if weekly['STATUS'] == 'Skipped': - [want] - add series${weekly['STATUS']} - %if weekly['STATUS'] == 'Skipped': - [want] - %elif (weekly['STATUS'] == 'Wanted'): - [skip] - %endif - %endif %endif
- + <%def name="headIncludes()"> @@ -129,42 +70,34 @@ + $(document).ready(function() { + initThisPage(); + }); + diff --git a/exceptions.csv b/exceptions.csv index 480f7c98..f3ecd4af 100644 --- a/exceptions.csv +++ b/exceptions.csv @@ -14,6 +14,8 @@ 2,2127,none,1570/7794/11288 #The Boys 1,18033,none,19531/25058 +#2000 A.D. +4,19752,none,11289/11295/11294/11292/11293 #-------- #-----Mismatched Names------ 99,3092,2605,none @@ -27,3 +29,4 @@ 99,42821,60934,none 99,42947,61242,none 99,42322,60917,none +99,53920,69871,none diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 05bd2d5d..a156585a 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -92,7 +92,7 @@ class PostProcessor(object): self._log("initiating pre script detection.", logger.DEBUG) self._log("mylar.PRE_SCRIPTS : " + mylar.PRE_SCRIPTS, logger.DEBUG) # for currentScriptName in mylar.PRE_SCRIPTS: - currentScriptName = mylar.PRE_SCRIPTS + currentScriptName = str(mylar.PRE_SCRIPTS).decode("string_escape") self._log("pre script detected...enabling: " + str(currentScriptName), logger.DEBUG) # generate a safe command line string to execute the script and provide all the parameters script_cmd = shlex.split(currentScriptName) + [str(nzb_name), str(nzb_folder), str(seriesmetadata)] @@ -117,7 +117,7 @@ class PostProcessor(object): self._log("initiating extra script detection.", logger.DEBUG) self._log("mylar.EXTRA_SCRIPTS : " + mylar.EXTRA_SCRIPTS, logger.DEBUG) # for curScriptName in mylar.EXTRA_SCRIPTS: - curScriptName = mylar.EXTRA_SCRIPTS + curScriptName = str(mylar.EXTRA_SCRIPTS).decode("string_escape") self._log("extra script detected...enabling: " + str(curScriptName), logger.DEBUG) # generate a safe command line string to execute the script and provide all the parameters script_cmd = shlex.split(curScriptName) + [str(nzb_name), str(nzb_folder), str(filen), str(folderp), str(seriesmetadata)] @@ -134,10 +134,11 @@ class PostProcessor(object): self._log(u"Unable to run extra_script: " + str(script_cmd)) -# def PostProcess(nzb_name, nzb_folder): def Process(self): self._log("nzb name: " + str(self.nzb_name), logger.DEBUG) self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG) + logger.fdebug("nzb name: " + str(self.nzb_name)) + logger.fdebug("nzb folder: " + str(self.nzb_folder)) #lookup nzb_name in nzblog table to get issueid #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals @@ -151,8 +152,8 @@ class PostProcessor(object): sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText - #logger.fdebug("sabreps:" + str(sabreps)) - + logger.fdebug("SAB Replace Spaces: " + str(sabreps)) + logger.fdebug("SAB Replace Dots: " + str(sabrepd)) myDB = db.DBConnection() nzbname = self.nzb_name @@ -168,21 +169,25 @@ class PostProcessor(object): nzbname = re.sub(' ', '.', str(nzbname)) nzbname = re.sub('[\,\:]', '', str(nzbname)) + logger.fdebug("After conversions, nzbname is : " + str(nzbname)) self._log("nzbname: " + str(nzbname), logger.DEBUG) nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone() if nzbiss is None: self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG) + logger.fdebug("Failure - could not locate nzbfile initially.") # if failed on spaces, change it all to decimals and try again. 
nzbname = re.sub('_', '.', str(nzbname)) self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG) + logger.fdebug("trying again with nzbname of : " + str(nzbname)) nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone() if nzbiss is None: logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.") return else: self._log("I corrected and found the nzb as : " + str(nzbname)) + logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname)) issueid = nzbiss['IssueID'] else: issueid = nzbiss['IssueID'] @@ -200,6 +205,7 @@ class PostProcessor(object): issdec = int(iss_decval) issueno = str(iss) self._log("Issue Number: " + str(issueno), logger.DEBUG) + logger.fdebug("Issue Number: " + str(issueno)) else: if len(iss_decval) == 1: iss = iss_b4dec + "." + iss_decval @@ -209,6 +215,7 @@ class PostProcessor(object): issdec = int(iss_decval.rstrip('0')) * 10 issueno = iss_b4dec self._log("Issue Number: " + str(iss), logger.DEBUG) + logger.fdebug("Issue Number: " + str(iss)) # issue zero-suppression here if mylar.ZERO_LEVEL == "0": @@ -218,6 +225,8 @@ class PostProcessor(object): elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0" elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00" + logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N)) + if str(len(issueno)) > 1: if int(issueno) < 10: self._log("issue detected less than 10", logger.DEBUG) @@ -249,17 +258,23 @@ class PostProcessor(object): prettycomiss = str(issueno) self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG) + logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss)) issueyear = issuenzb['IssueDate'][:4] self._log("Issue Year: " + str(issueyear), logger.DEBUG) + logger.fdebug("Issue Year : " + str(issueyear)) comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() publisher = comicnzb['ComicPublisher'] self._log("Publisher: " + publisher, logger.DEBUG) + logger.fdebug("Publisher: " + str(publisher)) series = comicnzb['ComicName'] self._log("Series: " + series, logger.DEBUG) + logger.fdebug("Series: " + str(series)) seriesyear = comicnzb['ComicYear'] self._log("Year: " + seriesyear, logger.DEBUG) + logger.fdebug("Year: " + str(seriesyear)) comlocation = comicnzb['ComicLocation'] self._log("Comic Location: " + comlocation, logger.DEBUG) + logger.fdebug("Comic Location: " + str(comlocation)) #Run Pre-script @@ -301,8 +316,12 @@ class PostProcessor(object): path, ext = os.path.splitext(ofilename) self._log("Original Filename: " + ofilename, logger.DEBUG) self._log("Original Extension: " + ext, logger.DEBUG) + logger.fdebug("Original Filname: " + str(ofilename)) + logger.fdebug("Original Extension: " + str(ext)) + if mylar.FILE_FORMAT == '': self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG) + logger.fdebug("Rename Files isn't enabled - keeping original filename.") #check if extension is in nzb_name - will screw up otherwise if ofilename.lower().endswith(extensions): nfilename = ofilename[:-4] @@ -315,16 +334,20 @@ class PostProcessor(object): nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR) nfilename = re.sub('[\,\:]', '', nfilename) self._log("New Filename: " + nfilename, logger.DEBUG) + logger.fdebug("New Filename: " + str(nfilename)) - src = self.nzb_folder + "/" + ofilename + src = os.path.join(self.nzb_folder, ofilename) if mylar.LOWERCASE_FILENAMES: dst = (comlocation + "/" + nfilename + ext).lower() else: dst = comlocation + 
"/" + nfilename + ext.lower() self._log("Source:" + src, logger.DEBUG) self._log("Destination:" + dst, logger.DEBUG) - os.rename(self.nzb_folder + "/" + ofilename, self.nzb_folder + "/" + nfilename + ext) - src = self.nzb_folder + "/" + nfilename + ext + logger.fdebug("Source: " + str(src)) + logger.fdebug("Destination: " + str(dst)) + + os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext))) + src = os.path.join(self.nzb_folder, str(nfilename + ext)) try: shutil.move(src, dst) except (OSError, IOError): diff --git a/mylar/__init__.py b/mylar/__init__.py index efb72f85..db86e426 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -84,6 +84,10 @@ DESTINATION_DIR = None USENET_RETENTION = None ADD_COMICS = False +COMIC_DIR = None +LIBRARYSCAN = False +IMP_MOVE = False +IMP_RENAME = False SEARCH_INTERVAL = 360 NZB_STARTUP_SEARCH = False @@ -115,6 +119,8 @@ AUTOWANT_UPCOMING = True AUTOWANT_ALL = False COMIC_COVER_LOCAL = False ADD_TO_CSV = True +SKIPPED2WANTED = False +CVINFO = False SAB_HOST = None SAB_USERNAME = None @@ -211,11 +217,11 @@ def initialize(): HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \ CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \ DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \ - LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, \ + LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, \ NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\ NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\ RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \ - PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, \ + PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, \ COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS if __INITIALIZED__: @@ -259,7 +265,12 @@ def initialize(): SEARCH_INTERVAL = check_setting_int(CFG, 'General', 'search_interval', 360) NZB_STARTUP_SEARCH = bool(check_setting_int(CFG, 'General', 'nzb_startup_search', 0)) + LIBRARYSCAN = bool(check_setting_int(CFG, 'General', 'libraryscan', 1)) LIBRARYSCAN_INTERVAL = check_setting_int(CFG, 'General', 'libraryscan_interval', 300) + ADD_COMICS = bool(check_setting_int(CFG, 'General', 'add_comics', 0)) + COMIC_DIR = check_setting_str(CFG, 'General', 'comic_dir', '') + IMP_MOVE = bool(check_setting_int(CFG, 'General', 'imp_move', 0)) + IMP_RENAME = bool(check_setting_int(CFG, 'General', 'imp_rename', 0)) DOWNLOAD_SCAN_INTERVAL = check_setting_int(CFG, 'General', 'download_scan_interval', 5) INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default') AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0)) @@ -283,7 +294,7 @@ def initialize(): 
USE_MAXSIZE = bool(check_setting_int(CFG, 'General', 'use_maxsize', 0)) MAXSIZE = check_setting_str(CFG, 'General', 'maxsize', '') ADD_TO_CSV = bool(check_setting_int(CFG, 'General', 'add_to_csv', 1)) - + CVINFO = bool(check_setting_int(CFG, 'General', 'cvinfo', 0)) ENABLE_EXTRA_SCRIPTS = bool(check_setting_int(CFG, 'General', 'enable_extra_scripts', 0)) EXTRA_SCRIPTS = check_setting_str(CFG, 'General', 'extra_scripts', '') @@ -506,7 +517,12 @@ def config_write(): new_config['General']['search_interval'] = SEARCH_INTERVAL new_config['General']['nzb_startup_search'] = int(NZB_STARTUP_SEARCH) + new_config['General']['libraryscan'] = int(LIBRARYSCAN) new_config['General']['libraryscan_interval'] = LIBRARYSCAN_INTERVAL + new_config['General']['add_comics'] = int(ADD_COMICS) + new_config['General']['comic_dir'] = COMIC_DIR + new_config['General']['imp_move'] = int(IMP_MOVE) + new_config['General']['imp_rename'] = int(IMP_RENAME) new_config['General']['download_scan_interval'] = DOWNLOAD_SCAN_INTERVAL new_config['General']['interface'] = INTERFACE new_config['General']['autowant_all'] = int(AUTOWANT_ALL) @@ -530,7 +546,7 @@ def config_write(): new_config['General']['use_maxsize'] = int(USE_MAXSIZE) new_config['General']['maxsize'] = MAXSIZE new_config['General']['add_to_csv'] = int(ADD_TO_CSV) - + new_config['General']['cvinfo'] = int(CVINFO) new_config['General']['enable_extra_scripts'] = int(ENABLE_EXTRA_SCRIPTS) new_config['General']['extra_scripts'] = EXTRA_SCRIPTS new_config['General']['enable_pre_scripts'] = int(ENABLE_PRE_SCRIPTS) @@ -626,7 +642,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)') # c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)') - + c.execute('CREATE TABLE IF NOT EXISTS importresults (ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT)') conn.commit c.close #new @@ -672,8 +688,8 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE comics ADD COLUMN UseFuzzy TEXT') - #let's delete errant comics that are stranded (ie. None) - c.execute("DELETE from COMICS WHERE ComicName='None'") + #let's delete errant comics that are stranded (ie. Comicname = Comic ID: ) + c.execute("DELETE from COMICS WHERE ComicName='None' OR ComicName LIKE 'Comic ID%'") logger.info(u"Ensuring DB integrity - Removing all Erroneous Comics (ie. named None)") conn.commit() diff --git a/mylar/helpers.py b/mylar/helpers.py index 6443fad4..b0ca8ac1 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -183,3 +183,132 @@ def decimal_issue(iss): issdec = int(iss_decval.rstrip('0')) * 10 deciss = (int(iss_b4dec) * 1000) + issdec return deciss + +def rename_param(comicid, comicname, comicyear, issue, issueid=None): + myDB = db.DBConnection() + if issueid is None: + chkissue = myDB.action("SELECT * from issues WHERE ComicID=? 
AND Issue_Number=?", [comicid, issue]).fetchone() + if chkissue is None: + logger.error("Invalid Issue_Number - please validate.") + return + else: + issueid = chkissue['IssueID'] + + #use issueid to get publisher, series, year, issue number + issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone() + #comicid = issuenzb['ComicID'] + issuenum = issuenzb['Issue_Number'] + #issueno = str(issuenum).split('.')[0] + + iss_find = issuenum.find('.') + iss_b4dec = issuenum[:iss_find] + iss_decval = issuenum[iss_find+1:] + if int(iss_decval) == 0: + iss = iss_b4dec + issdec = int(iss_decval) + issueno = str(iss) + logger.fdebug("Issue Number: " + str(issueno)) + else: + if len(iss_decval) == 1: + iss = iss_b4dec + "." + iss_decval + issdec = int(iss_decval) * 10 + else: + iss = iss_b4dec + "." + iss_decval.rstrip('0') + issdec = int(iss_decval.rstrip('0')) * 10 + issueno = iss_b4dec + logger.fdebug("Issue Number: " + str(iss)) + + # issue zero-suppression here + if mylar.ZERO_LEVEL == "0": + zeroadd = "" + else: + if mylar.ZERO_LEVEL_N == "none": zeroadd = "" + elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0" + elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00" + + logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N)) + + if str(len(issueno)) > 1: + if int(issueno) < 10: + logger.fdebug("issue detected less than 10") + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(zeroadd) + str(iss) + else: + prettycomiss = str(zeroadd) + str(int(issueno)) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss)) + elif int(issueno) >= 10 and int(issueno) < 100: + logger.fdebug("issue detected greater than 10, but less than 100") + if mylar.ZERO_LEVEL_N == "none": + zeroadd = "" + else: + zeroadd = "0" + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(zeroadd) + str(iss) + else: + prettycomiss = str(zeroadd) + str(int(issueno)) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss)) + else: + logger.fdebug("issue detected greater than 100") + if int(iss_decval) > 0: + issueno = str(iss) + prettycomiss = str(issueno) + logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss)) + else: + prettycomiss = str(issueno) + logger.fdebug("issue length error - cannot determine length. 
Defaulting to None: " + str(prettycomiss)) + + logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss)) + issueyear = issuenzb['IssueDate'][:4] + logger.fdebug("Issue Year : " + str(issueyear)) + comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() + publisher = comicnzb['ComicPublisher'] + logger.fdebug("Publisher: " + str(publisher)) + series = comicnzb['ComicName'] + logger.fdebug("Series: " + str(series)) + seriesyear = comicnzb['ComicYear'] + logger.fdebug("Year: " + str(seriesyear)) + comlocation = comicnzb['ComicLocation'] + logger.fdebug("Comic Location: " + str(comlocation)) + + file_values = {'$Series': series, + '$Issue': prettycomiss, + '$Year': issueyear, + '$series': series.lower(), + '$Publisher': publisher, + '$publisher': publisher.lower(), + '$Volume': seriesyear + } + + extensions = ('.cbr', '.cbz') + + if mylar.FILE_FORMAT == '': + self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG) + logger.fdebug("Rename Files isn't enabled - keeping original filename.") + #check if extension is in nzb_name - will screw up otherwise + if ofilename.lower().endswith(extensions): + nfilename = ofilename[:-4] + else: + nfilename = ofilename + else: + nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values) + if mylar.REPLACE_SPACES: + #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot + nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR) + nfilename = re.sub('[\,\:]', '', nfilename) + logger.fdebug("New Filename: " + str(nfilename)) + + if mylar.LOWERCASE_FILENAMES: + dst = (comlocation + "/" + nfilename + ext).lower() + else: + dst = comlocation + "/" + nfilename + ext.lower() + logger.fdebug("Source: " + str(src)) + logger.fdebug("Destination: " + str(dst)) + + rename_this = { "destination_dir" : dst, + "nfilename" : nfilename, + "issueid" : issueid, + "comicid" : comicid } + + return rename_this diff --git a/mylar/importer.py b/mylar/importer.py index 4814075c..042c0c33 100755 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -343,6 +343,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None): #print ("Existing status : " + str(iss_exists['Status'])) newValueDict['Status'] = iss_exists['Status'] + #logger.fdebug("newValueDict:" + str(newValueDict)) + myDB.upsert("issues", newValueDict, controlValueDict) n+=1 @@ -363,6 +365,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None): } myDB.upsert("comics", newValueStat, controlValueStat) + + if mylar.CVINFO: + if not os.path.exists(comlocation + "/cvinfo"): + with open(comlocation + "/cvinfo","w") as text_file: + text_file.write("http://www.comicvine.com/" + str(comic['ComicName']).replace(" ", "-") + "/49-" + str(comicid)) logger.info(u"Updating complete for: " + comic['ComicName']) @@ -657,6 +664,11 @@ def GCDimport(gcomicid, pullupd=None): myDB.upsert("comics", newValueStat, controlValueStat) + if mylar.CVINFO: + if not os.path.exists(comlocation + "/cvinfo"): + with open(comlocation + "/cvinfo","w") as text_file: + text_file.write("http://www.comicvine.com/" + str(comic['ComicName']).replace(" ", "-") + "/49-" + str(comicid)) + logger.info(u"Updating complete for: " + ComicName) if pullupd is None: diff --git a/mylar/librarysync.py b/mylar/librarysync.py new file mode 100644 index 00000000..e0aa767b --- /dev/null +++ b/mylar/librarysync.py @@ -0,0 +1,444 @@ +# This file is part of Mylar. 
+# +# Mylar is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Mylar is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Mylar. If not, see . + +from __future__ import with_statement + +import os +import glob +import re +import shutil + +import mylar +from mylar import db, logger, helpers, importer, updater + +# You can scan a single directory and append it to the current library by specifying append=True +def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None): + + if cron and not mylar.LIBRARYSCAN: + return + + if not dir: + dir = mylar.COMIC_DIR + + # If we're appending a dir, it's coming from the post processor which is + # already bytestring + if not append: + dir = dir.encode(mylar.SYS_ENCODING) + + if not os.path.isdir(dir): + logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace')) + return + + + logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace')) + + basedir = dir + + watchmatch = {} + comic_list = [] + comiccnt = 0 + extensions = ('cbr','cbz') + for r,d,f in os.walk(dir): + #for directory in d[:]: + # if directory.startswith("."): + # d.remove(directory) + for files in f: + if any(files.lower().endswith('.' + x.lower()) for x in extensions): + comic = files + comicpath = os.path.join(r, files) + comicsize = os.path.getsize(comicpath) + print "Comic: " + comic + print "Comic Path: " + comicpath + print "Comic Size: " + str(comicsize) + + # We need the unicode path to use for logging, inserting into database + unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace') + + comiccnt+=1 + comic_dict = { 'ComicFilename': comic, + 'ComicLocation': comicpath, + 'ComicSize': comicsize, + 'Unicode_ComicLocation': unicode_comic_path } + comic_list.append(comic_dict) + + logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now") + + myDB = db.DBConnection() + + #let's load in the watchlist to see if we have any matches. + logger.info("loading in the watchlist to see if a series is being watched already...") + watchlist = myDB.action("SELECT * from comics") + ComicName = [] + ComicYear = [] + ComicPublisher = [] + ComicTotal = [] + ComicID = [] + ComicLocation = [] + + AltName = [] + watchcnt = 0 + + watch_kchoice = [] + watchchoice = {} + import_by_comicids = [] + import_comicids = {} + + for watch in watchlist: + # let's clean up the name, just in case for comparison purposes... 
+ watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['ComicName'])) + #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip() + alt_chk = "no" # alt-checker flag (default to no) + + # account for alternate names as well + if watch['AlternateSearch'] is not None and watch['AlternateSearch'] is not 'None': + altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['AlternateSearch'])) + #altcomic = re.sub('\s+', ' ', str(altcomic)).strip() + AltName.append(altcomic) + alt_chk = "yes" # alt-checker flag + + ComicName.append(watchcomic) + ComicYear.append(watch['ComicYear']) + ComicPublisher.append(watch['ComicPublisher']) + ComicTotal.append(watch['Total']) + ComicID.append(watch['ComicID']) + ComicLocation.append(watch['ComicLocation']) + watchcnt+=1 + + logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.") + + ripperlist=['digital-', + 'empire', + 'dcp'] + + watchfound = 0 + + for i in comic_list: + #print i['ComicFilename'] + + comfilename = i['ComicFilename'] + comlocation = i['ComicLocation'] + #let's clean up the filename for matching purposes + + cfilename = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(comfilename)) + #cfilename = re.sub('\s+', ' ', str(cfilename)).strip() + + cm_cn = 0 + + #we need to track the counter to make sure we are comparing the right array parts + #this takes care of the brackets :) + m = re.findall('[^()]+', cfilename) + lenm = len(m) + print ("there are " + str(lenm) + " words.") + cnt = 0 + yearmatch = "false" + foundonwatch = "False" + + while (cnt < lenm): + if m[cnt] is None: break + if m[cnt] == ' ': + pass + else: + logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt])) + if cnt == 0: + comic_andiss = m[cnt] + logger.fdebug("Comic: " + str(comic_andiss)) + if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': + logger.fdebug("year detected: " + str(m[cnt])) + result_comyear = m[cnt] + yearmatch = "true" + # if str(comyear) in result_comyear: + # logger.fdebug(str(comyear) + " - right years match baby!") + # yearmatch = "true" + # else: + # logger.fdebug(str(comyear) + " - not right - years do not match") + # yearmatch = "false" + #let's do this hear and save a few extra loops ;) + if 'digital' in m[cnt] and len(m[cnt]) == 7: + logger.fdebug("digital edition detected") + pass + if ' of ' in m[cnt]: + logger.fdebug("mini-series detected : " + str(m[cnt])) + result_of = m[cnt] + if 'cover' in m[cnt]: + logger.fdebug("covers detected: " + str(m[cnt])) + result_comcovers = m[cnt] + for ripper in ripperlist: + if ripper in m[cnt]: + logger.fdebug("Scanner detected: " + str(m[cnt])) + result_comscanner = m[cnt] + cnt+=1 + + if yearmatch == "false": + logger.fdebug("failed to match...skipping.") + break + splitit = [] + watchcomic_split = [] + logger.fdebug("filename comic and issue: " + str(cfilename)) + #changed this from '' to ' ' + comic_iss_b4 = re.sub('[\-\:\,]', ' ', str(comic_andiss)) + comic_iss = comic_iss_b4.replace('.',' ') + logger.fdebug("adjusted comic and issue: " + str(comic_iss)) + splitit = comic_iss.split(None) + logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss)) + #bmm = re.findall('v\d', comic_iss) + #if len(bmm) > 0: splitst = len(splitit) - 2 + #else: splitst = len(splitit) - 1 + #----- + #here we cycle through the Watchlist looking for a match. 
+ while (cm_cn < watchcnt): + #setup the watchlist + comname = ComicName[cm_cn] + print ("watch_comic:" + str(comname)) + comyear = ComicYear[cm_cn] + compub = ComicPublisher[cm_cn] + comtotal = ComicTotal[cm_cn] + comicid = ComicID[cm_cn] + watch_location = ComicLocation[cm_cn] + + if splitit[(len(splitit)-1)].isdigit(): + #compares - if the last digit and second last digit are #'s seperated by spaces assume decimal + comic_iss = splitit[(len(splitit)-1)] + splitst = len(splitit) - 1 + if splitit[(len(splitit)-2)].isdigit(): + # for series that have a digit at the end, it screws up the logistics. + i = 1 + chg_comic = splitit[0] + while (i < (len(splitit)-1)): + chg_comic = chg_comic + " " + splitit[i] + i+=1 + logger.fdebug("chg_comic:" + str(chg_comic)) + if chg_comic.upper() == comname.upper(): + logger.fdebug("series contains numerics...adjusting..") + else: + changeup = "." + splitit[(len(splitit)-1)] + logger.fdebug("changeup to decimal: " + str(changeup)) + comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss + splitst = len(splitit) - 2 + else: + # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb + logger.fdebug("invalid naming format of filename detected - cannot properly determine issue") + continue + + # make sure that things like - in watchcomic are accounted for when comparing to nzb. + watchcomic_split = helpers.cleanName(str(comname)) + watchcomic_split = re.sub('[\-\:\,\.]', ' ', watchcomic_split).split(None) + + logger.fdebug(str(splitit) + " file series word count: " + str(splitst)) + logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split))) + if (splitst) != len(watchcomic_split): + logger.fdebug("incorrect comic lengths...not a match") + if str(splitit[0]).lower() == "the": + logger.fdebug("THE word detected...attempting to adjust pattern matching") + splitit[0] = splitit[4:] + else: + logger.fdebug("length match..proceeding") + n = 0 + scount = 0 + logger.fdebug("search-length: " + str(splitst)) + logger.fdebug("Watchlist-length: " + str(len(watchcomic_split))) + while ( n <= (splitst)-1 ): + logger.fdebug("splitit: " + str(splitit[n])) + if n < (splitst) and n < len(watchcomic_split): + logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n])) + if '+' in watchcomic_split[n]: + watchcomic_split[n] = re.sub('+', '', str(watchcomic_split[n])) + if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]): + logger.fdebug("word matched on : " + str(splitit[n])) + scount+=1 + #elif ':' in splitit[n] or '-' in splitit[n]: + # splitrep = splitit[n].replace('-', '') + # print ("non-character keyword...skipped on " + splitit[n]) + elif str(splitit[n].lower()).startswith('v'): + logger.fdebug("possible versioning..checking") + #we hit a versioning # - account for it + if splitit[n][1:].isdigit(): + comicversion = str(splitit[n]) + logger.fdebug("version found: " + str(comicversion)) + else: + logger.fdebug("Comic / Issue section") + if splitit[n].isdigit(): + logger.fdebug("issue detected") + #comiss = splitit[n] + comicNAMER = n - 1 + com_NAME = splitit[0] + cmnam = 1 + while (cmnam <= comicNAMER): + com_NAME = str(com_NAME) + " " + str(splitit[cmnam]) + cmnam+=1 + logger.fdebug("comic: " + str(com_NAME)) + else: + logger.fdebug("non-match for: "+ str(splitit[n])) + pass + n+=1 + #set the match threshold to 80% (for now) + # if it's less than 80% consider it a non-match and discard. 
+ #splitit has to splitit-1 because last position is issue. + wordcnt = int(scount) + logger.fdebug("scount:" + str(wordcnt)) + totalcnt = int(splitst) + logger.fdebug("splitit-len:" + str(totalcnt)) + spercent = (wordcnt/totalcnt) * 100 + logger.fdebug("we got " + str(spercent) + " percent.") + if int(spercent) >= 80: + logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!") + logger.fdebug("this should be a match!") + if '.' in comic_iss: + comisschk_find = comic_iss.find('.') + comisschk_b4dec = comic_iss[:comisschk_find] + comisschk_decval = comic_iss[comisschk_find+1:] + logger.fdebug("Found IssueNumber: " + str(comic_iss)) + logger.fdebug("..before decimal: " + str(comisschk_b4dec)) + logger.fdebug("...after decimal: " + str(comisschk_decval)) + #--let's make sure we don't wipe out decimal issues ;) + if int(comisschk_decval) == 0: + ciss = comisschk_b4dec + cintdec = int(comisschk_decval) + else: + if len(comisschk_decval) == 1: + ciss = comisschk_b4dec + "." + comisschk_decval + cintdec = int(comisschk_decval) * 10 + else: + ciss = comisschk_b4dec + "." + comisschk_decval.rstrip('0') + cintdec = int(comisschk_decval.rstrip('0')) * 10 + comintIss = (int(comisschk_b4dec) * 1000) + cintdec + else: + comintIss = int(comic_iss) * 1000 + logger.fdebug("issue we found for is : " + str(comic_iss)) + + #issue comparison now as well + logger.info(u"Found " + str(comname) + " (" + str(comyear) + ") issue: " + str(comic_iss)) + watchfound+=1 +# updater.forceRescan(ComicID=comicid) +# if not any(d.get('ComicID', None) == str(comicid) for d in watch_kchoice): + watch_kchoice.append({ + "ComicID": str(comicid), + "ComicName": str(comname), + "ComicYear": str(comyear), + "ComicIssue": str(int(comic_iss)), + "ComicLocation": str(watch_location), + "OriginalLocation" : str(comlocation), + "OriginalFilename" : str(comfilename) + }) + foundonwatch = "True" + break + elif int(spercent) < 80: + logger.fdebug("failure - we only got " + str(spercent) + "% right!") + cm_cn+=1 + + if foundonwatch == "False": + #---if it's not a match - send it to the importer. + n = 0 + csplit = comic_andiss.split(None) + while ( n <= (len(csplit)-1) ): + if csplit[n].isdigit(): + logger.fdebug("issue detected") + #comiss = splitit[n] + comicNAMER = n - 1 + com_NAME = csplit[0] + cmnam = 1 + while (cmnam <= comicNAMER): + com_NAME = str(com_NAME) + " " + str(csplit[cmnam]) + cmnam+=1 + logger.fdebug("comic: " + str(com_NAME)) + n+=1 + + print ("adding " + str(com_NAME) + " to the import-queue!") + import_by_comicids.append({ + "comicname" : com_NAME, + "comicyear" : result_comyear, + "comfilename" : comfilename, + "comlocation" : comlocation + }) + + if len(watch_kchoice) > 0: + watchchoice['watchlist'] = watch_kchoice + print ("watchchoice: " + str(watchchoice)) + + logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.") + wat = 0 + comicids = [] + + if watchfound > 0: + if mylar.IMP_MOVE: + logger.info("You checked off Move Files...so that's what I'm going to do") + #check to see if Move Files is enabled. + #if not being moved, set the archive bit. 
+ print("Moving files into appropriate directory") + while (wat < watchfound): + watch_the_list = watchchoice['watchlist'][wat] + watch_comlocation = watch_the_list['ComicLocation'] + watch_comicid = watch_the_list['ComicID'] + watch_comicname = watch_the_list['ComicName'] + watch_comicyear = watch_the_list['ComicYear'] + watch_comiciss = watch_the_list['ComicIssue'] + print ("ComicLocation: " + str(watch_comlocation)) + orig_comlocation = watch_the_list['OriginalLocation'] + orig_filename = watch_the_list['OriginalFilename'] + print ("Orig. Location: " + str(orig_comlocation)) + print ("Orig. Filename: " + str(orig_filename)) + #before moving check to see if Rename to Mylar structure is enabled. + if mylar.IMP_RENAME: + print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT)) + renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss) + nfilename = renameit['nfilename'] + + dst_path = os.path.join(watch_comlocation,nfilename) + if str(watch_comicid) not in comicids: + comicids.append(watch_comicid) + else: + print("Renaming files not enabled, keeping original filename(s)") + dst_path = os.path.join(watch_comlocation,orig_filename) + + #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext))) + #src = os.path.join(, str(nfilename + ext)) + print ("I'm going to move " + str(orig_comlocation) + " to .." + str(dst_path)) + try: + shutil.move(orig_comlocation, dst_path) + except (OSError, IOError): + logger.info("Failed to move directory - check directories and manually re-run.") + wat+=1 + else: + # if move files isn't enabled, let's set all found comics to Archive status :) + while (wat < watchfound): + watch_the_list = watchchoice['watchlist'][wat] + watch_comicid = watch_the_list['ComicID'] + watch_issue = watch_the_list['ComicIssue'] + print ("ComicID: " + str(watch_comicid)) + print ("Issue#: " + str(watch_issue)) + issuechk = myDB.action("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone() + if issuechk is None: + print ("no matching issues for this comic#") + else: + print("...Existing status: " + str(issuechk['Status'])) + control = {"IssueID": issuechk['IssueID']} + values = { "Status": "Archived"} + print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ") + myDB.upsert("issues", values, control) + if str(watch_comicid) not in comicids: + comicids.append(watch_comicid) + wat+=1 + if comicids is None: pass + else: + c_upd = len(comicids) + c = 0 + while (c < c_upd ): + print ("Rescanning.. " + str(c)) + updater.forceRescan(c) + if not len(import_by_comicids): + return "Completed" + if len(import_by_comicids) > 0: + import_comicids['comic_info'] = import_by_comicids + print ("import comicids: " + str(import_by_comicids)) + return import_comicids diff --git a/mylar/parseit.py b/mylar/parseit.py index 904f0270..961973ce 100755 --- a/mylar/parseit.py +++ b/mylar/parseit.py @@ -14,16 +14,17 @@ # along with Mylar. If not, see . 
-from bs4 import BeautifulSoup +from bs4 import BeautifulSoup, UnicodeDammit import urllib2 import re import helpers import logger import datetime +import sys from decimal import Decimal from HTMLParser import HTMLParseError -def GCDScraper(ComicName, ComicYear, Total, ComicID): +def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None): NOWyr = datetime.date.today().year if datetime.date.today().month == 12: NOWyr = NOWyr + 1 @@ -148,8 +149,11 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID): if 'and' in ComicName.lower(): ComicName = ComicName.replace('and', '&') return GCDScraper(ComicName, ComicYear, Total, ComicID) - return 'No Match' + if not quickmatch: return 'No Match' #vari_loop = 0 + if quickmatch == "yes": + if resultURL is None: return 'No Match' + else: return 'Match' return GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=ComicID, TotalIssues=TotalIssues, issvariation=issvariation, resultPublished=resultPublished) @@ -179,8 +183,17 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati # let's pull down the publication date as it'll be blank otherwise inputMIS = 'http://www.comics.org' + str(resultURL) resp = urllib2.urlopen ( inputMIS ) - soup = BeautifulSoup ( resp ) - +# soup = BeautifulSoup ( resp ) + try: + soup = BeautifulSoup(urllib2.urlopen(inputMIS)) + except UnicodeDecodeError: + logger.info("I've detected your system is using: " + sys.stdout.encoding) + logger.info("unable to parse properly due to utf-8 problem, ignoring wrong symbols") + try: + soup = BeautifulSoup(urllib2.urlopen(inputMIS)).decode('utf-8', 'ignore') + except UnicodeDecodeError: + logger.info("not working...aborting. Tell Evilhero.") + return parsed = soup.find("div", {"id" : "series_data"}) subtxt3 = parsed.find("dd", {"id" : "publication_dates"}) resultPublished = subtxt3.findNext(text=True).rstrip() @@ -195,8 +208,8 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati #print ("resultURL:" + str(resultURL)) #print ("comicID:" + str(ComicID)) input2 = 'http://www.comics.org' + str(resultURL) + 'details/' - resp = urllib2.urlopen ( input2 ) - soup = BeautifulSoup ( resp ) + resp = urllib2.urlopen(input2) + soup = BeautifulSoup(resp) #for newer comics, on-sale date has complete date... #for older comics, pub.date is to be used @@ -239,11 +252,16 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati #print ( "ID: " + str(resultID) ) if ',' in ParseIssue: ParseIssue = re.sub("\,", "", ParseIssue) + #print ("ParseIssue before : " + str(ParseIssue)) + if 'Vol' in ParseIssue or '[' in ParseIssue: + ParseIssue = re.sub("[^0-9]", "", ParseIssue) isslen = ParseIssue.find(' ') #if 'isslen' exists, it means that it's an alternative cover. #however, if ONLY alternate covers exist of an issue it won't work. #let's use the FIRST record, and ignore all other covers for the given issue. isschk = ParseIssue[:isslen] + #print ("Parse is now: " + str(isschk)) + #check if decimal or '1/2' exists or not, and store decimal results halfchk = "no" if '.' 
in isschk: @@ -309,6 +327,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati ParseDate = "0000-00-00" #ParseDate = ParseDate.replace('?','') ParseDate = ParseDate.replace(' ','') + #print "Parse date: " + str(ParseDate) gcdinfo['ComicDate'] = ParseDate #^^ will retrieve date # if not any(d.get('GCDIssue', None) == str(gcdinfo['ComicIssue']) for d in gcdchoice): @@ -455,11 +474,11 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID): comicis = Total comicid = ComicID comicpub = ComicPublisher - print ( "comicname: " + str(comicnm) ) - print ( "comicyear: " + str(comicyr) ) - print ( "comichave: " + str(comicis) ) - print ( "comicpub: " + str(comicpub) ) - print ( "comicid: " + str(comicid) ) + #print ( "comicname: " + str(comicnm) ) + #print ( "comicyear: " + str(comicyr) ) + #print ( "comichave: " + str(comicis) ) + #print ( "comicpub: " + str(comicpub) ) + #print ( "comicid: " + str(comicid) ) # do 3 runs at the comics.org search to get the best results comicrun = [] # &pub_name=DC @@ -574,3 +593,11 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID): comchoice['comchkchoice'] = comchkchoice return comchoice, totalcount +def decode_html(html_string): + converted = UnicodeDammit(html_string, isHTML=True) + if not converted.unicode: + raise UnicodeDecodeError( + "Failed to detect encoding, tried [%s]", + ', '.join(converted.triedEncodings)) + # print converted.originalEncoding + return converted.unicode diff --git a/mylar/search.py b/mylar/search.py index 7af39860..f2c8a98d 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -362,6 +362,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is logger.fdebug("Entry: " + str(thisentry)) cleantitle = re.sub('[_/.]', ' ', str(entry['title'])) cleantitle = helpers.cleanName(str(cleantitle)) + nzbname = cleantitle logger.fdebug("Cleantitle: " + str(cleantitle)) @@ -482,7 +483,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is #else: splitst = len(splitit) - 1 # make sure that things like - in watchcomic are accounted for when comparing to nzb. - watchcomic_split = re.sub('[\-\:\,\.]', ' ', findcomic[findloop]).split(None) + watchcomic_split = helpers.cleanName(str(findcomic[findloop])) + watchcomic_split = re.sub('[\-\:\,\.]', ' ', watchcomic_split).split(None) logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst)) logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split))) @@ -503,7 +505,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. 
" + str(splitit[n])) if '+' in watchcomic_split[n]: watchcomic_split[n] = re.sub('+', '', str(watchcomic_split[n])) - if str(watchcomic_split[n].lower()) in str(splitit[n].lower()): + if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]): logger.fdebug("word matched on : " + str(splitit[n])) scount+=1 #elif ':' in splitit[n] or '-' in splitit[n]: @@ -607,6 +609,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb)) logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) ) + nzbname = filenamenzb[:-4] + logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname)) #end blackhole else: @@ -627,12 +631,43 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is logger.fdebug("link to retrieve via api:" + str(linkapi)) + #let's change all space to decimals for simplicity + nzbname = re.sub(" ", ".", str(entry['title'])) + nzbname = re.sub('[\,\:]', '', str(nzbname)) + extensions = ('.cbr', '.cbz') + + if nzbname.lower().endswith(extensions): + fd, ext = os.path.splitext(nzbname) + logger.fdebug("Removed extension from nzb: " + ext) + nzbname = re.sub(str(ext), '', str(nzbname)) + + logger.fdebug("nzbname used for post-processing:" + str(nzbname)) #we need to change the nzbx string now to allow for the nzbname rename. if nzbprov == 'nzbx': nzbxlink_st = linkapi.find("*|*") linkapi = linkapi[:(nzbxlink_st + 3)] + str(nzbname) logger.fdebug("new linkapi (this should =nzbname) :" + str(linkapi)) + +# #test nzb.get +# if mylar.NZBGET: +# from xmlrpclib import ServerProxy +# if mylar.NZBGET_HOST[:4] = 'http': +# tmpapi = "http://" +# nzbget_host = mylar.NZBGET_HOST[7] +# elif mylar.NZBGET_HOST[:5] = 'https': +# tmpapi = "https://" +# nzbget_host = mylar.NZBGET_HOST[8] +# tmpapi = tmpapi + str(mylar.NZBGET_USERNAME) + ":" + str(mylar.NZBGET_PASSWORD) +# tmpapi = tmpapi + "@" + nzbget_host + ":" + str(mylar.NZBGET_PORT) + "/xmlrpc" +# server = ServerProxy(tmpapi) +# send_to_nzbget = server.appendurl(nzbname, mylar.NZBGET_CATEGORY, mylar.NZBGET_PRIORITY, True, str(linkapi)) +# if send_to_nzbget is True: +# logger.info("Successfully sent nzb to NZBGet!") +# else: +# logger.info("Unable to send nzb to NZBGet - check your configs.") +# #end nzb.get test + # let's build the send-to-SAB string now: tmpapi = str(mylar.SAB_HOST) logger.fdebug("send-to-SAB host string: " + str(tmpapi)) @@ -673,22 +708,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is # logger.info(u"Removed temporary save file") #raise an exception to break out of loop - #let's change all space to decimals for simplicity - if mylar.BLACKHOLE: - bhole_cname = re.sub('[/:/,\/]', '', str(ComicName)) - nzbname = str(re.sub(" ", ".", str(bhole_cname))) + "." 
+ str(IssueNumber) + ".(" + str(comyear) + ")" - else: - nzbname = re.sub(" ", ".", str(entry['title'])) - nzbname = re.sub('[\,\:]', '', str(nzbname)) - extensions = ('.cbr', '.cbz') - - if nzbname.lower().endswith(extensions): - fd, ext = os.path.splitext(nzbname) - logger.fdebug("Removed extension from nzb: " + ext) - nzbname = re.sub(str(ext), '', str(nzbname)) - - - logger.fdebug("nzbname used for post-processing:" + str(nzbname)) foundc = "yes" done = True @@ -728,7 +747,7 @@ def searchforissue(issueid=None, new=False): new = True for result in results: - comic = myDB.action('SELECT * from comics WHERE ComicID=?', [result['ComicID']]).fetchone() + comic = myDB.action("SELECT * from comics WHERE ComicID=? AND ComicName != 'None'", [result['ComicID']]).fetchone() foundNZB = "none" SeriesYear = comic['ComicYear'] AlternateSearch = comic['AlternateSearch'] diff --git a/mylar/webserve.py b/mylar/webserve.py index ec837306..1c9f0821 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -28,10 +28,11 @@ import time import threading import csv import platform +import Queue import mylar -from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version +from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version, librarysync #from mylar.helpers import checked, radio, today import lib.simplejson as simplejson @@ -70,12 +71,14 @@ class WebInterface(object): if comic is None: raise cherrypy.HTTPRedirect("home") usethefuzzy = comic['UseFuzzy'] + skipped2wanted = "0" if usethefuzzy is None: usethefuzzy = "0" comicConfig = { "comiclocation" : mylar.COMIC_LOCATION, "fuzzy_year0" : helpers.radio(int(usethefuzzy), 0), "fuzzy_year1" : helpers.radio(int(usethefuzzy), 1), - "fuzzy_year2" : helpers.radio(int(usethefuzzy), 2) + "fuzzy_year2" : helpers.radio(int(usethefuzzy), 2), + "skipped2wanted" : helpers.checked(skipped2wanted) } return serve_template(templatename="artistredone.html", title=comic['ComicName'], comic=comic, issues=issues, comicConfig=comicConfig) artistPage.exposed = True @@ -95,6 +98,7 @@ class WebInterface(object): searchresults = mb.findComic(name, mode, issue=None) elif type == 'comic' and mode == 'want': searchresults = mb.findComic(name, mode, issue) + searchresults = sorted(searchresults, key=itemgetter('comicyear','issues'), reverse=True) #print ("Results: " + str(searchresults)) return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type) @@ -113,7 +117,7 @@ class WebInterface(object): #here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc). CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone() if CV_EXcomicid is None: # pass # - gcdinfo=parseit.GCDScraper(comicname, comicyear, comicissues, comicid) + gcdinfo=parseit.GCDScraper(comicname, comicyear, comicissues, comicid, quickmatch="yes") if gcdinfo == "No Match": #when it no matches, the image will always be blank...let's fix it. 
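The new quickmatch argument turns GCDScraper into a cheap existence probe, so callers such as addComic above can test for a GCD match before paying for the full series scrape. A sketch of the two-phase call pattern this enables (assuming the mylar package is importable; the wrapper function is illustrative):

    # Sketch of the two-phase lookup the quickmatch flag enables.
    # Assumes mylar's parseit module is importable; the wrapper itself
    # is illustrative, not part of the patch.
    from mylar import parseit

    def gcd_lookup(comicname, comicyear, comicissues, comicid):
        # cheap pass: only answers 'Match' / 'No Match', no detail scraping
        probe = parseit.GCDScraper(comicname, comicyear, comicissues, comicid,
                                   quickmatch="yes")
        if probe == 'No Match':
            return None  # caller falls back to ComicVine-only data
        # full pass: scrape publication dates and the issue list
        return parseit.GCDScraper(comicname, comicyear, comicissues, comicid)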
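The UnicodeDecodeError handling in parseit.py, and the decode_html helper it adds, both follow the same idea: get the raw page bytes decoded into usable text before BeautifulSoup chokes on them. A stdlib-only sketch of that idea, using a lossy utf-8 fallback where the patch itself reaches for UnicodeDammit:

    # Sketch of the "decode first, parse second" idea behind the
    # UnicodeDecodeError handling in parseit.py.  This stdlib-only variant
    # simply falls back to a lossy utf-8 decode; the patch leans on
    # BeautifulSoup's UnicodeDammit instead.
    import urllib2

    def fetch_page_text(url):
        raw = urllib2.urlopen(url).read()    # bytes, whatever the site sent
        try:
            return raw.decode('utf-8')
        except UnicodeDecodeError:
            # drop the undecodable bytes rather than abort the whole scrape
            return raw.decode('utf-8', 'ignore')

    # usage: soup = BeautifulSoup(fetch_page_text('http://www.comics.org/...'))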
cvdata = mylar.cv.getComic(comicid,'comic') @@ -177,7 +181,7 @@ class WebInterface(object): def from_Exceptions(self, comicid, gcdid, comicname=None, comicyear=None, comicissues=None, comicpublisher=None): mismatch = "yes" - print ("gcdid:" + str(gcdid)) + #print ("gcdid:" + str(gcdid)) #write it to the custom_exceptions.csv and reload it so that importer will pick it up and do it's thing :) #custom_exceptions in this format... #99, (comicid), (gcdid), none @@ -207,6 +211,20 @@ class WebInterface(object): gcomicid = "G" + str(comicid) comicyear_len = comicyear.find(' ', 2) comyear = comicyear[comicyear_len+1:comicyear_len+5] + if comyear.isdigit(): + logger.fdebug("Series year set to : " + str(comyear)) + else: + logger.fdebug("Invalid Series year detected - trying to adjust from " + str(comyear)) + #comicyear_len above will trap wrong year if it's 10 October 2010 - etc ( 2000 AD)... + find_comicyear = comicyear.split() + for i in find_comicyear: + if len(i) == 4: + logger.fdebug("Series year detected as : " + str(i)) + comyear = str(i) + continue + + logger.fdebug("Series year set to: " + str(comyear)) + controlValueDict = { 'ComicID': gcomicid } newValueDict = {'ComicName': comicname, 'ComicYear': comyear, @@ -256,6 +274,7 @@ class WebInterface(object): logger.info(u"Deleting all traces of Comic: " + str(ComicName)) myDB.action('DELETE from comics WHERE ComicID=?', [ComicID]) myDB.action('DELETE from issues WHERE ComicID=?', [ComicID]) + myDB.action('DELETE from upcoming WHERE ComicID=?' [ComicID]) raise cherrypy.HTTPRedirect("home") deleteArtist.exposed = True @@ -387,16 +406,26 @@ class WebInterface(object): def pullist(self): myDB = db.DBConnection() + weeklyresults = [] popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'") if popit: - weeklyresults = myDB.select("SELECT * from weekly") + w_results = myDB.select("SELECT PUBLISHER, ISSUE, COMIC, STATUS from weekly") + for weekly in w_results: + if weekly['ISSUE'].isdigit(): + weeklyresults.append({ + "PUBLISHER" : weekly['PUBLISHER'], + "ISSUE" : weekly['ISSUE'], + "COMIC" : weekly['COMIC'], + "STATUS" : weekly['STATUS'] + }) + weeklyresults = sorted(weeklyresults, key=itemgetter('PUBLISHER','COMIC'), reverse=False) pulldate = myDB.action("SELECT * from weekly").fetchone() if pulldate is None: return self.manualpull() #raise cherrypy.HTTPRedirect("home") else: return self.manualpull() - return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'],pullfilter=False) + return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'], pullfilter=True) pullist.exposed = True def filterpull(self): @@ -444,6 +473,29 @@ class WebInterface(object): return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues) upcoming.exposed = True + def skipped2wanted(self, comicid): + # change all issues for a given ComicID that are Skipped, into Wanted. + issuestowanted = [] + issuesnumwant = [] + myDB = db.DBConnection() + skipped2 = myDB.select("SELECT * from issues WHERE ComicID=? 
AND Status='Skipped'", [comicid]) + for skippy in skipped2: + mvcontroldict = {"IssueID": skippy['IssueID']} + mvvalues = {"Status": "Wanted"} + #print ("Changing issue " + str(skippy['Issue_Number']) + " to Wanted.") + myDB.upsert("issues", mvvalues, mvcontroldict) + issuestowanted.append(skippy['IssueID']) + issuesnumwant.append(skippy['Issue_Number']) + if len(issuestowanted) > 0 : + logger.info("Marking issues: %s as Wanted" % issuesnumwant) + threading.Thread(target=search.searchIssueIDList, args=[issuestowanted]).start() + raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % [comicid]) + skipped2wanted.exposed = True + + def ManualRename(self): + print ("hello") + ManualRename.exposed = True + def searchScan(self, name): return serve_template(templatename="searchfix.html", title="Manage", name=name) searchScan.exposed = True @@ -538,6 +590,64 @@ class WebInterface(object): raise cherrypy.HTTPRedirect("history") clearhistory.exposed = True + #for testing. + def idirectory(self): + return serve_template(templatename="idirectory.html", title="Import a Directory") + idirectory.exposed = True + + def comicScan(self, path, scan=0, redirect=None, autoadd=0, libraryscan=0, imp_move=0, imp_rename=0): + mylar.LIBRARYSCAN = libraryscan + mylar.ADD_COMICS = autoadd + mylar.COMIC_DIR = path + mylar.IMP_MOVE = imp_move + mylar.IMP_RENAME = imp_rename + mylar.config_write() + if scan: + try: + soma = librarysync.libraryScan() + except Exception, e: + logger.error('Unable to complete the scan: %s' % e) + if soma == "Completed": + print ("sucessfully completed import.") + else: + logger.info(u"Starting mass importing...") + #this is what it should do... + #store soma (the list of comic_details from importing) into sql table so import can be whenever + #display webpage showing results + #allow user to select comic to add (one at a time) + #call addComic off of the webpage to initiate the add. + #return to result page to finish or continue adding. + #.... 
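The skipped2wanted handler above is a small batch-update pattern: flip the matching issue rows to Wanted, remember their IssueIDs, and hand the list to a background thread so the web request returns immediately. A condensed sketch of that flow, reusing mylar's db wrapper and search.searchIssueIDList exactly as the handler does:

    # Condensed sketch of the skipped -> wanted flow added above.
    # Relies on mylar's own db wrapper and search.searchIssueIDList,
    # the same calls the new handler makes.
    import threading
    from mylar import db, search

    def mark_skipped_as_wanted(comicid):
        myDB = db.DBConnection()
        rows = myDB.select("SELECT IssueID from issues WHERE ComicID=? AND Status='Skipped'",
                           [comicid])
        wanted = [row['IssueID'] for row in rows]
        for issueid in wanted:
            myDB.upsert("issues", {"Status": "Wanted"}, {"IssueID": issueid})
        if wanted:
            # fire-and-forget so the HTTP request is not blocked by the search
            threading.Thread(target=search.searchIssueIDList, args=[wanted]).start()
        return wanted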
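Both of the new sorts in webserve.py (search results by year and issue count, the pull list by publisher and title) rely on the operator.itemgetter idiom for ordering dict-like rows on several keys at once. A tiny self-contained example of that idiom (the sample rows are made up):

    # operator.itemgetter sorts a list of dicts on several keys at once,
    # the same idiom the new pullist/search-result sorting uses.
    # The data below is purely illustrative.
    from operator import itemgetter

    rows = [
        {'PUBLISHER': 'MARVEL COMICS', 'COMIC': 'A+X', 'ISSUE': '3'},
        {'PUBLISHER': 'DC COMICS', 'COMIC': 'BATMAN', 'ISSUE': '15'},
        {'PUBLISHER': 'DC COMICS', 'COMIC': 'AQUAMAN', 'ISSUE': '15'},
    ]

    rows = sorted(rows, key=itemgetter('PUBLISHER', 'COMIC'))
    for row in rows:
        print("%-15s %-10s #%s" % (row['PUBLISHER'], row['COMIC'], row['ISSUE']))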
+ #threading.Thread(target=self.searchit).start() + #threadthis = threadit.ThreadUrl() + #result = threadthis.main(soma) + myDB = db.DBConnection() + sl = 0 + while (sl < len(soma)): + soma_sl = soma['comic_info'][sl] + print ("cname: " + soma_sl['comicname']) + + controlValue = {"ComicName": soma_sl['comicname']} + newValue = {"ComicYear": soma_sl['comicyear'], + "Status": "Not Imported", + "ImportDate": helpers.today()} + myDB.upsert("importresults", newValue, controlValue) + sl+=1 + + self.importResults() + + if redirect: + raise cherrypy.HTTPRedirect(redirect) + else: + raise cherrypy.HTTPRedirect("home") + comicScan.exposed = True + + def importResults(self): + myDB = db.DBConnection() + results = myDB.select("SELECT * FROM importresults") + return serve_template(templatename="importresults.html", title="Import Results", results=results) + importResults.exposed = True + #--- def config(self): interface_dir = os.path.join(mylar.PROG_DIR, 'data/interfaces/') @@ -599,6 +709,7 @@ class WebInterface(object): "zero_level" : helpers.checked(mylar.ZERO_LEVEL), "zero_level_n" : mylar.ZERO_LEVEL_N, "add_to_csv" : helpers.checked(mylar.ADD_TO_CSV), + "cvinfo" : helpers.checked(mylar.CVINFO), "lowercase_filenames" : helpers.checked(mylar.LOWERCASE_FILENAMES), "enable_extra_scripts" : helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS), "extra_scripts" : mylar.EXTRA_SCRIPTS, @@ -618,11 +729,17 @@ class WebInterface(object): return serve_template(templatename="config.html", title="Settings", config=config) config.exposed = True - def error_change(self, comicid, errorgcd): + def error_change(self, comicid, errorgcd, comicname): + # if comicname contains a "," it will break the exceptions import. + import urllib + b = urllib.unquote_plus(comicname) + cname = b.decode("utf-8") + cname = re.sub("\,", "", cname) + if errorgcd[:5].isdigit(): print ("GCD-ID detected : " + str(errorgcd)[:5]) - print ("I'm assuming you know what you're doing - going to force-match.") - self.from_Exceptions(comicid=comicid,gcdid=errorgcd) + print ("I'm assuming you know what you're doing - going to force-match for " + cname.encode("utf-8")) + self.from_Exceptions(comicid=comicid,gcdid=errorgcd,comicname=cname) else: print ("Assuming rewording of Comic - adjusting to : " + str(errorgcd)) Err_Info = mylar.cv.getComic(comicid,'comic') @@ -697,7 +814,7 @@ class WebInterface(object): sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, log_dir=None, blackhole=0, blackhole_dir=None, usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0, raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0, - preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, + preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs): mylar.HTTP_HOST = http_host mylar.HTTP_PORT = http_port @@ -741,6 +858,7 @@ class 
WebInterface(object): mylar.ZERO_LEVEL = zero_level mylar.ZERO_LEVEL_N = zero_level_n mylar.ADD_TO_CSV = add_to_csv + mylar.CVINFO = cvinfo mylar.LOWERCASE_FILENAMES = lowercase_filenames mylar.USE_MINSIZE = use_minsize mylar.MINSIZE = minsize diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py index a12b6c9c..acd985e4 100755 --- a/mylar/weeklypull.py +++ b/mylar/weeklypull.py @@ -37,7 +37,7 @@ def pullit(): pull_date = myDB.action("SELECT SHIPDATE from weekly").fetchone() logger.info(u"Weekly pull list present - checking if it's up-to-date..") pulldate = pull_date['SHIPDATE'] - except sqlite3.OperationalError, msg: + except (sqlite3.OperationalError, TypeError),msg: conn=sqlite3.connect(mylar.DB_FILE) c=conn.cursor() logger.info(u"Error Retrieving weekly pull list - attempting to adjust") @@ -397,9 +397,10 @@ def pullitcheck(comic1off_name=None,comic1off_id=None): lines[cnt] = str(lines[cnt]).upper() #llen[cnt] = str(llen[cnt]) logger.fdebug("looking for : " + str(lines[cnt])) - sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(lines[cnt])) + sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', str(lines[cnt])) sqlsearch = re.sub(r'\s', '%', sqlsearch) if 'THE' in sqlsearch: sqlsearch = re.sub('THE', '', sqlsearch) + if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch) logger.fdebug("searchsql: " + str(sqlsearch)) weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch]) #cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]]) @@ -421,12 +422,12 @@ def pullitcheck(comic1off_name=None,comic1off_id=None): comicnm = week['COMIC'] #here's the tricky part, ie. BATMAN will match on #every batman comic, not exact -# logger.fdebug("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper()) - logger.fdebug("comparing" + str(sqlsearch) + "..to.." + str(unlines[cnt]).upper()) + logger.fdebug("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper()) + #logger.fdebug("comparing" + str(sqlsearch) + "..to.." + str(unlines[cnt]).upper()) #-NEW- # strip out all special characters and compare - watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', str(sqlsearch)) + watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', str(unlines[cnt])) comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', str(comicnm)) watchcomic = re.sub(r'\s', '', watchcomic) comicnm = re.sub(r'\s', '', comicnm) @@ -435,6 +436,10 @@ def pullitcheck(comic1off_name=None,comic1off_id=None): logger.fdebug("ComicNM: " + str(comicnm)) if 'THE' in str(watchcomic): modcomicnm = re.sub('THE', '', comicnm) + #thnx to A+X for this... + if '+' in str(watchcomic): + if 'plus' in str(comicnm).lower(): + modcomicnm = re.sub('plus', '+', comicnm) if str(comicnm) == str(watchcomic).upper() or str(modcomicnm) == str(watchcomic).upper(): logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper()) #pass
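The reworked weeklypull comparison normalizes both names before matching: strip punctuation and whitespace, ignore THE, and treat a literal '+' in the watched title as equivalent to PLUS in the pull-list entry (the A+X case). The sketch below condenses that idea into one normalizer applied to both sides rather than copying the patch's exact branches:

    # Sketch of the title normalization used when comparing a watched comic
    # against a weekly pull-list entry.  The character class is close to the
    # patch's, with '+' deliberately kept so the A+X handling can fire.
    import re

    SPECIALS = r"[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]"

    def normalize(name):
        # strip punctuation and whitespace, uppercase, drop THE
        name = re.sub(SPECIALS, '', str(name)).upper()
        name = re.sub(r'\s', '', name)
        return re.sub('THE', '', name)

    def pull_match(watchcomic, pullcomic):
        watch = normalize(watchcomic)
        pull = normalize(pullcomic)
        if '+' in watch and 'PLUS' in pull:
            pull = pull.replace('PLUS', '+')   # the A+X case
        return watch == pull

    # pull_match('A+X', 'A PLUS X')              -> True
    # pull_match('The Walking Dead', 'WALKING DEAD') -> True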
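For reference, the disabled NZBGet block in search.py drafts an XML-RPC hand-off but would not run as written (single '=' where comparisons are intended, and single-character slices of the host string). A cleaned-up version of that draft is below; the NZBGET_* config names and the appendurl(name, category, priority, add_to_top, url) call are carried over from the comment and remain assumptions until the feature is actually wired up:

    # Cleaned-up version of the commented-out NZBGet draft in search.py.
    # The config attributes and the appendurl(...) call are taken from that
    # draft and are assumptions until the feature is enabled.
    from xmlrpclib import ServerProxy
    import mylar

    def send_to_nzbget(nzbname, linkapi):
        host = mylar.NZBGET_HOST
        if host.startswith('https://'):
            scheme, bare_host = 'https://', host[8:]
        elif host.startswith('http://'):
            scheme, bare_host = 'http://', host[7:]
        else:
            scheme, bare_host = 'http://', host
        url = "%s%s:%s@%s:%s/xmlrpc" % (scheme, mylar.NZBGET_USERNAME,
                                        mylar.NZBGET_PASSWORD, bare_host,
                                        mylar.NZBGET_PORT)
        server = ServerProxy(url)
        return server.appendurl(nzbname, mylar.NZBGET_CATEGORY,
                                mylar.NZBGET_PRIORITY, True, str(linkapi))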
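Finally, the error_change() change decodes the URL-encoded comic name and strips commas so the value cannot break the comma-delimited custom_exceptions.csv entry it is written into. A small sketch of that sanitizing step (the helper name is illustrative):

    # Sketch of the comma-safe name handling added to error_change():
    # the comic name arrives URL-encoded, and a stray "," would corrupt
    # the comma-delimited custom_exceptions.csv line it ends up on.
    import re
    import urllib

    def csv_safe_name(quoted_name):
        name = urllib.unquote_plus(quoted_name)
        if isinstance(name, str):          # Python 2 bytes -> unicode
            name = name.decode("utf-8")
        return re.sub(",", "", name)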