2012-09-14 17:29:01 +00:00
# This file is part of Mylar.
2012-09-13 15:27:34 +00:00
#
2012-09-14 17:29:01 +00:00
# Mylar is free software: you can redistribute it and/or modify
2012-09-13 15:27:34 +00:00
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
2012-09-14 17:29:01 +00:00
# Mylar is distributed in the hope that it will be useful,
2012-09-13 15:27:34 +00:00
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
2012-09-14 17:29:01 +00:00
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
2012-09-13 15:27:34 +00:00
2013-01-15 19:02:32 +00:00
from __future__ import with_statement
2012-09-13 15:27:34 +00:00
import os
import cherrypy
2012-12-31 16:52:16 +00:00
import datetime
2013-01-06 08:51:44 +00:00
import re
2012-09-13 15:27:34 +00:00
from mako . template import Template
from mako . lookup import TemplateLookup
from mako import exceptions
import time
import threading
2013-01-11 21:20:51 +00:00
import csv
import platform
2013-02-06 19:55:23 +00:00
import urllib
import shutil
2012-09-13 15:27:34 +00:00
import mylar
2013-02-09 03:34:02 +00:00
from mylar import logger , db , importer , mb , search , filechecker , helpers , updater , parseit , weeklypull , PostProcessor , version , librarysync , moveit
2012-09-13 15:27:34 +00:00
#from mylar.helpers import checked, radio, today
import lib . simplejson as simplejson
from operator import itemgetter
def serve_template(templatename, **kwargs):
    """Render a Mako template from the active interface directory.

    templatename -- file name of the template inside the interface dir.
    kwargs       -- passed straight through to Template.render().
    Returns rendered HTML, or Mako's HTML error page if rendering fails.
    """
    interface_dir = os.path.join(str(mylar.PROG_DIR), 'data/interfaces/')
    template_dir = os.path.join(str(interface_dir), mylar.INTERFACE)

    _hplookup = TemplateLookup(directories=[template_dir])
    try:
        template = _hplookup.get_template(templatename)
        return template.render(**kwargs)
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; template errors render Mako's debug page as before.
        return exceptions.html_error_template().render()
class WebInterface ( object ) :
def index ( self ) :
raise cherrypy . HTTPRedirect ( " home " )
index . exposed = True
def home ( self ) :
myDB = db . DBConnection ( )
comics = myDB . select ( ' SELECT * from comics order by ComicSortName COLLATE NOCASE ' )
return serve_template ( templatename = " index.html " , title = " Home " , comics = comics )
home . exposed = True
def artistPage ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * FROM comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
if comic is None :
raise cherrypy . HTTPRedirect ( " home " )
2013-03-13 01:41:45 +00:00
#let's cheat. :)
2013-03-21 17:09:10 +00:00
#comicskip = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE')
skipno = len ( mylar . COMICSORT [ ' SortOrder ' ] )
lastno = mylar . COMICSORT [ ' LastOrderNo ' ]
lastid = mylar . COMICSORT [ ' LastOrderID ' ]
2013-03-13 01:41:45 +00:00
series = { }
2013-03-30 16:53:07 +00:00
if skipno == 0 :
#it's a blank db, let's just null the values and go.
series [ ' Current ' ] = None
series [ ' Previous ' ] = None
series [ ' Next ' ] = None
2013-03-21 17:09:10 +00:00
i = 0
while ( i < skipno ) :
cskip = mylar . COMICSORT [ ' SortOrder ' ] [ i ]
2013-03-14 08:33:16 +00:00
if cskip [ ' ComicID ' ] == ComicID :
2013-03-21 17:09:10 +00:00
cursortnum = cskip [ ' ComicOrder ' ]
2013-03-14 08:33:16 +00:00
series [ ' Current ' ] = cskip [ ' ComicID ' ]
2013-03-21 17:09:10 +00:00
if cursortnum == 0 :
2013-03-14 08:33:16 +00:00
# if first record, set the Previous record to the LAST record.
2013-03-21 17:09:10 +00:00
previous = lastid
2013-03-14 08:33:16 +00:00
else :
2013-03-21 17:09:10 +00:00
previous = mylar . COMICSORT [ ' SortOrder ' ] [ i - 1 ] [ ' ComicID ' ]
2013-03-14 08:33:16 +00:00
2013-03-21 17:09:10 +00:00
# if last record, set the Next record to the FIRST record.
if cursortnum == lastno :
next = mylar . COMICSORT [ ' SortOrder ' ] [ 0 ] [ ' ComicID ' ]
else :
next = mylar . COMICSORT [ ' SortOrder ' ] [ i + 1 ] [ ' ComicID ' ]
series [ ' Previous ' ] = previous
series [ ' Next ' ] = next
2013-03-13 01:41:45 +00:00
break
2013-03-21 17:09:10 +00:00
i + = 1
2013-02-25 15:36:43 +00:00
issues = myDB . select ( ' SELECT * FROM issues WHERE ComicID=? order by Int_IssueNumber DESC ' , [ ComicID ] )
isCounts = { }
isCounts [ 1 ] = 0 #1 skipped
isCounts [ 2 ] = 0 #2 wanted
isCounts [ 3 ] = 0 #3 archived
isCounts [ 4 ] = 0 #4 downloaded
isCounts [ 5 ] = 0 #5 read
for curResult in issues :
baseissues = { ' skipped ' : 1 , ' wanted ' : 2 , ' archived ' : 3 , ' downloaded ' : 4 }
for seas in baseissues :
if seas in curResult [ ' Status ' ] . lower ( ) :
sconv = baseissues [ seas ]
isCounts [ sconv ] + = 1
continue
isCounts = {
" Skipped " : str ( isCounts [ 1 ] ) ,
" Wanted " : str ( isCounts [ 2 ] ) ,
" Archived " : str ( isCounts [ 3 ] ) ,
" Downloaded " : str ( isCounts [ 4 ] )
}
2013-01-15 22:41:00 +00:00
usethefuzzy = comic [ ' UseFuzzy ' ]
2013-01-23 07:34:50 +00:00
skipped2wanted = " 0 "
2013-01-15 22:41:00 +00:00
if usethefuzzy is None : usethefuzzy = " 0 "
2012-09-14 17:29:01 +00:00
comicConfig = {
2013-01-13 15:59:46 +00:00
" comiclocation " : mylar . COMIC_LOCATION ,
2013-01-15 22:41:00 +00:00
" fuzzy_year0 " : helpers . radio ( int ( usethefuzzy ) , 0 ) ,
" fuzzy_year1 " : helpers . radio ( int ( usethefuzzy ) , 1 ) ,
2013-01-23 07:34:50 +00:00
" fuzzy_year2 " : helpers . radio ( int ( usethefuzzy ) , 2 ) ,
" skipped2wanted " : helpers . checked ( skipped2wanted )
2012-09-14 17:29:01 +00:00
}
2013-03-21 17:09:10 +00:00
if mylar . ANNUALS_ON :
annuals = myDB . select ( " SELECT * FROM annuals WHERE ComicID=? " , [ ComicID ] )
else : annuals = None
return serve_template ( templatename = " artistredone.html " , title = comic [ ' ComicName ' ] , comic = comic , issues = issues , comicConfig = comicConfig , isCounts = isCounts , series = series , annuals = annuals )
2012-09-13 15:27:34 +00:00
artistPage . exposed = True
2013-02-25 15:36:43 +00:00
2013-02-27 08:28:40 +00:00
def searchit ( self , name , issue = None , mode = None , type = None ) :
if type is None : type = ' comic ' # let's default this to comic search only for the time being (will add story arc, characters, etc later)
else : print ( str ( type ) + " mode enabled. " )
2012-09-13 15:27:34 +00:00
#mode dictates type of search:
# --series ... search for comicname displaying all results
# --pullseries ... search for comicname displaying a limited # of results based on issue
# --want ... individual comics
if mode is None : mode = ' series '
if len ( name ) == 0 :
raise cherrypy . HTTPRedirect ( " home " )
if type == ' comic ' and mode == ' pullseries ' :
searchresults = mb . findComic ( name , mode , issue = issue )
elif type == ' comic ' and mode == ' series ' :
searchresults = mb . findComic ( name , mode , issue = None )
elif type == ' comic ' and mode == ' want ' :
searchresults = mb . findComic ( name , mode , issue )
2013-02-27 08:28:40 +00:00
elif type == ' storyarc ' :
searchresults = mb . findComic ( name , mode , issue = None , storyarc = ' yes ' )
2013-01-28 20:31:43 +00:00
2013-02-25 15:36:43 +00:00
searchresults = sorted ( searchresults , key = itemgetter ( ' comicyear ' , ' issues ' ) , reverse = True )
2012-09-13 15:27:34 +00:00
#print ("Results: " + str(searchresults))
2013-02-09 03:34:02 +00:00
return serve_template ( templatename = " searchresults.html " , title = ' Search Results for: " ' + name + ' " ' , searchresults = searchresults , type = type , imported = None , ogcname = None )
2012-09-13 15:27:34 +00:00
searchit . exposed = True
2013-02-09 03:34:02 +00:00
def addComic ( self , comicid , comicname = None , comicyear = None , comicimage = None , comicissues = None , comicpublisher = None , imported = None , ogcname = None ) :
2012-10-16 08:16:29 +00:00
myDB = db . DBConnection ( )
2013-02-13 01:27:24 +00:00
if imported == " confirm " :
# if it's coming from the importer and it's just for confirmation, record the right selection and break.
# if it's 'confirmed' coming in as the value for imported
# the ogcname will be the original comicid that is either correct/incorrect (doesn't matter which)
#confirmedid is the selected series (comicid) with the letter C at the beginning to denote Confirmed.
# then sql the original comicid which will hit on all the results for the given series.
# iterate through, and overwrite the existing watchmatch with the new chosen 'C' + comicid value
confirmedid = " C " + str ( comicid )
confirms = myDB . action ( " SELECT * FROM importresults WHERE WatchMatch=? " , [ ogcname ] )
if confirms is None :
2013-02-25 15:36:43 +00:00
logger . Error ( " There are no results that match...this is an ERROR. " )
2013-02-13 01:27:24 +00:00
else :
for confirm in confirms :
controlValue = { " impID " : confirm [ ' impID ' ] }
newValue = { " WatchMatch " : str ( confirmedid ) }
myDB . upsert ( " importresults " , newValue , controlValue )
2013-03-29 04:02:35 +00:00
self . importResults ( )
2013-02-13 01:27:24 +00:00
return
2012-10-16 08:16:29 +00:00
sresults = [ ]
2013-01-11 21:20:51 +00:00
cresults = [ ]
2012-10-16 08:16:29 +00:00
mismatch = " no "
2013-02-25 15:36:43 +00:00
#print ("comicid: " + str(comicid))
#print ("comicname: " + str(comicname))
#print ("comicyear: " + str(comicyear))
#print ("comicissues: " + str(comicissues))
#print ("comicimage: " + str(comicimage))
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
if not mylar . CV_ONLY :
2012-10-16 08:16:29 +00:00
#here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
CV_EXcomicid = myDB . action ( " SELECT * from exceptions WHERE ComicID=? " , [ comicid ] ) . fetchone ( )
if CV_EXcomicid is None : # pass #
gcdinfo = parseit . GCDScraper ( comicname , comicyear , comicissues , comicid , quickmatch = " yes " )
if gcdinfo == " No Match " :
2013-01-13 15:59:46 +00:00
#when it no matches, the image will always be blank...let's fix it.
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
cvdata = mylar . cv . getComic ( comicid , ' comic ' )
comicimage = cvdata [ ' ComicImage ' ]
updater . no_searchresults ( comicid )
nomatch = " true "
u_comicname = comicname . encode ( ' utf-8 ' ) . strip ( )
logger . info ( " I couldn ' t find an exact match for " + u_comicname + " ( " + str ( comicyear ) + " ) - gathering data for Error-Checking screen (this could take a minute)... " )
i = 0
loopie , cnt = parseit . ComChk ( comicname , comicyear , comicpublisher , comicissues , comicid )
logger . info ( " total count : " + str ( cnt ) )
while ( i < cnt ) :
try :
stoopie = loopie [ ' comchkchoice ' ] [ i ]
except ( IndexError , TypeError ) :
break
cresults . append ( {
' ComicID ' : stoopie [ ' ComicID ' ] ,
' ComicName ' : stoopie [ ' ComicName ' ] . decode ( ' utf-8 ' , ' replace ' ) ,
' ComicYear ' : stoopie [ ' ComicYear ' ] ,
' ComicIssues ' : stoopie [ ' ComicIssues ' ] ,
' ComicURL ' : stoopie [ ' ComicURL ' ] ,
' ComicPublisher ' : stoopie [ ' ComicPublisher ' ] . decode ( ' utf-8 ' , ' replace ' ) ,
' GCDID ' : stoopie [ ' GCDID ' ]
} )
i + = 1
if imported != ' None ' :
2013-03-29 04:02:35 +00:00
#if it's from an import and it has to go through the UEC, return the values
#to the calling function and have that return the template
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
return cresults
else :
return serve_template ( templatename = " searchfix.html " , title = " Error Check " , comicname = comicname , comicid = comicid , comicyear = comicyear , comicimage = comicimage , comicissues = comicissues , cresults = cresults , imported = None , ogcname = None )
2013-03-29 04:02:35 +00:00
else :
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
nomatch = " false "
logger . info ( u " Quick match success..continuing. " )
2013-01-11 21:20:51 +00:00
else :
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
if CV_EXcomicid [ ' variloop ' ] == ' 99 ' :
logger . info ( u " mismatched name...autocorrecting to correct GID and auto-adding. " )
mismatch = " yes "
if CV_EXcomicid [ ' NewComicID ' ] == ' none ' :
logger . info ( u " multi-volume series detected " )
testspx = CV_EXcomicid [ ' GComicID ' ] . split ( ' / ' )
for exc in testspx :
fakeit = parseit . GCDAdd ( testspx )
howmany = int ( CV_EXcomicid [ ' variloop ' ] )
t = 0
while ( t < = howmany ) :
try :
sres = fakeit [ ' serieschoice ' ] [ t ]
except IndexError :
break
sresults . append ( {
' ComicID ' : sres [ ' ComicID ' ] ,
' ComicName ' : sres [ ' ComicName ' ] ,
' ComicYear ' : sres [ ' ComicYear ' ] ,
' ComicIssues ' : sres [ ' ComicIssues ' ] ,
' ComicPublisher ' : sres [ ' ComicPublisher ' ] ,
' ComicCover ' : sres [ ' ComicCover ' ]
} )
t + = 1
#searchfix(-1).html is for misnamed comics and wrong years.
#searchfix-2.html is for comics that span multiple volumes.
return serve_template ( templatename = " searchfix-2.html " , title = " In-Depth Results " , sresults = sresults )
2013-02-09 03:34:02 +00:00
#print ("imported is: " + str(imported))
threading . Thread ( target = importer . addComictoDB , args = [ comicid , mismatch , None , imported , ogcname ] ) . start ( )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
addComic . exposed = True
2012-10-16 08:16:29 +00:00
2013-03-29 04:02:35 +00:00
def from_Exceptions ( self , comicid , gcdid , comicname = None , comicyear = None , comicissues = None , comicpublisher = None , imported = None , ogcname = None ) :
2013-03-08 01:36:36 +00:00
import unicodedata
2013-01-11 21:20:51 +00:00
mismatch = " yes "
#write it to the custom_exceptions.csv and reload it so that importer will pick it up and do it's thing :)
#custom_exceptions in this format...
#99, (comicid), (gcdid), none
logger . info ( " saving new information into custom_exceptions.csv... " )
2013-03-12 16:06:44 +00:00
except_info = " none # " + str ( comicname ) + " -( " + str ( comicyear ) + " ) \n "
2013-01-15 19:02:32 +00:00
except_file = os . path . join ( mylar . DATA_DIR , " custom_exceptions.csv " )
if not os . path . exists ( except_file ) :
try :
csvfile = open ( str ( except_file ) , ' rb ' )
csvfile . close ( )
except ( OSError , IOError ) :
logger . error ( " Could not locate " + str ( except_file ) + " file. Make sure it ' s in datadir: " + mylar . DATA_DIR + " with proper permissions. " )
return
2013-03-08 01:36:36 +00:00
exceptln = " 99, " + str ( comicid ) + " , " + str ( gcdid ) + " , " + str ( except_info )
exceptline = exceptln . decode ( ' utf-8 ' , ' ignore ' )
2013-01-15 19:02:32 +00:00
with open ( str ( except_file ) , ' a ' ) as f :
2013-03-08 01:36:36 +00:00
#f.write('%s,%s,%s,%s\n' % ("99", comicid, gcdid, except_info)
2013-03-16 17:59:56 +00:00
f . write ( ' %s \n ' % ( exceptline . encode ( ' ascii ' , ' replace ' ) . strip ( ) ) )
2013-01-11 21:20:51 +00:00
logger . info ( " re-loading csv file so it ' s all nice and current. " )
mylar . csv_load ( )
2013-03-29 04:02:35 +00:00
if imported :
threading . Thread ( target = importer . addComictoDB , args = [ comicid , mismatch , None , imported , ogcname ] ) . start ( )
else :
threading . Thread ( target = importer . addComictoDB , args = [ comicid , mismatch ] ) . start ( )
2013-01-11 21:20:51 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
from_Exceptions . exposed = True
2012-10-16 08:16:29 +00:00
def GCDaddComic ( self , comicid , comicname = None , comicyear = None , comicissues = None , comiccover = None , comicpublisher = None ) :
#since we already know most of the info, let's add it to the db so we can reference it later.
myDB = db . DBConnection ( )
gcomicid = " G " + str ( comicid )
comicyear_len = comicyear . find ( ' ' , 2 )
comyear = comicyear [ comicyear_len + 1 : comicyear_len + 5 ]
2013-01-24 17:01:27 +00:00
if comyear . isdigit ( ) :
logger . fdebug ( " Series year set to : " + str ( comyear ) )
else :
logger . fdebug ( " Invalid Series year detected - trying to adjust from " + str ( comyear ) )
#comicyear_len above will trap wrong year if it's 10 October 2010 - etc ( 2000 AD)...
find_comicyear = comicyear . split ( )
for i in find_comicyear :
if len ( i ) == 4 :
logger . fdebug ( " Series year detected as : " + str ( i ) )
comyear = str ( i )
continue
logger . fdebug ( " Series year set to: " + str ( comyear ) )
2012-10-16 08:16:29 +00:00
controlValueDict = { ' ComicID ' : gcomicid }
newValueDict = { ' ComicName ' : comicname ,
' ComicYear ' : comyear ,
' ComicPublished ' : comicyear ,
' ComicPublisher ' : comicpublisher ,
' ComicImage ' : comiccover ,
' Total ' : comicissues }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
threading . Thread ( target = importer . GCDimport , args = [ gcomicid ] ) . start ( )
2012-10-16 15:12:44 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % gcomicid )
2012-10-16 08:16:29 +00:00
GCDaddComic . exposed = True
2012-10-30 10:43:01 +00:00
def post_process ( self , nzb_name , nzb_folder ) :
logger . info ( u " Starting postprocessing for : " + str ( nzb_name ) )
2012-12-27 15:04:03 +00:00
PostProcess = PostProcessor . PostProcessor ( nzb_name , nzb_folder )
result = PostProcess . Process ( )
2012-10-30 10:43:01 +00:00
#result = post_results.replace("\n","<br />\n")
return result
#log2screen = threading.Thread(target=PostProcessor.PostProcess, args=[nzb_name,nzb_folder]).start()
#return serve_template(templatename="postprocess.html", title="postprocess")
#raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)
post_process . exposed = True
2012-09-13 15:27:34 +00:00
def pauseArtist ( self , ComicID ) :
logger . info ( u " Pausing comic: " + ComicID )
myDB = db . DBConnection ( )
controlValueDict = { ' ComicID ' : ComicID }
newValueDict = { ' Status ' : ' Paused ' }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
pauseArtist . exposed = True
def resumeArtist ( self , ComicID ) :
logger . info ( u " Resuming comic: " + ComicID )
myDB = db . DBConnection ( )
controlValueDict = { ' ComicID ' : ComicID }
newValueDict = { ' Status ' : ' Active ' }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
resumeArtist . exposed = True
def deleteArtist ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * from comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
2012-09-24 05:17:29 +00:00
if comic [ ' ComicName ' ] is None : ComicName = " None "
else : ComicName = comic [ ' ComicName ' ]
2013-03-08 01:38:05 +00:00
logger . info ( u " Deleting all traces of Comic: " + ComicName )
2012-09-13 15:27:34 +00:00
myDB . action ( ' DELETE from comics WHERE ComicID=? ' , [ ComicID ] )
myDB . action ( ' DELETE from issues WHERE ComicID=? ' , [ ComicID ] )
2013-02-06 19:55:23 +00:00
myDB . action ( ' DELETE from upcoming WHERE ComicID=? ' , [ ComicID ] )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " home " )
deleteArtist . exposed = True
def refreshArtist ( self , ComicID ) :
2012-10-16 08:16:29 +00:00
myDB = db . DBConnection ( )
mismatch = " no "
2013-04-07 18:06:36 +00:00
if not mylar . CV_ONLY or ComicID [ : 1 ] == " G " :
CV_EXcomicid = myDB . action ( " SELECT * from exceptions WHERE ComicID=? " , [ ComicID ] ) . fetchone ( )
if CV_EXcomicid is None : pass
else :
if CV_EXcomicid [ ' variloop ' ] == ' 99 ' :
mismatch = " yes "
if ComicID [ : 1 ] == " G " : threading . Thread ( target = importer . GCDimport , args = [ ComicID ] ) . start ( )
else : threading . Thread ( target = importer . addComictoDB , args = [ ComicID , mismatch ] ) . start ( )
2012-10-16 08:16:29 +00:00
else :
2013-04-07 18:06:36 +00:00
if mylar . CV_ONETIMER == 1 :
2013-04-07 19:18:26 +00:00
logger . fdebug ( " CV_OneTimer option enabled... " )
2013-04-07 18:06:36 +00:00
#in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clea$
2013-04-07 19:18:26 +00:00
logger . fdebug ( " Gathering the status of all issues for the series. " )
2013-04-07 18:06:36 +00:00
issues = myDB . select ( ' SELECT * FROM issues WHERE ComicID=? ' , [ ComicID ] )
#store the issues' status for a given comicid, after deleting and readding, flip the status back to$
2013-04-07 19:18:26 +00:00
logger . fdebug ( " Deleting all issue data. " )
2013-04-07 18:06:36 +00:00
myDB . select ( ' DELETE FROM issues WHERE ComicID=? ' , [ ComicID ] )
2013-04-07 19:18:26 +00:00
logger . fdebug ( " Refreshing the series and pulling in new data using only CV. " )
2013-04-07 18:06:36 +00:00
mylar . importer . addComictoDB ( ComicID , mismatch )
issues_new = myDB . select ( ' SELECT * FROM issues WHERE ComicID=? ' , [ ComicID ] )
2013-04-07 19:18:26 +00:00
logger . fdebug ( " Attempting to put the Status ' back how they were. " )
2013-04-07 18:06:36 +00:00
icount = 0
for issue in issues :
for issuenew in issues_new :
if issuenew [ ' IssueID ' ] == issue [ ' IssueID ' ] and issuenew [ ' Status ' ] != issue [ ' Status ' ] :
#change the status to the previous status
ctrlVAL = { ' IssueID ' : issue [ ' IssueID ' ] }
newVAL = { ' Status ' : issue [ ' Status ' ] }
myDB . upsert ( " Issues " , newVAL , ctrlVAL )
icount + = 1
break
2013-04-07 19:18:26 +00:00
logger . info ( " In the process of converting the data to CV, I changed the status of " + str ( icount ) + " issues. " )
2013-04-07 18:06:36 +00:00
else :
mylar . importer . addComictoDB ( ComicID , mismatch )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
refreshArtist . exposed = True
def editIssue ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * from comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
title = ' Now Editing ' + comic [ ' ComicName ' ]
return serve_template ( templatename = " editcomic.html " , title = title , comic = comic )
#raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" & ComicID)
editIssue . exposed = True
2012-10-16 08:16:29 +00:00
def markissues ( self , action = None , * * args ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
2012-10-16 08:16:29 +00:00
issuesToAdd = [ ]
2012-10-30 10:43:01 +00:00
issuestoArchive = [ ]
2012-09-13 15:27:34 +00:00
if action == ' WantedNew ' :
newaction = ' Wanted '
else :
newaction = action
for IssueID in args :
2013-02-09 03:34:02 +00:00
#print ("issueID: " + str(IssueID) + "... " + str(newaction))
if IssueID is None or ' issue_table ' in IssueID :
continue
2012-09-13 15:27:34 +00:00
else :
2012-10-16 08:16:29 +00:00
mi = myDB . action ( " SELECT * FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
miyr = myDB . action ( " SELECT ComicYear FROM comics WHERE ComicID=? " , [ mi [ ' ComicID ' ] ] ) . fetchone ( )
2012-10-30 10:43:01 +00:00
if action == ' Downloaded ' :
if mi [ ' Status ' ] == " Skipped " or mi [ ' Status ' ] == " Wanted " :
logger . info ( u " Cannot change status to %s as comic is not Snatched or Downloaded " % ( newaction ) )
2013-02-09 03:34:02 +00:00
# continue
2012-10-30 10:43:01 +00:00
elif action == ' Archived ' :
logger . info ( u " Marking %s %s as %s " % ( mi [ ' ComicName ' ] , mi [ ' Issue_Number ' ] , newaction ) )
#updater.forceRescan(mi['ComicID'])
issuestoArchive . append ( IssueID )
elif action == ' Wanted ' :
logger . info ( u " Marking %s %s as %s " % ( mi [ ' ComicName ' ] , mi [ ' Issue_Number ' ] , newaction ) )
issuesToAdd . append ( IssueID )
2013-02-09 03:34:02 +00:00
elif action == ' Skipped ' :
logger . info ( u " Marking " + str ( IssueID ) + " as Skipped " )
2012-10-16 08:16:29 +00:00
controlValueDict = { " IssueID " : IssueID }
newValueDict = { " Status " : newaction }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
2012-10-30 10:43:01 +00:00
if len ( issuestoArchive ) > 0 :
updater . forceRescan ( mi [ ' ComicID ' ] )
2012-10-16 08:16:29 +00:00
if len ( issuesToAdd ) > 0 :
2013-02-09 03:34:02 +00:00
logger . debug ( " Marking issues: %s as Wanted " % ( issuesToAdd ) )
2012-10-16 08:16:29 +00:00
threading . Thread ( target = search . searchIssueIDList , args = [ issuesToAdd ] ) . start ( )
2012-10-16 15:53:46 +00:00
#if IssueID:
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % mi [ ' ComicID ' ] )
#else:
# raise cherrypy.HTTPRedirect("upcoming")
2012-09-13 15:27:34 +00:00
markissues . exposed = True
def addArtists ( self , * * args ) :
threading . Thread ( target = importer . artistlist_to_mbids , args = [ args , True ] ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
addArtists . exposed = True
2013-04-22 03:11:12 +00:00
def queueissue ( self , mode , ComicName = None , ComicID = None , ComicYear = None , ComicIssue = None , IssueID = None , new = False , redirect = None , SeriesYear = None ) :
print ' tada '
2012-12-31 16:52:16 +00:00
now = datetime . datetime . now ( )
2012-09-19 04:38:25 +00:00
myDB = db . DBConnection ( )
2012-09-13 15:27:34 +00:00
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
if ComicID is None and mode == ' series ' :
issue = None
raise cherrypy . HTTPRedirect ( " searchit?name= %s &issue= %s &mode= %s " % ( ComicName , ' None ' , ' series ' ) )
elif ComicID is None and mode == ' pullseries ' :
# we can limit the search by including the issue # and searching for
# comics that have X many issues
raise cherrypy . HTTPRedirect ( " searchit?name= %s &issue= %s &mode= %s " % ( ComicName , ' None ' , ' pullseries ' ) )
2013-04-22 03:11:12 +00:00
elif ComicID is None and mode == ' readlist ' :
# this is for marking individual comics from a readlist to be downloaded.
# Because there is no associated ComicID or IssueID, follow same pattern as in 'pullwant'
# except we know the Year
if ComicYear is None : ComicYear = SeriesYear
logger . info ( u " Marking " + ComicName + " " + ComicIssue + " as wanted... " )
foundcom = search . search_init ( ComicName = ComicName , IssueNumber = ComicIssue , ComicYear = ComicYear , SeriesYear = None , IssueDate = None , IssueID = None , AlternateSearch = None , UseFuzzy = None , ComicVersion = None )
if foundcom == " yes " :
logger . info ( u " Downloaded " + ComicName + " # " + ComicIssue + " ( " + str ( ComicYear ) + " ) " )
raise cherrypy . HTTPRedirect ( " readlist " )
2012-09-13 15:27:34 +00:00
elif ComicID is None and mode == ' pullwant ' :
#this is for marking individual comics from the pullist to be downloaded.
#because ComicID and IssueID will both be None due to pullist, it's probably
#better to set both to some generic #, and then filter out later...
cyear = myDB . action ( " SELECT SHIPDATE FROM weekly " ) . fetchone ( )
ComicYear = str ( cyear [ ' SHIPDATE ' ] ) [ : 4 ]
2012-12-31 16:52:16 +00:00
if ComicYear == ' ' : ComicYear = now . year
2012-09-13 15:27:34 +00:00
logger . info ( u " Marking " + ComicName + " " + ComicIssue + " as wanted... " )
2013-04-12 02:14:27 +00:00
foundcom = search . search_init ( ComicName = ComicName , IssueNumber = ComicIssue , ComicYear = ComicYear , SeriesYear = None , IssueDate = cyear [ ' SHIPDATE ' ] , IssueID = None , AlternateSearch = None , UseFuzzy = None , ComicVersion = None )
2012-09-13 15:27:34 +00:00
if foundcom == " yes " :
logger . info ( u " Downloaded " + ComicName + " " + ComicIssue )
2013-04-12 02:14:27 +00:00
raise cherrypy . HTTPRedirect ( " pullist " )
#return
2012-09-13 15:27:34 +00:00
elif mode == ' want ' :
2012-10-01 15:01:21 +00:00
cdname = myDB . action ( " SELECT ComicName from comics where ComicID=? " , [ ComicID ] ) . fetchone ( )
ComicName = cdname [ ' ComicName ' ]
2012-09-13 15:27:34 +00:00
logger . info ( u " Marking " + ComicName + " issue: " + ComicIssue + " as wanted... " )
#---
#this should be on it's own somewhere
if IssueID is not None :
controlValueDict = { " IssueID " : IssueID }
newStatus = { " Status " : " Wanted " }
myDB . upsert ( " issues " , newStatus , controlValueDict )
#for future reference, the year should default to current year (.datetime)
2012-10-09 06:33:14 +00:00
issues = myDB . action ( " SELECT IssueDate FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
2012-09-13 15:27:34 +00:00
if ComicYear == None :
ComicYear = str ( issues [ ' IssueDate ' ] ) [ : 4 ]
2012-12-31 16:52:16 +00:00
miy = myDB . action ( " SELECT * FROM comics WHERE ComicID=? " , [ ComicID ] ) . fetchone ( )
SeriesYear = miy [ ' ComicYear ' ]
AlternateSearch = miy [ ' AlternateSearch ' ]
2013-02-06 19:55:23 +00:00
UseAFuzzy = miy [ ' UseFuzzy ' ]
2013-03-08 03:07:14 +00:00
ComicVersion = miy [ ' ComicVersion ' ]
foundcom = search . search_init ( ComicName , ComicIssue , ComicYear , SeriesYear , issues [ ' IssueDate ' ] , IssueID , AlternateSearch , UseAFuzzy , ComicVersion )
2012-09-13 15:27:34 +00:00
if foundcom == " yes " :
# file check to see if issue exists and update 'have' count
if IssueID is not None :
return updater . foundsearch ( ComicID , IssueID )
if ComicID :
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
else :
raise cherrypy . HTTPRedirect ( redirect )
queueissue . exposed = True
def unqueueissue ( self , IssueID , ComicID ) :
myDB = db . DBConnection ( )
issue = myDB . action ( ' SELECT * FROM issues WHERE IssueID=? ' , [ IssueID ] ) . fetchone ( )
logger . info ( u " Marking " + issue [ ' ComicName ' ] + " issue # " + issue [ ' Issue_Number ' ] + " as skipped... " )
controlValueDict = { ' IssueID ' : IssueID }
newValueDict = { ' Status ' : ' Skipped ' }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
unqueueissue . exposed = True
2013-02-17 10:31:18 +00:00
def archiveissue ( self , IssueID ) :
myDB = db . DBConnection ( )
issue = myDB . action ( ' SELECT * FROM issues WHERE IssueID=? ' , [ IssueID ] ) . fetchone ( )
logger . info ( u " Marking " + issue [ ' ComicName ' ] + " issue # " + issue [ ' Issue_Number ' ] + " as archived... " )
controlValueDict = { ' IssueID ' : IssueID }
newValueDict = { ' Status ' : ' Archived ' }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % issue [ ' ComicID ' ] )
archiveissue . exposed = True
2012-09-13 15:27:34 +00:00
def pullist ( self ) :
myDB = db . DBConnection ( )
2013-01-28 20:31:43 +00:00
weeklyresults = [ ]
2012-09-13 15:27:34 +00:00
popit = myDB . select ( " SELECT * FROM sqlite_master WHERE name= ' weekly ' and type= ' table ' " )
if popit :
2013-01-28 20:31:43 +00:00
w_results = myDB . select ( " SELECT PUBLISHER, ISSUE, COMIC, STATUS from weekly " )
for weekly in w_results :
2013-04-02 08:56:24 +00:00
if weekly [ ' ISSUE ' ] . isdigit ( ) or ' au ' in weekly [ ' ISSUE ' ] . lower ( ) :
2013-01-28 20:31:43 +00:00
weeklyresults . append ( {
" PUBLISHER " : weekly [ ' PUBLISHER ' ] ,
" ISSUE " : weekly [ ' ISSUE ' ] ,
" COMIC " : weekly [ ' COMIC ' ] ,
" STATUS " : weekly [ ' STATUS ' ]
} )
weeklyresults = sorted ( weeklyresults , key = itemgetter ( ' PUBLISHER ' , ' COMIC ' ) , reverse = False )
2012-09-13 15:27:34 +00:00
pulldate = myDB . action ( " SELECT * from weekly " ) . fetchone ( )
2012-12-16 18:41:01 +00:00
if pulldate is None :
return self . manualpull ( )
#raise cherrypy.HTTPRedirect("home")
2012-09-13 15:27:34 +00:00
else :
return self . manualpull ( )
2013-01-28 20:31:43 +00:00
return serve_template ( templatename = " weeklypull.html " , title = " Weekly Pull " , weeklyresults = weeklyresults , pulldate = pulldate [ ' SHIPDATE ' ] , pullfilter = True )
2012-09-13 15:27:34 +00:00
pullist . exposed = True
def filterpull ( self ) :
myDB = db . DBConnection ( )
weeklyresults = myDB . select ( " SELECT * from weekly " )
pulldate = myDB . action ( " SELECT * from weekly " ) . fetchone ( )
if pulldate is None :
raise cherrypy . HTTPRedirect ( " home " )
2013-01-28 20:31:43 +00:00
return serve_template ( templatename = " weeklypull.html " , title = " Weekly Pull " , weeklyresults = weeklyresults , pulldate = pulldate [ ' SHIPDATE ' ] , pullfilter = True )
2012-09-13 15:27:34 +00:00
filterpull . exposed = True
def manualpull ( self ) :
from mylar import weeklypull
threading . Thread ( target = weeklypull . pullit ( ) ) . start ( )
raise cherrypy . HTTPRedirect ( " pullist " )
manualpull . exposed = True
2013-04-08 16:31:41 +00:00
def pullrecreate ( self ) :
from mylar import weeklypull
myDB = db . DBConnection ( )
myDB . action ( " DROP TABLE weekly " )
mylar . dbcheck ( )
logger . info ( " Deleted existed pull-list data. Recreating Pull-list... " )
threading . Thread ( target = weeklypull . pullit ( forcecheck = ' yes ' ) ) . start ( )
raise cherrypy . HTTPRedirect ( " pullist " )
pullrecreate . exposed = True
2012-09-13 15:27:34 +00:00
def upcoming ( self ) :
myDB = db . DBConnection ( )
#upcoming = myDB.select("SELECT * from issues WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
2013-01-15 22:41:00 +00:00
upcoming = myDB . select ( " SELECT * from upcoming WHERE IssueDate > date( ' now ' ) AND IssueID is NULL order by IssueDate DESC " )
2012-09-13 15:27:34 +00:00
issues = myDB . select ( " SELECT * from issues WHERE Status= ' Wanted ' " )
#let's move any items from the upcoming table into the wanted table if the date has already passed.
2012-10-16 08:16:29 +00:00
#gather the list...
mvupcome = myDB . select ( " SELECT * from upcoming WHERE IssueDate < date( ' now ' ) order by IssueDate DESC " )
#get the issue ID's
for mvup in mvupcome :
2012-10-16 15:12:44 +00:00
myissue = myDB . action ( " SELECT * FROM issues WHERE Issue_Number=? " , [ mvup [ ' IssueNumber ' ] ] ) . fetchone ( )
2012-10-16 08:16:29 +00:00
if myissue is None : pass
else :
2012-10-16 15:12:44 +00:00
#print ("ComicName: " + str(myissue['ComicName']))
#print ("Issue number : " + str(myissue['Issue_Number']) )
2012-10-16 08:16:29 +00:00
mvcontroldict = { " IssueID " : myissue [ ' IssueID ' ] }
2012-10-16 15:12:44 +00:00
mvvalues = { " ComicID " : myissue [ ' ComicID ' ] ,
2012-10-16 08:16:29 +00:00
" Status " : " Wanted " }
2012-10-18 07:08:43 +00:00
myDB . upsert ( " issues " , mvvalues , mvcontroldict )
2012-10-16 08:16:29 +00:00
2012-10-21 15:30:26 +00:00
#remove old entry from upcoming so it won't try to continually download again.
deleteit = myDB . action ( " DELETE from upcoming WHERE ComicName=? AND IssueNumber=? " , [ mvup [ ' ComicName ' ] , mvup [ ' IssueNumber ' ] ] )
2012-09-13 15:27:34 +00:00
return serve_template ( templatename = " upcoming.html " , title = " Upcoming " , upcoming = upcoming , issues = issues )
upcoming . exposed = True
2012-09-24 05:17:29 +00:00
2013-01-23 07:34:50 +00:00
def skipped2wanted ( self , comicid ) :
# change all issues for a given ComicID that are Skipped, into Wanted.
issuestowanted = [ ]
issuesnumwant = [ ]
myDB = db . DBConnection ( )
skipped2 = myDB . select ( " SELECT * from issues WHERE ComicID=? AND Status= ' Skipped ' " , [ comicid ] )
for skippy in skipped2 :
mvcontroldict = { " IssueID " : skippy [ ' IssueID ' ] }
mvvalues = { " Status " : " Wanted " }
#print ("Changing issue " + str(skippy['Issue_Number']) + " to Wanted.")
myDB . upsert ( " issues " , mvvalues , mvcontroldict )
issuestowanted . append ( skippy [ ' IssueID ' ] )
issuesnumwant . append ( skippy [ ' Issue_Number ' ] )
if len ( issuestowanted ) > 0 :
logger . info ( " Marking issues: %s as Wanted " % issuesnumwant )
threading . Thread ( target = search . searchIssueIDList , args = [ issuestowanted ] ) . start ( )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % [ comicid ] )
skipped2wanted . exposed = True
2013-02-13 01:27:24 +00:00
    def manualRename(self, comicid):
        """Rename every on-disk issue of one series to the configured FILE_FORMAT.

        Walks the series' ComicLocation for .cbr/.cbz files, matches each
        filename against the issues table's Location column, renames matches
        via helpers.rename_param(), and finally forces a rescan so the DB
        picks up the new filenames.  Aborts up front if no FILE_FORMAT is
        configured, or mid-run if a move fails.
        """
        if mylar.FILE_FORMAT == '':
            logger.error("You haven't specified a File Format in Configuration/Advanced")
            logger.error("Cannot rename files.")
            return
        myDB = db.DBConnection()
        comic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
        comicdir = comic['ComicLocation']
        comicname = comic['ComicName']
        extensions = ('.cbr', '.cbz')
        issues = myDB.action("SELECT * FROM issues WHERE ComicID=?", [comicid]).fetchall()
        comfiles = []
        filefind = 0   # count of files actually renamed
        for root, dirnames, filenames in os.walk(comicdir):
            for filename in filenames:
                if filename.lower().endswith(extensions):
                    #logger.info("filename being checked is : " + str(filename))
                    # match the on-disk name against each DB-recorded location
                    for issue in issues:
                        if issue['Location'] == filename:
                            #logger.error("matched " + str(filename) + " to DB file " + str(issue['Location']))
                            renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], filename, comicyear=None, issueid=None)
                            nfilename = renameiss['nfilename']
                            # NOTE(review): paths are joined against comicdir, not
                            # the walk's current root - files matched inside
                            # subdirectories would resolve wrongly; TODO confirm
                            srciss = os.path.join(comicdir, filename)
                            dstiss = os.path.join(comicdir, nfilename)
                            if filename != nfilename:
                                logger.info("Renaming " + str(filename) + " ... to ... " + str(nfilename))
                                try:
                                    shutil.move(srciss, dstiss)
                                except (OSError, IOError):
                                    logger.error("Failed to move files - check directories and manually re-run.")
                                    return
                                filefind += 1
                            else:
                                logger.info("Not renaming " + str(filename) + " as it is in desired format already.")
                                #continue
        logger.info("I have renamed " + str(filefind) + " issues of " + comicname)
        # resync the DB with the renamed files
        updater.forceRescan(comicid)
    manualRename.exposed = True
2013-01-28 20:31:43 +00:00
2012-09-24 05:17:29 +00:00
def searchScan ( self , name ) :
return serve_template ( templatename = " searchfix.html " , title = " Manage " , name = name )
searchScan . exposed = True
2012-09-13 15:27:34 +00:00
def manage ( self ) :
return serve_template ( templatename = " manage.html " , title = " Manage " )
manage . exposed = True
2012-09-18 13:13:42 +00:00
def manageComics ( self ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
comics = myDB . select ( ' SELECT * from comics order by ComicSortName COLLATE NOCASE ' )
2012-09-18 13:13:42 +00:00
return serve_template ( templatename = " managecomics.html " , title = " Manage Comics " , comics = comics )
manageComics . exposed = True
2012-09-13 15:27:34 +00:00
2012-09-18 04:00:43 +00:00
def manageIssues ( self ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
issues = myDB . select ( ' SELECT * from issues ' )
2012-09-24 05:17:29 +00:00
return serve_template ( templatename = " manageissues.html " , title = " Manage Issues " , issues = issues )
2012-09-18 13:13:42 +00:00
manageIssues . exposed = True
2012-09-13 15:27:34 +00:00
def manageNew ( self ) :
myDB = db . DBConnection ( )
newcomics = myDB . select ( ' SELECT * from newartists ' )
return serve_template ( templatename = " managenew.html " , title = " Manage New Artists " , newcomics = newcomics )
manageNew . exposed = True
2012-09-18 04:00:43 +00:00
def markComics ( self , action = None , * * args ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
2012-09-18 04:00:43 +00:00
comicsToAdd = [ ]
for ComicID in args :
2012-09-13 15:27:34 +00:00
if action == ' delete ' :
2012-09-18 04:00:43 +00:00
myDB . action ( ' DELETE from comics WHERE ComicID=? ' , [ ComicID ] )
myDB . action ( ' DELETE from issues WHERE ComicID=? ' , [ ComicID ] )
2012-09-13 15:27:34 +00:00
elif action == ' pause ' :
2012-09-18 04:00:43 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2012-09-13 15:27:34 +00:00
newValueDict = { ' Status ' : ' Paused ' }
2012-09-18 04:00:43 +00:00
myDB . upsert ( " comics " , newValueDict , controlValueDict )
2012-09-13 15:27:34 +00:00
elif action == ' resume ' :
2012-09-18 04:00:43 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2012-09-13 15:27:34 +00:00
newValueDict = { ' Status ' : ' Active ' }
2012-09-18 04:00:43 +00:00
myDB . upsert ( " comics " , newValueDict , controlValueDict )
2012-09-13 15:27:34 +00:00
else :
2012-09-18 04:00:43 +00:00
comicsToAdd . append ( ComicID )
if len ( comicsToAdd ) > 0 :
logger . debug ( " Refreshing comics: %s " % comicsToAdd )
threading . Thread ( target = importer . addComicIDListToDB , args = [ comicsToAdd ] ) . start ( )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " home " )
2012-09-18 04:00:43 +00:00
markComics . exposed = True
2012-09-13 15:27:34 +00:00
def forceUpdate ( self ) :
from mylar import updater
threading . Thread ( target = updater . dbUpdate ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
forceUpdate . exposed = True
def forceSearch ( self ) :
from mylar import search
threading . Thread ( target = search . searchforissue ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
forceSearch . exposed = True
def forceRescan ( self , ComicID ) :
threading . Thread ( target = updater . forceRescan , args = [ ComicID ] ) . start ( )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
forceRescan . exposed = True
def checkGithub ( self ) :
from mylar import versioncheck
versioncheck . checkGithub ( )
raise cherrypy . HTTPRedirect ( " home " )
checkGithub . exposed = True
def history ( self ) :
myDB = db . DBConnection ( )
history = myDB . select ( ''' SELECT * from snatched order by DateAdded DESC ''' )
return serve_template ( templatename = " history.html " , title = " History " , history = history )
return page
history . exposed = True
2013-02-25 15:36:43 +00:00
def readlist ( self ) :
myDB = db . DBConnection ( )
2013-03-02 01:41:45 +00:00
readlist = myDB . select ( " SELECT * from readinglist group by StoryArcID COLLATE NOCASE " )
2013-03-06 16:20:09 +00:00
issuelist = myDB . select ( " SELECT * from readlist " )
2013-04-22 03:11:12 +00:00
readConfig = {
" read2filename " : helpers . checked ( mylar . READ2FILENAME )
}
return serve_template ( templatename = " readinglist.html " , title = " Readlist " , readlist = readlist , issuelist = issuelist , readConfig = readConfig )
2013-02-25 15:36:43 +00:00
return page
readlist . exposed = True
2013-03-02 01:41:45 +00:00
def detailReadlist ( self , StoryArcID , StoryArcName ) :
myDB = db . DBConnection ( )
readlist = myDB . select ( " SELECT * from readinglist WHERE StoryArcID=? order by ReadingOrder ASC " , [ StoryArcID ] )
2013-04-22 03:11:12 +00:00
return serve_template ( templatename = " readlist.html " , title = " Detailed Arc list " , readlist = readlist , storyarcname = StoryArcName , storyarcid = StoryArcID )
2013-03-02 01:41:45 +00:00
detailReadlist . exposed = True
2013-04-22 03:11:12 +00:00
def removefromreadlist ( self , IssueID = None , StoryArcID = None , IssueArcID = None , AllRead = None ) :
2013-03-06 16:20:09 +00:00
myDB = db . DBConnection ( )
if IssueID :
myDB . action ( ' DELETE from readlist WHERE IssueID=? ' , [ IssueID ] )
logger . info ( " Removed " + str ( IssueID ) + " from Reading List " )
elif StoryArcID :
myDB . action ( ' DELETE from readinglist WHERE StoryArcID=? ' , [ StoryArcID ] )
logger . info ( " Removed " + str ( StoryArcID ) + " from Story Arcs. " )
elif IssueArcID :
myDB . action ( ' DELETE from readinglist WHERE IssueArcID=? ' , [ IssueArcID ] )
logger . info ( " Removed " + str ( IssueArcID ) + " from the Story Arc. " )
2013-04-22 03:11:12 +00:00
elif AllRead :
myDB . action ( " DELETE from readlist WHERE Status= ' Read ' " )
logger . info ( " Removed All issues that have been marked as Read from Reading List " )
2013-03-06 16:20:09 +00:00
removefromreadlist . exposed = True
def markasRead ( self , IssueID = None , IssueArcID = None ) :
myDB = db . DBConnection ( )
if IssueID :
issue = myDB . action ( ' SELECT * from readlist WHERE IssueID=? ' , [ IssueID ] ) . fetchone ( )
if issue [ ' Status ' ] == ' Read ' :
NewVal = { " Status " : " Added " }
else :
NewVal = { " Status " : " Read " }
CtrlVal = { " IssueID " : IssueID }
myDB . upsert ( " readlist " , NewVal , CtrlVal )
logger . info ( " Marked " + str ( issue [ ' ComicName ' ] ) + " # " + str ( issue [ ' Issue_Number ' ] ) + " as Read. " )
elif IssueArcID :
issue = myDB . action ( ' SELECT * from readinglist WHERE IssueArcID=? ' , [ IssueArcID ] ) . fetchone ( )
if issue [ ' Status ' ] == ' Read ' :
NewVal = { " Status " : " Added " }
else :
NewVal = { " Status " : " Read " }
CtrlVal = { " IssueArcID " : IssueArcID }
myDB . upsert ( " readinglist " , NewVal , CtrlVal )
logger . info ( " Marked " + str ( issue [ ' ComicName ' ] ) + " # " + str ( issue [ ' IssueNumber ' ] ) + " as Read. " )
markasRead . exposed = True
2013-02-25 15:36:43 +00:00
def addtoreadlist ( self , IssueID ) :
myDB = db . DBConnection ( )
readlist = myDB . action ( " SELECT * from issues where IssueID=? " , [ IssueID ] ) . fetchone ( )
2013-03-06 16:20:09 +00:00
comicinfo = myDB . action ( " SELECT * from comics where ComicID=? " , [ readlist [ ' ComicID ' ] ] ) . fetchone ( )
2013-02-25 15:36:43 +00:00
if readlist is None :
logger . error ( " Cannot locate IssueID - aborting.. " )
else :
logger . info ( " attempting to add..issueid " + readlist [ ' IssueID ' ] )
ctrlval = { " IssueID " : IssueID }
newval = { " DateAdded " : helpers . today ( ) ,
" Status " : " added " ,
2013-03-06 16:20:09 +00:00
" ComicID " : readlist [ ' ComicID ' ] ,
2013-02-25 15:36:43 +00:00
" Issue_Number " : readlist [ ' Issue_Number ' ] ,
2013-03-06 16:20:09 +00:00
" IssueDate " : readlist [ ' IssueDate ' ] ,
" SeriesYear " : comicinfo [ ' ComicYear ' ] ,
2013-02-25 15:36:43 +00:00
" ComicName " : readlist [ ' ComicName ' ] }
myDB . upsert ( " readlist " , newval , ctrlval )
logger . info ( " Added " + str ( readlist [ ' ComicName ' ] ) + " # " + str ( readlist [ ' Issue_Number ' ] ) + " to the Reading list. " )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % readlist [ ' ComicID ' ] )
addtoreadlist . exposed = True
2013-03-02 01:41:45 +00:00
def importReadlist ( self , filename ) :
from xml . dom . minidom import parseString , Element
import random
myDB = db . DBConnection ( )
file = open ( str ( filename ) )
data = file . read ( )
file . close ( )
dom = parseString ( data )
# of results
storyarc = dom . getElementsByTagName ( ' Name ' ) [ 0 ] . firstChild . wholeText
tracks = dom . getElementsByTagName ( ' Book ' )
i = 1
node = dom . documentElement
print ( " there are " + str ( len ( tracks ) ) + " issues in the story-arc: " + str ( storyarc ) )
#generate a random number for the ID, and tack on the total issue count to the end as a str :)
storyarcid = str ( random . randint ( 1000 , 9999 ) ) + str ( len ( tracks ) )
i = 1
for book_element in tracks :
st_issueid = str ( storyarcid ) + " _ " + str ( random . randint ( 1000 , 9999 ) )
comicname = book_element . getAttribute ( ' Series ' )
2013-03-08 01:36:36 +00:00
print ( " comic: " + comicname )
2013-03-02 01:41:45 +00:00
comicnumber = book_element . getAttribute ( ' Number ' )
print ( " number: " + str ( comicnumber ) )
comicvolume = book_element . getAttribute ( ' Volume ' )
print ( " volume: " + str ( comicvolume ) )
comicyear = book_element . getAttribute ( ' Year ' )
print ( " year: " + str ( comicyear ) )
CtrlVal = { " IssueArcID " : st_issueid }
NewVals = { " StoryArcID " : storyarcid ,
" ComicName " : comicname ,
" IssueNumber " : comicnumber ,
" SeriesYear " : comicvolume ,
" IssueYear " : comicyear ,
" StoryArc " : storyarc ,
" ReadingOrder " : i ,
" TotalIssues " : len ( tracks ) }
myDB . upsert ( " readinglist " , NewVals , CtrlVal )
i + = 1
2013-04-22 03:11:12 +00:00
raise cherrypy . HTTPRedirect ( " detailReadlist?StoryArcID= %s &StoryArcName= %s " % ( storyarcid , storyarc ) )
2013-03-02 01:41:45 +00:00
importReadlist . exposed = True
2013-03-06 16:20:09 +00:00
#Story Arc Ascension...welcome to the next level :)
2013-04-22 03:11:12 +00:00
    def ArcWatchlist(self, StoryArcID=None):
        """Reconcile story-arc issues against the series watchlist.

        For every arc entry (or just one arc when StoryArcID is given),
        the arc's comic name is matched against each watchlist series after
        both names are normalized (punctuation, 'the'/'and', and whitespace
        stripped).  Matched issues already Downloaded/Archived get that
        status recorded on the arc (and are optionally mirrored onto the
        main reading list); everything else matched is marked Wanted on
        the arc.  Unmatched arc entries are collected in wantedlist.
        """
        myDB = db.DBConnection()
        if StoryArcID:
            ArcWatch = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=?", [StoryArcID])
        else:
            ArcWatch = myDB.select("SELECT * FROM readinglist")
        if ArcWatch is None: logger.info("No Story Arcs to search")
        else:
            Comics = myDB.select("SELECT * FROM comics")
            arc_match = []
            wantedlist = []
            showonreadlist = 1 # 0 won't show storyarcissues on readinglist main page, 1 will show
            for arc in ArcWatch:
                logger.fdebug("arc: " + arc['storyarc'] + " : " + arc['ComicName'] + " : " + arc['IssueNumber'])
                #cycle through the story arcs here for matches on the watchlist
                # normalize the arc title: strip punctuation, articles, whitespace
                mod_arc = re.sub('[\:/,\'\/\-\&\%\$\#\@\!\*\+\.]', '', arc['ComicName'])
                mod_arc = re.sub('\\bthe\\b', '', mod_arc.lower())
                mod_arc = re.sub('\\band\\b', '', mod_arc.lower())
                mod_arc = re.sub(r'\s', '', mod_arc)
                matcheroso = "no"
                for comic in Comics:
                    logger.fdebug("comic: " + comic['ComicName'])
                    # same normalization applied to the watchlist title
                    mod_watch = re.sub('[\:\,\'\/\-\&\%\$\#\@\!\*\+\.]', '', comic['ComicName'])
                    mod_watch = re.sub('\\bthe\\b', '', mod_watch.lower())
                    mod_watch = re.sub('\\band\\b', '', mod_watch.lower())
                    mod_watch = re.sub(r'\s', '', mod_watch)
                    if mod_watch == mod_arc: # and arc['SeriesYear'] == comic['ComicYear']:
                        logger.fdebug("intial name match - confirming issue # is present in series")
                        if comic['ComicID'][:1] == 'G':
                            # if it's a multi-volume series, it's decimalized - let's get rid of the decimal.
                            GCDissue, whocares = helpers.decimal_issue(arc['IssueNumber'])
                            GCDissue = int(GCDissue) / 1000
                            logger.fdebug("issue converted to " + str(GCDissue))
                            isschk = myDB.action("SELECT * FROM issues WHERE ComicName=? AND Issue_Number=?", [comic['ComicName'], str(GCDissue)]).fetchone()
                        else:
                            isschk = myDB.action("SELECT * FROM issues WHERE ComicName=? AND Issue_Number=?", [comic['ComicName'], arc['IssueNumber']]).fetchone()
                        if isschk is None:
                            logger.fdebug("we matched on name, but issue " + str(arc['IssueNumber']) + " doesn't exist for " + comic['ComicName'])
                        else:
                            logger.fdebug("issue #: " + str(arc['IssueNumber']) + " is present!")
                            print isschk
                            print ("Comicname: " + arc['ComicName'])
                            #print ("ComicID: " + str(isschk['ComicID']))
                            print ("Issue: " + arc['IssueNumber'])
                            print ("IssueArcID: " + arc['IssueArcID'])
                            #gather the matches now.
                            arc_match.append({
                                "match_name": arc['ComicName'],
                                "match_id": isschk['ComicID'],
                                "match_issue": arc['IssueNumber'],
                                "match_issuearcid": arc['IssueArcID'],
                                "match_seriesyear": comic['ComicYear']})
                            matcheroso = "yes"
                if matcheroso == "no":
                    # no watchlist series matched this arc entry at all
                    logger.fdebug("Unable to find a match for " + arc['ComicName'] + " :#" + str(arc['IssueNumber']))
                    wantedlist.append({
                        "ComicName": arc['ComicName'],
                        "IssueNumber": arc['IssueNumber'],
                        "IssueYear": arc['IssueYear']})
            logger.fdebug("we matched on " + str(len(arc_match)) + " issues")
            for m_arc in arc_match:
                #now we cycle through the issues looking for a match.
                issue = myDB.action("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
                if issue is None: pass
                else:
                    logger.fdebug("issue: " + str(issue['Issue_Number']) + "..." + str(m_arc['match_issue']))
                    # if helpers.decimal_issue(issuechk['Issue_Number']) == helpers.decimal_issue(m_arc['match_issue']):
                    if issue['Issue_Number'] == m_arc['match_issue']:
                        logger.fdebug("we matched on " + str(issue['Issue_Number']) + " for " + str(m_arc['match_name']))
                        if issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived':
                            # already owned: copy the status/IssueID onto the arc row
                            ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
                            newVal = {"Status": issue['Status'],
                                      "IssueID": issue['IssueID']}
                            if showonreadlist:
                                # mirror the owned issue onto the main reading list
                                showctrlVal = {"IssueID": issue['IssueID']}
                                shownewVal = {"ComicName": issue['ComicName'],
                                              "Issue_Number": issue['Issue_Number'],
                                              "IssueDate": issue['IssueDate'],
                                              "SeriesYear": m_arc['match_seriesyear'],
                                              "ComicID": m_arc['match_id']}
                                myDB.upsert("readlist", shownewVal, showctrlVal)
                            myDB.upsert("readinglist", newVal, ctrlVal)
                            logger.info("Already have " + issue['ComicName'] + " :#" + str(issue['Issue_Number']))
                        else:
                            # on the watchlist but not owned yet: flag the arc row Wanted
                            logger.fdebug("We don't have " + issue['ComicName'] + " :#" + str(issue['Issue_Number']))
                            ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
                            newVal = {"Status": "Wanted",
                                      "IssueID": issue['IssueID']}
                            myDB.upsert("readinglist", newVal, ctrlVal)
                            logger.info("Marked " + issue['ComicName'] + " :#" + str(issue['Issue_Number']) + " as WANTED.")
    ArcWatchlist.exposed = True
2013-04-22 03:11:12 +00:00
    def ReadMassCopy(self, StoryArcID, StoryArcName):
        #this copies entire story arcs into the /cache/<storyarc> folder
        #alternatively, it will copy the issues individually directly to a 3rd party device (ie.tablet)
        myDB = db.DBConnection()
        copylist = myDB.select("SELECT * FROM readlist WHERE StoryArcID=? AND Status='Downloaded'", [StoryArcID])
        if copylist is None:
            # NOTE(review): .select() most likely returns a (possibly empty)
            # list rather than None, so this guard may never fire - TODO
            # confirm against db.py and switch to an emptiness check if so
            logger.fdebug("You don't have any issues from " + StoryArcName + ". Aborting Mass Copy.")
            return
        else:
            dst = os.path.join(mylar.CACHE, StoryArcName)
            for files in copylist:
                # NOTE(review): the loop only records each source location;
                # the actual copy into dst appears unimplemented here
                copyloc = files['Location']
    ReadMassCopy.exposed = True
2012-09-13 15:27:34 +00:00
def logs ( self ) :
2013-02-06 19:55:23 +00:00
if mylar . LOG_LEVEL is None or mylar . LOG_LEVEL == ' ' :
2013-04-22 03:11:12 +00:00
mylar . LOG_LEVEL = ' INFO '
2013-02-06 19:55:23 +00:00
return serve_template ( templatename = " logs.html " , title = " Log " , lineList = mylar . LOG_LIST , log_level = mylar . LOG_LEVEL )
2012-09-13 15:27:34 +00:00
logs . exposed = True
2013-02-06 19:55:23 +00:00
2013-04-22 03:11:12 +00:00
def log_change ( self , loglevel ) :
if log_level is not None :
print ( " changing logger to " + str ( log_level ) )
LOGGER . setLevel ( log_level )
return serve_template ( templatename = " logs.html " , title = " Log " , lineList = mylar . LOG_LIST , log_level = log_level )
2013-02-06 19:55:23 +00:00
log_change . exposed = True
2012-09-13 15:27:34 +00:00
def clearhistory ( self , type = None ) :
myDB = db . DBConnection ( )
if type == ' all ' :
logger . info ( u " Clearing all history " )
myDB . action ( ' DELETE from snatched ' )
else :
logger . info ( u " Clearing history where status is %s " % type )
myDB . action ( ' DELETE from snatched WHERE Status=? ' , [ type ] )
raise cherrypy . HTTPRedirect ( " history " )
clearhistory . exposed = True
2013-02-06 19:55:23 +00:00
2013-04-22 03:11:12 +00:00
def downloadLocal ( self , IssueID = None , IssueArcID = None , ReadOrder = None ) :
print " tada "
print ( " issueid: " + str ( IssueID ) )
2013-02-06 19:55:23 +00:00
myDB = db . DBConnection ( )
2013-04-22 03:11:12 +00:00
if IssueID :
issueDL = myDB . action ( " SELECT * FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
comicid = issueDL [ ' ComicID ' ]
#print ("comicid: " + str(comicid))
comic = myDB . action ( " SELECT * FROM comics WHERE ComicID=? " , [ comicid ] ) . fetchone ( )
#---issue info
comicname = comic [ ' ComicName ' ]
issuenum = issueDL [ ' Issue_Number ' ]
issuedate = issueDL [ ' IssueDate ' ]
seriesyear = comic [ ' ComicYear ' ]
#---
issueLOC = comic [ ' ComicLocation ' ]
#print ("IssueLOC: " + str(issueLOC))
issueFILE = issueDL [ ' Location ' ]
#print ("IssueFILE: "+ str(issueFILE))
issuePATH = os . path . join ( issueLOC , issueFILE )
#print ("IssuePATH: " + str(issuePATH))
dstPATH = os . path . join ( mylar . CACHE_DIR , issueFILE )
#print ("dstPATH: " + str(dstPATH))
if IssueArcID :
if mylar . READ2FILENAME :
#if it's coming from a StoryArc, check to see if we're appending the ReadingOrder to the filename
ARCissueFILE = ReadOrder + " - " + issueFILE
dstPATH = os . path . join ( mylar . CACHE_DIR , ARCissueFILE )
# issueDL = myDB.action("SELECT * FROM readinglist WHERE IssueArcID=?", [IssueArcID]).fetchone()
# storyarcid = issueDL['StoryArcID']
# #print ("comicid: " + str(comicid))
# issueLOC = mylar.DESTINATION_DIR
# #print ("IssueLOC: " + str(issueLOC))
# issueFILE = issueDL['Location']
# #print ("IssueFILE: "+ str(issueFILE))
# issuePATH = os.path.join(issueLOC,issueFILE)
# #print ("IssuePATH: " + str(issuePATH))
# dstPATH = os.path.join(mylar.CACHE_DIR, issueFILE)
# #print ("dstPATH: " + str(dstPATH))
2013-02-17 10:31:18 +00:00
try :
shutil . copy2 ( issuePATH , dstPATH )
except IOError as e :
logger . error ( " Could not copy " + str ( issuePATH ) + " to " + str ( dstPATH ) + " . Copy to Cache terminated. " )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
logger . debug ( " sucessfully copied to cache...Enabling Download link " )
controlValueDict = { ' IssueID ' : IssueID }
2013-02-25 15:36:43 +00:00
newValueDict = { ' inCacheDIR ' : ' True ' ,
' Location ' : issueFILE }
myDB . upsert ( " readlist " , newValueDict , controlValueDict )
2013-03-14 08:55:38 +00:00
myDB . upsert ( " issues " , newValueDict , controlValueDict )
2013-04-22 03:11:12 +00:00
if IssueArcID :
controlValueD = { ' IssueArcID ' : IssueArcID }
newValueDict = { ' inCacheDIR ' : ' True ' ,
' Location ' : ARCissueFILE }
myDB . upsert ( " readinglist " , newValueDict , controlValueD )
2013-02-17 10:31:18 +00:00
#print("DB updated - Download link now enabled.")
2013-02-06 19:55:23 +00:00
downloadLocal . exposed = True
2012-09-13 15:27:34 +00:00
2013-01-28 20:31:43 +00:00
#for testing.
def idirectory ( self ) :
return serve_template ( templatename = " idirectory.html " , title = " Import a Directory " )
idirectory . exposed = True
2013-02-13 01:27:24 +00:00
def confirmResult ( self , comicname , comicid ) :
#print ("here.")
mode = ' series '
sresults = mb . findComic ( comicname , mode , None )
#print sresults
type = ' comic '
return serve_template ( templatename = " searchresults.html " , title = ' Import Results for: " ' + comicname + ' " ' , searchresults = sresults , type = type , imported = ' confirm ' , ogcname = comicid )
confirmResult . exposed = True
2013-02-06 19:55:23 +00:00
def comicScan ( self , path , scan = 0 , redirect = None , autoadd = 0 , libraryscan = 0 , imp_move = 0 , imp_rename = 0 , imp_metadata = 0 ) :
2013-01-28 20:31:43 +00:00
mylar . LIBRARYSCAN = libraryscan
mylar . ADD_COMICS = autoadd
mylar . COMIC_DIR = path
mylar . IMP_MOVE = imp_move
mylar . IMP_RENAME = imp_rename
2013-02-06 19:55:23 +00:00
mylar . IMP_METADATA = imp_metadata
2013-01-28 20:31:43 +00:00
mylar . config_write ( )
if scan :
try :
2013-02-06 19:55:23 +00:00
soma , noids = librarysync . libraryScan ( )
2013-01-28 20:31:43 +00:00
except Exception , e :
logger . error ( ' Unable to complete the scan: %s ' % e )
if soma == " Completed " :
print ( " sucessfully completed import. " )
else :
2013-02-06 19:55:23 +00:00
logger . info ( u " Starting mass importing... " + str ( noids ) + " records. " )
2013-01-28 20:31:43 +00:00
#this is what it should do...
#store soma (the list of comic_details from importing) into sql table so import can be whenever
#display webpage showing results
#allow user to select comic to add (one at a time)
#call addComic off of the webpage to initiate the add.
#return to result page to finish or continue adding.
#....
#threading.Thread(target=self.searchit).start()
#threadthis = threadit.ThreadUrl()
#result = threadthis.main(soma)
myDB = db . DBConnection ( )
sl = 0
2013-02-06 19:55:23 +00:00
print ( " number of records: " + str ( noids ) )
while ( sl < int ( noids ) ) :
2013-01-28 20:31:43 +00:00
soma_sl = soma [ ' comic_info ' ] [ sl ]
2013-02-06 19:55:23 +00:00
print ( " soma_sl: " + str ( soma_sl ) )
print ( " comicname: " + soma_sl [ ' comicname ' ] )
print ( " filename: " + soma_sl [ ' comfilename ' ] )
controlValue = { " impID " : soma_sl [ ' impid ' ] }
2013-01-28 20:31:43 +00:00
newValue = { " ComicYear " : soma_sl [ ' comicyear ' ] ,
" Status " : " Not Imported " ,
2013-02-06 19:55:23 +00:00
" ComicName " : soma_sl [ ' comicname ' ] ,
" ComicFilename " : soma_sl [ ' comfilename ' ] ,
" ComicLocation " : soma_sl [ ' comlocation ' ] . encode ( ' utf-8 ' ) ,
2013-02-13 01:27:24 +00:00
" ImportDate " : helpers . today ( ) ,
" WatchMatch " : soma_sl [ ' watchmatch ' ] }
2013-01-28 20:31:43 +00:00
myDB . upsert ( " importresults " , newValue , controlValue )
sl + = 1
2013-02-06 19:55:23 +00:00
# because we could be adding volumes/series that span years, we need to account for this
# add the year to the db under the term, valid-years
# add the issue to the db under the term, min-issue
#locate metadata here.
# unzip -z filename.cbz will show the comment field of the zip which contains the metadata.
# unzip -z filename.cbz < /dev/null will remove the comment field, and thus the metadata.
2013-01-28 20:31:43 +00:00
self . importResults ( )
if redirect :
raise cherrypy . HTTPRedirect ( redirect )
else :
raise cherrypy . HTTPRedirect ( " home " )
comicScan . exposed = True
def importResults ( self ) :
myDB = db . DBConnection ( )
2013-02-13 01:27:24 +00:00
results = myDB . select ( " SELECT * FROM importresults WHERE WatchMatch is Null OR WatchMatch LIKE ' C % ' group by ComicName COLLATE NOCASE " )
2013-02-25 15:36:43 +00:00
#this is to get the count of issues;
for result in results :
countthis = myDB . action ( " SELECT count(*) FROM importresults WHERE ComicName=? " , [ result [ ' ComicName ' ] ] ) . fetchall ( )
countit = countthis [ 0 ] [ 0 ]
ctrlVal = { " ComicName " : result [ ' ComicName ' ] }
newVal = { " IssueCount " : countit }
myDB . upsert ( " importresults " , newVal , ctrlVal )
logger . info ( " counted " + str ( countit ) + " issues for " + str ( result [ ' ComicName ' ] ) )
#need to reload results now
results = myDB . select ( " SELECT * FROM importresults WHERE WatchMatch is Null OR WatchMatch LIKE ' C % ' group by ComicName COLLATE NOCASE " )
2013-02-13 01:27:24 +00:00
watchresults = myDB . select ( " SELECT * FROM importresults WHERE WatchMatch is not Null AND WatchMatch NOT LIKE ' C % ' group by ComicName COLLATE NOCASE " )
return serve_template ( templatename = " importresults.html " , title = " Import Results " , results = results , watchresults = watchresults )
2013-01-28 20:31:43 +00:00
importResults . exposed = True
2013-02-06 19:55:23 +00:00
2013-02-09 03:34:02 +00:00
def deleteimport ( self , ComicName ) :
myDB = db . DBConnection ( )
2013-03-08 01:36:36 +00:00
logger . info ( " Removing import data for Comic: " + ComicName )
2013-02-09 03:34:02 +00:00
myDB . action ( ' DELETE from importresults WHERE ComicName=? ' , [ ComicName ] )
raise cherrypy . HTTPRedirect ( " importResults " )
deleteimport . exposed = True
2013-02-13 01:27:24 +00:00
    def preSearchit(self, ComicName):
        # Analyze the queued import rows for *ComicName* and try to auto-match
        # the series against ComicVine before presenting results to the user.
        # Derives the plausible year range and the highest/lowest issue numbers
        # from the files on disk to narrow the ComicVine query.
        #print ("imp_rename:" + str(imp_rename))
        #print ("imp_move:" + str(imp_move))
        myDB = db.DBConnection()
        results = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [ComicName])
        #if results > 0:
        #    print ("There are " + str(results[7]) + " issues to import of " + str(ComicName))
        #build the valid year ranges and the minimum issue# here to pass to search.
        yearRANGE = []
        yearTOP = 0        # most recent publication year seen (as a string once set)
        minISSUE = 0       # NOTE: despite the name, tracks the HIGHEST issue number seen
        startISSUE = 10000000  # tracks the LOWEST issue number seen (sentinel start)
        comicstoIMP = []
        for result in results:
            if result is None:
                break
            if result['WatchMatch']:
                watchmatched = result['WatchMatch']
            else:
                watchmatched = ''
            if watchmatched.startswith('C'):
                # A 'C'-prefixed WatchMatch means the ComicID is already known:
                # optionally move the files, rescan, and skip the search entirely.
                print ("Confirmed. ComicID already provided - initiating auto-magik mode for import.")
                comicid = result['WatchMatch'][1:]
                print (result['WatchMatch'] + ".to." + str(comicid))
                #since it's already in the watchlist, we just need to move the files and re-run the filechecker.
                #self.refreshArtist(comicid=comicid,imported='yes')
                if mylar.IMP_MOVE:
                    logger.info("Mass import - Move files")
                    comloc = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
                    mylar.moveit.movefiles(comicid, comloc['ComicLocation'], ComicName)
                    #check for existing files...
                    updater.forceRescan(comicid)
                else:
                    print ("nothing to do if I'm not moving.")
                #hit the archiver in movefiles here...
                raise cherrypy.HTTPRedirect("importResults")
            else:
                comicstoIMP.append(result['ComicLocation'].decode(mylar.SYS_ENCODING, 'replace'))
                # impID ends in '-<issue>'; grab the trailing issue number.
                getiss = result['impID'].rfind('-')
                getiss = result['impID'][getiss + 1:]
                print ("figured issue is : " + str(getiss))
                if (result['ComicYear'] not in yearRANGE) or (yearRANGE is None):
                    if result['ComicYear'] <> "0000":
                        print ("adding..." + str(result['ComicYear']))
                        yearRANGE.append(result['ComicYear'])
                        yearTOP = str(result['ComicYear'])
                if int(getiss) > int(minISSUE):
                    print ("issue now set to : " + str(getiss) + " ... it was : " + str(minISSUE))
                    minISSUE = str(getiss)
                if int(getiss) < int(startISSUE):
                    print ("issue now set to : " + str(getiss) + " ... it was : " + str(startISSUE))
                    startISSUE = str(getiss)
        #figure out # of issues and the year range allowable
        # NOTE(review): in Python 2 a string yearTOP compares > 0 by type
        # ordering, so this branch is taken whenever any year was recorded.
        if yearTOP > 0:
            # assume ~12 issues/year to extend the year range backwards
            maxyear = int(yearTOP) - (int(minISSUE) / 12)
            yearRANGE.append(str(maxyear))
            print ("there is a " + str(maxyear) + " year variation based on the 12 issues/year")
        else:
            print ("no year detected in any issues...Nulling the value")
            yearRANGE = None
        #determine a best-guess to # of issues in series
        #this needs to be reworked / refined ALOT more.
        #minISSUE = highest issue #, startISSUE = lowest issue #
        numissues = int(minISSUE) - int(startISSUE)
        #normally minissue would work if the issue #'s started at #1.
        print ("the years involved are : " + str(yearRANGE))
        print ("highest issue # is : " + str(minISSUE))
        print ("lowest issue # is : " + str(startISSUE))
        print ("approximate number of issues : " + str(numissues))
        print ("issues present on system : " + str(len(comicstoIMP)))
        print ("versioning checking:")
        cnsplit = ComicName.split()
        #cnwords = len(cnsplit)
        #cnvers = cnsplit[cnwords-1]
        ogcname = ComicName
        # Strip version tokens like 'v2' so the search uses the base title.
        for splitt in cnsplit:
            print ("split")
            if 'v' in str(splitt):
                print ("possible versioning detected.")
                if splitt[1:].isdigit():
                    print (splitt + " - assuming versioning. Removing from initial search pattern.")
                    ComicName = re.sub(str(splitt), '', ComicName)
                    print ("new comicname is : " + ComicName)
        # we need to pass the original comicname here into the entire importer module
        # so that we can reference the correct issues later.
        mode = 'series'
        if yearRANGE is None:
            sresults = mb.findComic(ComicName, mode, issue=numissues)
        else:
            sresults = mb.findComic(ComicName, mode, issue=numissues, limityear=yearRANGE)
        type = 'comic'
        if len(sresults) == 1:
            # Single hit: add it automatically ("automagik" mode).
            sr = sresults[0]
            print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
            resultset = 1
        #    #need to move the files here.
        # NOTE(review): 'len(sresults) is None' is always False - dead condition.
        elif len(sresults) == 0 or len(sresults) is None:
            # No hits: retry without the year restriction.
            print ("no results, removing the year from the agenda and re-querying.")
            sresults = mb.findComic(ComicName, mode, issue=numissues)
            if len(sresults) == 1:
                sr = sresults[0]
                print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
                resultset = 1
            else:
                resultset = 0
        else:
            print ("returning results to screen - more than one possibility.")
            resultset = 0
        if resultset == 1:
            cresults = self.addComic(comicid=sr['comicid'], comicname=sr['name'], comicyear=sr['comicyear'], comicpublisher=sr['publisher'], comicimage=sr['comicimage'], comicissues=sr['issues'], imported='yes', ogcname=ogcname)  #imported=comicstoIMP,ogcname=ogcname)
            print ("ogcname -- " + str(ogcname))
            return serve_template(templatename="searchfix.html", title="Error Check", comicname=sr['name'], comicid=sr['comicid'], comicyear=sr['comicyear'], comicimage=sr['comicimage'], comicissues=sr['issues'], cresults=cresults, imported='yes', ogcname=str(ogcname))
        else:
            return serve_template(templatename="searchresults.html", title='Import Results for: "' + ComicName + '"', searchresults=sresults, type=type, imported='yes', ogcname=ogcname)  #imported=comicstoIMP, ogcname=ogcname)
    preSearchit.exposed = True
2013-01-28 20:31:43 +00:00
#---
2012-09-13 15:27:34 +00:00
    def config(self):
        # Render the Settings page: gathers library statistics plus the current
        # value of every configurable option into dicts consumed by config.html.
        interface_dir = os.path.join(mylar.PROG_DIR, 'data/interfaces/')
        interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))]
        #branch_history, err = mylar.versioncheck.runGit("log --oneline --pretty=format:'%h - %ar - %s' -n 4")
        #br_hist = branch_history.replace("\n", "<br />\n")
        myDB = db.DBConnection()
        # Library statistics shown on the settings page.
        CCOMICS = myDB.action("SELECT COUNT(*) FROM comics").fetchall()
        CHAVES = myDB.action("SELECT COUNT(*) FROM issues WHERE Status='Downloaded' OR Status='Archived'").fetchall()
        CISSUES = myDB.action("SELECT COUNT(*) FROM issues").fetchall()
        CSIZE = myDB.action("select SUM(ComicSize) from issues where Status='Downloaded' or Status='Archived'").fetchall()
        COUNT_COMICS = CCOMICS[0][0]
        COUNT_HAVES = CHAVES[0][0]
        COUNT_ISSUES = CISSUES[0][0]
        COUNT_SIZE = helpers.human_size(CSIZE[0][0])
        comicinfo = {"COUNT_COMICS": COUNT_COMICS,
                     "COUNT_HAVES": COUNT_HAVES,
                     "COUNT_ISSUES": COUNT_ISSUES,
                     "COUNT_SIZE": COUNT_SIZE}
        # Checkbox values go through helpers.checked(); radio groups through
        # helpers.radio(); everything else is passed through verbatim.
        config = {
                    "http_host": mylar.HTTP_HOST,
                    "http_user": mylar.HTTP_USERNAME,
                    "http_port": mylar.HTTP_PORT,
                    "http_pass": mylar.HTTP_PASSWORD,
                    "launch_browser": helpers.checked(mylar.LAUNCH_BROWSER),
                    "logverbose": helpers.checked(mylar.LOGVERBOSE),
                    "download_scan_interval": mylar.DOWNLOAD_SCAN_INTERVAL,
                    "nzb_search_interval": mylar.SEARCH_INTERVAL,
                    "nzb_startup_search": helpers.checked(mylar.NZB_STARTUP_SEARCH),
                    "libraryscan_interval": mylar.LIBRARYSCAN_INTERVAL,
                    "search_delay": mylar.SEARCH_DELAY,
                    "use_sabnzbd": helpers.checked(mylar.USE_SABNZBD),
                    "sab_host": mylar.SAB_HOST,
                    "sab_user": mylar.SAB_USERNAME,
                    "sab_api": mylar.SAB_APIKEY,
                    "sab_pass": mylar.SAB_PASSWORD,
                    "sab_cat": mylar.SAB_CATEGORY,
                    "sab_priority": mylar.SAB_PRIORITY,
                    "sab_directory": mylar.SAB_DIRECTORY,
                    "use_nzbget": helpers.checked(mylar.USE_NZBGET),
                    "nzbget_host": mylar.NZBGET_HOST,
                    "nzbget_port": mylar.NZBGET_PORT,
                    "nzbget_user": mylar.NZBGET_USERNAME,
                    "nzbget_pass": mylar.NZBGET_PASSWORD,
                    "nzbget_cat": mylar.NZBGET_CATEGORY,
                    "nzbget_priority": mylar.NZBGET_PRIORITY,
                    "use_blackhole": helpers.checked(mylar.BLACKHOLE),
                    "blackhole_dir": mylar.BLACKHOLE_DIR,
                    "usenet_retention": mylar.USENET_RETENTION,
                    "use_nzbsu": helpers.checked(mylar.NZBSU),
                    "nzbsu_api": mylar.NZBSU_APIKEY,
                    "use_dognzb": helpers.checked(mylar.DOGNZB),
                    "dognzb_api": mylar.DOGNZB_APIKEY,
                    "use_nzbx": helpers.checked(mylar.NZBX),
                    "use_experimental": helpers.checked(mylar.EXPERIMENTAL),
                    "use_newznab": helpers.checked(mylar.NEWZNAB),
                    "newznab_host": mylar.NEWZNAB_HOST,
                    "newznab_api": mylar.NEWZNAB_APIKEY,
                    "newznab_enabled": helpers.checked(mylar.NEWZNAB_ENABLED),
                    "extra_newznabs": mylar.EXTRA_NEWZNABS,
                    "destination_dir": mylar.DESTINATION_DIR,
                    "chmod_dir": mylar.CHMOD_DIR,
                    "chmod_file": mylar.CHMOD_FILE,
                    "replace_spaces": helpers.checked(mylar.REPLACE_SPACES),
                    "replace_char": mylar.REPLACE_CHAR,
                    "use_minsize": helpers.checked(mylar.USE_MINSIZE),
                    "minsize": mylar.MINSIZE,
                    "use_maxsize": helpers.checked(mylar.USE_MAXSIZE),
                    "maxsize": mylar.MAXSIZE,
                    "interface_list": interface_list,
                    "autowant_all": helpers.checked(mylar.AUTOWANT_ALL),
                    "autowant_upcoming": helpers.checked(mylar.AUTOWANT_UPCOMING),
                    "comic_cover_local": helpers.checked(mylar.COMIC_COVER_LOCAL),
                    "pref_qual_0": helpers.radio(mylar.PREFERRED_QUALITY, 0),
                    "pref_qual_1": helpers.radio(mylar.PREFERRED_QUALITY, 1),
                    "pref_qual_3": helpers.radio(mylar.PREFERRED_QUALITY, 3),
                    "pref_qual_2": helpers.radio(mylar.PREFERRED_QUALITY, 2),
                    "move_files": helpers.checked(mylar.MOVE_FILES),
                    "rename_files": helpers.checked(mylar.RENAME_FILES),
                    "folder_format": mylar.FOLDER_FORMAT,
                    "file_format": mylar.FILE_FORMAT,
                    "zero_level": helpers.checked(mylar.ZERO_LEVEL),
                    "zero_level_n": mylar.ZERO_LEVEL_N,
                    "add_to_csv": helpers.checked(mylar.ADD_TO_CSV),
                    "cvinfo": helpers.checked(mylar.CVINFO),
                    "lowercase_filenames": helpers.checked(mylar.LOWERCASE_FILENAMES),
                    "syno_fix": helpers.checked(mylar.SYNO_FIX),
                    "prowl_enabled": helpers.checked(mylar.PROWL_ENABLED),
                    "prowl_onsnatch": helpers.checked(mylar.PROWL_ONSNATCH),
                    "prowl_keys": mylar.PROWL_KEYS,
                    "prowl_priority": mylar.PROWL_PRIORITY,
                    "nma_enabled": helpers.checked(mylar.NMA_ENABLED),
                    "nma_apikey": mylar.NMA_APIKEY,
                    "nma_priority": int(mylar.NMA_PRIORITY),
                    "nma_onsnatch": helpers.checked(mylar.NMA_ONSNATCH),
                    "enable_extra_scripts": helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS),
                    "extra_scripts": mylar.EXTRA_SCRIPTS,
                    "post_processing": helpers.checked(mylar.POST_PROCESSING),
                    "branch": version.MYLAR_VERSION,
                    "br_type": mylar.INSTALL_TYPE,
                    "br_version": mylar.versioncheck.getVersion(),
                    "py_version": platform.python_version(),
                    "data_dir": mylar.DATA_DIR,
                    "prog_dir": mylar.PROG_DIR,
                    "cache_dir": mylar.CACHE_DIR,
                    "config_file": mylar.CONFIG_FILE,
                    #"branch_history" : br_hist
                    "enable_pre_scripts": helpers.checked(mylar.ENABLE_PRE_SCRIPTS),
                    "pre_scripts": mylar.PRE_SCRIPTS,
                    "log_dir": mylar.LOG_DIR
               }
        return serve_template(templatename="config.html", title="Settings", config=config, comicinfo=comicinfo)
    config.exposed = True
2013-01-11 21:20:51 +00:00
2013-03-29 04:02:35 +00:00
def error_change ( self , comicid , errorgcd , comicname , comicyear , imported = None , mogcname = None ) :
2013-01-29 09:02:23 +00:00
# if comicname contains a "," it will break the exceptions import.
import urllib
b = urllib . unquote_plus ( comicname )
2013-03-06 16:20:09 +00:00
# cname = b.decode("utf-8")
cname = b . encode ( ' utf-8 ' )
2013-01-29 09:02:23 +00:00
cname = re . sub ( " \ , " , " " , cname )
2013-03-30 17:31:13 +00:00
if mogcname != None :
c = urllib . unquote_plus ( mogcname )
ogcname = c . encode ( ' utf-8 ' )
else :
ogcname = None
2013-03-29 04:02:35 +00:00
2013-01-11 21:20:51 +00:00
if errorgcd [ : 5 ] . isdigit ( ) :
2013-03-30 17:31:13 +00:00
logger . info ( " GCD-ID detected : " + str ( errorgcd ) [ : 5 ] )
logger . info ( " ogcname: " + str ( ogcname ) )
logger . info ( " I ' m assuming you know what you ' re doing - going to force-match for " + cname )
2013-03-29 04:02:35 +00:00
self . from_Exceptions ( comicid = comicid , gcdid = errorgcd , comicname = cname , comicyear = comicyear , imported = imported , ogcname = ogcname )
2013-01-11 21:20:51 +00:00
else :
2013-03-30 17:31:13 +00:00
logger . info ( " Assuming rewording of Comic - adjusting to : " + str ( errorgcd ) )
2013-01-18 09:18:31 +00:00
Err_Info = mylar . cv . getComic ( comicid , ' comic ' )
2013-01-21 18:11:37 +00:00
self . addComic ( comicid = comicid , comicname = str ( errorgcd ) , comicyear = Err_Info [ ' ComicYear ' ] , comicissues = Err_Info [ ' ComicIssues ' ] , comicpublisher = Err_Info [ ' ComicPublisher ' ] )
2013-01-11 21:20:51 +00:00
error_change . exposed = True
2013-03-08 03:07:14 +00:00
def comic_config ( self , com_location , ComicID , alt_search = None , fuzzy_year = None , comic_version = None ) :
2012-09-28 15:39:44 +00:00
myDB = db . DBConnection ( )
2013-01-06 08:51:44 +00:00
#--- this is for multipe search terms............
#--- works, just need to redo search.py to accomodate multiple search terms
# ffs_alt = []
# if '+' in alt_search:
#find first +
# ffs = alt_search.find('+')
# ffs_alt.append(alt_search[:ffs])
# ffs_alt_st = str(ffs_alt[0])
# print("ffs_alt: " + str(ffs_alt[0]))
# split the entire string by the delimter +
# ffs_test = alt_search.split('+')
# if len(ffs_test) > 0:
# print("ffs_test names: " + str(len(ffs_test)))
# ffs_count = len(ffs_test)
# n=1
# while (n < ffs_count):
# ffs_alt.append(ffs_test[n])
# print("adding : " + str(ffs_test[n]))
#print("ffs_alt : " + str(ffs_alt))
# ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
# n+=1
# asearch = ffs_alt
# else:
# asearch = alt_search
asearch = str ( alt_search )
2012-09-28 15:39:44 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2013-01-18 07:32:05 +00:00
newValues = { " ComicLocation " : com_location }
2012-09-28 15:39:44 +00:00
#"QUALalt_vers": qual_altvers,
#"QUALScanner": qual_scanner,
#"QUALtype": qual_type,
#"QUALquality": qual_quality
#}
2013-01-18 07:32:05 +00:00
if asearch is not None :
2013-02-06 19:55:23 +00:00
if re . sub ( r ' \ s ' , ' ' , asearch ) == ' ' :
2013-01-18 07:32:05 +00:00
newValues [ ' AlternateSearch ' ] = " None "
else :
newValues [ ' AlternateSearch ' ] = str ( asearch )
2013-02-06 19:55:23 +00:00
else :
newValues [ ' AlternateSearch ' ] = " None "
2013-01-06 08:51:44 +00:00
2013-01-15 22:41:00 +00:00
if fuzzy_year is None :
newValues [ ' UseFuzzy ' ] = " 0 "
else :
newValues [ ' UseFuzzy ' ] = str ( fuzzy_year )
2013-03-08 03:07:14 +00:00
FIX:(#304) Index out of range on recheck, FIX:(#303) Comicvine link updated on details page, FIX:(#302) Query rate (Search Delay) Added as a configuration option, FIX:(#300) Version number error on searching when no version number, FIX:(#297) Manual Rename of Files working, FIX:(#294) 'AU' issue problems should be resolved now, FIX:(#290) V#(year) and Vol#(year) added to filechecking, IMP: ComicVine data use only now as a hidden option (cv_only = 1 in config.ini), IMP: added as options for file naming, IMP: Rough drafting of Annuals (annuals_on = 1 in config.ini), Other fixes..
2013-04-06 09:43:18 +00:00
if comic_version is None or comic_version == ' None ' :
2013-03-08 03:07:14 +00:00
newValues [ ' ComicVersion ' ] = " None "
else :
if comic_version [ 1 : ] . isdigit ( ) and comic_version [ : 1 ] . lower ( ) == ' v ' :
newValues [ ' ComicVersion ' ] = str ( comic_version )
else :
logger . info ( " Invalid Versioning entered - it must be in the format of v# " )
newValues [ ' ComicVersion ' ] = " None "
2013-01-15 22:41:00 +00:00
2012-12-20 11:52:21 +00:00
#force the check/creation of directory com_location here
if os . path . isdir ( str ( com_location ) ) :
logger . info ( u " Validating Directory ( " + str ( com_location ) + " ). Already exists! Continuing... " )
else :
logger . fdebug ( " Updated Directory doesn ' t exist! - attempting to create now. " )
2013-04-22 03:43:57 +00:00
#try:
# os.makedirs(str(com_location))
# logger.info(u"Directory successfully created at: " + str(com_location))
#except OSError:
# logger.error(u"Could not create comicdir : " + str(com_location))
filechecker . validateAndCreateDirectory ( com_location , True )
2012-12-20 11:52:21 +00:00
2012-09-28 15:39:44 +00:00
myDB . upsert ( " comics " , newValues , controlValueDict )
2012-09-29 04:56:28 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
2012-09-28 15:39:44 +00:00
comic_config . exposed = True
2012-09-13 15:27:34 +00:00
2013-01-15 17:32:08 +00:00
    def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
                     use_sabnzbd=0, sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None,
                     use_nzbget=0, nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None,
                     usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
                     raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
                     prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0,
                     preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660,
                     destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
        # Form handler for the Settings page: copies every submitted option
        # onto the mylar module globals, sanity-checks a few values, writes the
        # config file and redirects back to the config page.  Unchecked
        # checkboxes are simply absent from the POST, hence the 0 defaults.
        mylar.HTTP_HOST = http_host
        mylar.HTTP_PORT = http_port
        mylar.HTTP_USERNAME = http_username
        mylar.HTTP_PASSWORD = http_password
        mylar.LAUNCH_BROWSER = launch_browser
        mylar.LOGVERBOSE = logverbose
        mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
        mylar.SEARCH_INTERVAL = nzb_search_interval
        mylar.NZB_STARTUP_SEARCH = nzb_startup_search
        mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
        mylar.SEARCH_DELAY = search_delay
        mylar.USE_SABNZBD = use_sabnzbd
        mylar.SAB_HOST = sab_host
        mylar.SAB_USERNAME = sab_username
        mylar.SAB_PASSWORD = sab_password
        mylar.SAB_APIKEY = sab_apikey
        mylar.SAB_CATEGORY = sab_category
        mylar.SAB_PRIORITY = sab_priority
        mylar.SAB_DIRECTORY = sab_directory
        mylar.USE_NZBGET = use_nzbget
        mylar.NZBGET_HOST = nzbget_host
        mylar.NZBGET_USERNAME = nzbget_username
        mylar.NZBGET_PASSWORD = nzbget_password
        mylar.NZBGET_PORT = nzbget_port
        mylar.NZBGET_CATEGORY = nzbget_category
        mylar.NZBGET_PRIORITY = nzbget_priority
        mylar.BLACKHOLE = blackhole
        mylar.BLACKHOLE_DIR = blackhole_dir
        mylar.USENET_RETENTION = usenet_retention
        mylar.NZBSU = nzbsu
        mylar.NZBSU_APIKEY = nzbsu_apikey
        mylar.DOGNZB = dognzb
        mylar.DOGNZB_APIKEY = dognzb_apikey
        mylar.NZBX = nzbx
        mylar.RAW = raw
        mylar.RAW_PROVIDER = raw_provider
        mylar.RAW_USERNAME = raw_username
        mylar.RAW_PASSWORD = raw_password
        mylar.RAW_GROUPS = raw_groups
        mylar.EXPERIMENTAL = experimental
        mylar.NEWZNAB = newznab
        mylar.NEWZNAB_HOST = newznab_host
        mylar.NEWZNAB_APIKEY = newznab_apikey
        mylar.NEWZNAB_ENABLED = newznab_enabled
        mylar.PREFERRED_QUALITY = int(preferred_quality)
        mylar.MOVE_FILES = move_files
        mylar.RENAME_FILES = rename_files
        mylar.REPLACE_SPACES = replace_spaces
        mylar.REPLACE_CHAR = replace_char
        mylar.ZERO_LEVEL = zero_level
        mylar.ZERO_LEVEL_N = zero_level_n
        mylar.ADD_TO_CSV = add_to_csv
        mylar.CVINFO = cvinfo
        mylar.LOWERCASE_FILENAMES = lowercase_filenames
        mylar.SYNO_FIX = syno_fix
        mylar.PROWL_ENABLED = prowl_enabled
        mylar.PROWL_ONSNATCH = prowl_onsnatch
        mylar.PROWL_KEYS = prowl_keys
        mylar.PROWL_PRIORITY = prowl_priority
        mylar.NMA_ENABLED = nma_enabled
        mylar.NMA_APIKEY = nma_apikey
        mylar.NMA_PRIORITY = nma_priority
        mylar.NMA_ONSNATCH = nma_onsnatch
        mylar.USE_MINSIZE = use_minsize
        mylar.MINSIZE = minsize
        mylar.USE_MAXSIZE = use_maxsize
        mylar.MAXSIZE = maxsize
        mylar.FOLDER_FORMAT = folder_format
        mylar.FILE_FORMAT = file_format
        mylar.DESTINATION_DIR = destination_dir
        mylar.AUTOWANT_ALL = autowant_all
        mylar.AUTOWANT_UPCOMING = autowant_upcoming
        mylar.COMIC_COVER_LOCAL = comic_cover_local
        mylar.INTERFACE = interface
        mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts
        mylar.EXTRA_SCRIPTS = extra_scripts
        mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts
        mylar.POST_PROCESSING = post_processing
        mylar.PRE_SCRIPTS = pre_scripts
        mylar.LOG_DIR = log_dir
        mylar.LOG_LEVEL = log_level
        mylar.CHMOD_DIR = chmod_dir
        mylar.CHMOD_FILE = chmod_file
        # Handle the variable config options. Note - keys with False values aren't getting passed
        # Extra newznab providers arrive as numbered kwargs: newznab_hostN,
        # newznab_apiN, newznab_enabledN.
        mylar.EXTRA_NEWZNABS = []
        for kwarg in kwargs:
            if kwarg.startswith('newznab_host'):
                newznab_number = kwarg[12:]
                newznab_host = kwargs['newznab_host' + newznab_number]
                newznab_api = kwargs['newznab_api' + newznab_number]
                try:
                    newznab_enabled = int(kwargs['newznab_enabled' + newznab_number])
                except KeyError:
                    # checkbox unchecked -> key absent -> provider disabled
                    newznab_enabled = 0
                mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_api, newznab_enabled))
        # Sanity checking
        if mylar.SEARCH_INTERVAL < 360:
            logger.info("Search interval too low. Resetting to 6 hour minimum")
            mylar.SEARCH_INTERVAL = 360
        if mylar.SEARCH_DELAY < 1:
            logger.info("Minimum search delay set for 1 minute to avoid hammering.")
            mylar.SEARCH_DELAY = 1
        if not helpers.is_number(mylar.CHMOD_DIR):
            logger.info("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
            mylar.CHMOD_DIR = '0777'
        if not helpers.is_number(mylar.CHMOD_FILE):
            logger.info("CHMOD File value is not a valid numeric - please correct. Defaulting to 0660")
            mylar.CHMOD_FILE = '0660'
        # Write the config
        mylar.config_write()
        raise cherrypy.HTTPRedirect("config")
    configUpdate.exposed = True
def shutdown ( self ) :
mylar . SIGNAL = ' shutdown '
message = ' Shutting Down... '
return serve_template ( templatename = " shutdown.html " , title = " Shutting Down " , message = message , timer = 15 )
return page
shutdown . exposed = True
def restart ( self ) :
mylar . SIGNAL = ' restart '
message = ' Restarting... '
return serve_template ( templatename = " shutdown.html " , title = " Restarting " , message = message , timer = 30 )
restart . exposed = True
def update ( self ) :
mylar . SIGNAL = ' update '
2012-09-17 05:12:40 +00:00
message = ' Updating...<br/><small>Main screen will appear in 60s</small> '
2012-09-13 15:27:34 +00:00
return serve_template ( templatename = " shutdown.html " , title = " Updating " , message = message , timer = 30 )
return page
update . exposed = True
def getInfo ( self , ComicID = None , IssueID = None ) :
from mylar import cache
info_dict = cache . getInfo ( ComicID , IssueID )
return simplejson . dumps ( info_dict )
getInfo . exposed = True
def getComicArtwork ( self , ComicID = None , imageURL = None ) :
from mylar import cache
logger . info ( u " Retrieving image for : " + comicID )
return cache . getArtwork ( ComicID , imageURL )
getComicArtwork . exposed = True