#  This file is part of Mylar.
#
#  Mylar is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  Mylar is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with Mylar.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement

import csv
import datetime
import os
import platform
import Queue
import re
import shutil
import threading
import time
import urllib
from operator import itemgetter

import cherrypy
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions

import mylar
from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version, librarysync, moveit
#from mylar.helpers import checked, radio, today
import lib.simplejson as simplejson
def serve_template ( templatename , * * kwargs ) :
interface_dir = os . path . join ( str ( mylar . PROG_DIR ) , ' data/interfaces/ ' )
template_dir = os . path . join ( str ( interface_dir ) , mylar . INTERFACE )
_hplookup = TemplateLookup ( directories = [ template_dir ] )
try :
template = _hplookup . get_template ( templatename )
return template . render ( * * kwargs )
except :
return exceptions . html_error_template ( ) . render ( )
class WebInterface ( object ) :
def index ( self ) :
raise cherrypy . HTTPRedirect ( " home " )
index . exposed = True
def home ( self ) :
myDB = db . DBConnection ( )
comics = myDB . select ( ' SELECT * from comics order by ComicSortName COLLATE NOCASE ' )
return serve_template ( templatename = " index.html " , title = " Home " , comics = comics )
home . exposed = True
def artistPage ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * FROM comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
issues = myDB . select ( ' SELECT * from issues WHERE ComicID=? order by Int_IssueNumber DESC ' , [ ComicID ] )
if comic is None :
raise cherrypy . HTTPRedirect ( " home " )
2013-01-15 22:41:00 +00:00
usethefuzzy = comic [ ' UseFuzzy ' ]
2013-01-23 07:34:50 +00:00
skipped2wanted = " 0 "
2013-01-15 22:41:00 +00:00
if usethefuzzy is None : usethefuzzy = " 0 "
2012-09-14 17:29:01 +00:00
comicConfig = {
2013-01-13 15:59:46 +00:00
" comiclocation " : mylar . COMIC_LOCATION ,
2013-01-15 22:41:00 +00:00
" fuzzy_year0 " : helpers . radio ( int ( usethefuzzy ) , 0 ) ,
" fuzzy_year1 " : helpers . radio ( int ( usethefuzzy ) , 1 ) ,
2013-01-23 07:34:50 +00:00
" fuzzy_year2 " : helpers . radio ( int ( usethefuzzy ) , 2 ) ,
" skipped2wanted " : helpers . checked ( skipped2wanted )
2012-09-14 17:29:01 +00:00
}
return serve_template ( templatename = " artistredone.html " , title = comic [ ' ComicName ' ] , comic = comic , issues = issues , comicConfig = comicConfig )
2012-09-13 15:27:34 +00:00
artistPage . exposed = True
def searchit ( self , name , issue = None , mode = None ) :
type = ' comic ' # let's default this to comic search only for the time being (will add story arc, characters, etc later)
#mode dictates type of search:
# --series ... search for comicname displaying all results
# --pullseries ... search for comicname displaying a limited # of results based on issue
# --want ... individual comics
if mode is None : mode = ' series '
if len ( name ) == 0 :
raise cherrypy . HTTPRedirect ( " home " )
if type == ' comic ' and mode == ' pullseries ' :
searchresults = mb . findComic ( name , mode , issue = issue )
elif type == ' comic ' and mode == ' series ' :
searchresults = mb . findComic ( name , mode , issue = None )
elif type == ' comic ' and mode == ' want ' :
searchresults = mb . findComic ( name , mode , issue )
2013-01-28 20:31:43 +00:00
2012-09-13 15:27:34 +00:00
searchresults = sorted ( searchresults , key = itemgetter ( ' comicyear ' , ' issues ' ) , reverse = True )
#print ("Results: " + str(searchresults))
2013-02-09 03:34:02 +00:00
return serve_template ( templatename = " searchresults.html " , title = ' Search Results for: " ' + name + ' " ' , searchresults = searchresults , type = type , imported = None , ogcname = None )
2012-09-13 15:27:34 +00:00
searchit . exposed = True
2013-02-09 03:34:02 +00:00
def addComic ( self , comicid , comicname = None , comicyear = None , comicimage = None , comicissues = None , comicpublisher = None , imported = None , ogcname = None ) :
2012-10-16 08:16:29 +00:00
myDB = db . DBConnection ( )
2013-02-13 01:27:24 +00:00
print ( " I ' m here. " )
if imported == " confirm " :
# if it's coming from the importer and it's just for confirmation, record the right selection and break.
# if it's 'confirmed' coming in as the value for imported
# the ogcname will be the original comicid that is either correct/incorrect (doesn't matter which)
#confirmedid is the selected series (comicid) with the letter C at the beginning to denote Confirmed.
# then sql the original comicid which will hit on all the results for the given series.
# iterate through, and overwrite the existing watchmatch with the new chosen 'C' + comicid value
confirmedid = " C " + str ( comicid )
confirms = myDB . action ( " SELECT * FROM importresults WHERE WatchMatch=? " , [ ogcname ] )
if confirms is None :
print ( " There are no results that match...this is an ERROR. " )
else :
for confirm in confirms :
controlValue = { " impID " : confirm [ ' impID ' ] }
newValue = { " WatchMatch " : str ( confirmedid ) }
myDB . upsert ( " importresults " , newValue , controlValue )
self . importResults ( )
return
2012-10-16 08:16:29 +00:00
sresults = [ ]
2013-01-11 21:20:51 +00:00
cresults = [ ]
2012-10-16 08:16:29 +00:00
mismatch = " no "
2013-02-06 19:55:23 +00:00
print ( " comicid: " + str ( comicid ) )
print ( " comicname: " + str ( comicname ) )
print ( " comicyear: " + str ( comicyear ) )
print ( " comicissues: " + str ( comicissues ) )
print ( " comicimage: " + str ( comicimage ) )
2012-10-16 08:16:29 +00:00
#here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
CV_EXcomicid = myDB . action ( " SELECT * from exceptions WHERE ComicID=? " , [ comicid ] ) . fetchone ( )
2013-01-11 21:20:51 +00:00
if CV_EXcomicid is None : # pass #
2013-01-24 18:31:03 +00:00
gcdinfo = parseit . GCDScraper ( comicname , comicyear , comicissues , comicid , quickmatch = " yes " )
2013-01-11 21:20:51 +00:00
if gcdinfo == " No Match " :
2013-01-13 15:59:46 +00:00
#when it no matches, the image will always be blank...let's fix it.
cvdata = mylar . cv . getComic ( comicid , ' comic ' )
comicimage = cvdata [ ' ComicImage ' ]
2013-01-11 21:20:51 +00:00
updater . no_searchresults ( comicid )
nomatch = " true "
logger . info ( u " I couldn ' t find an exact match for " + str ( comicname ) + " ( " + str ( comicyear ) + " ) - gathering data for Error-Checking screen (this could take a minute)... " )
i = 0
loopie , cnt = parseit . ComChk ( comicname , comicyear , comicpublisher , comicissues , comicid )
2013-02-06 19:55:23 +00:00
print ( " total count : " + str ( cnt ) )
2013-01-11 21:20:51 +00:00
while ( i < cnt ) :
try :
stoopie = loopie [ ' comchkchoice ' ] [ i ]
except ( IndexError , TypeError ) :
break
cresults . append ( {
' ComicID ' : stoopie [ ' ComicID ' ] ,
' ComicName ' : stoopie [ ' ComicName ' ] ,
' ComicYear ' : stoopie [ ' ComicYear ' ] ,
' ComicIssues ' : stoopie [ ' ComicIssues ' ] ,
' ComicURL ' : stoopie [ ' ComicURL ' ] ,
' ComicPublisher ' : stoopie [ ' ComicPublisher ' ] ,
' GCDID ' : stoopie [ ' GCDID ' ]
} )
i + = 1
2013-01-14 05:42:03 +00:00
return serve_template ( templatename = " searchfix.html " , title = " Error Check " , comicname = comicname , comicid = comicid , comicyear = comicyear , comicimage = comicimage , comicissues = comicissues , cresults = cresults )
2013-01-11 21:20:51 +00:00
else :
nomatch = " false "
logger . info ( u " Quick match success..continuing. " )
2012-10-16 08:16:29 +00:00
else :
if CV_EXcomicid [ ' variloop ' ] == ' 99 ' :
logger . info ( u " mismatched name...autocorrecting to correct GID and auto-adding. " )
mismatch = " yes "
if CV_EXcomicid [ ' NewComicID ' ] == ' none ' :
logger . info ( u " multi-volume series detected " )
testspx = CV_EXcomicid [ ' GComicID ' ] . split ( ' / ' )
for exc in testspx :
fakeit = parseit . GCDAdd ( testspx )
howmany = int ( CV_EXcomicid [ ' variloop ' ] )
t = 0
while ( t < = howmany ) :
try :
sres = fakeit [ ' serieschoice ' ] [ t ]
except IndexError :
break
sresults . append ( {
' ComicID ' : sres [ ' ComicID ' ] ,
' ComicName ' : sres [ ' ComicName ' ] ,
' ComicYear ' : sres [ ' ComicYear ' ] ,
' ComicIssues ' : sres [ ' ComicIssues ' ] ,
' ComicPublisher ' : sres [ ' ComicPublisher ' ] ,
' ComicCover ' : sres [ ' ComicCover ' ]
} )
t + = 1
#searchfix(-1).html is for misnamed comics and wrong years.
#searchfix-2.html is for comics that span multiple volumes.
return serve_template ( templatename = " searchfix-2.html " , title = " In-Depth Results " , sresults = sresults )
2013-02-09 03:34:02 +00:00
#print ("imported is: " + str(imported))
threading . Thread ( target = importer . addComictoDB , args = [ comicid , mismatch , None , imported , ogcname ] ) . start ( )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
addComic . exposed = True
2012-10-16 08:16:29 +00:00
2013-01-11 21:20:51 +00:00
def from_Exceptions ( self , comicid , gcdid , comicname = None , comicyear = None , comicissues = None , comicpublisher = None ) :
mismatch = " yes "
2013-01-24 17:01:27 +00:00
#print ("gcdid:" + str(gcdid))
2013-01-11 21:20:51 +00:00
#write it to the custom_exceptions.csv and reload it so that importer will pick it up and do it's thing :)
#custom_exceptions in this format...
#99, (comicid), (gcdid), none
logger . info ( " saving new information into custom_exceptions.csv... " )
except_info = " none # " + str ( comicname ) + " -( " + str ( comicyear ) + " ) "
2013-01-15 19:02:32 +00:00
except_file = os . path . join ( mylar . DATA_DIR , " custom_exceptions.csv " )
if not os . path . exists ( except_file ) :
try :
csvfile = open ( str ( except_file ) , ' rb ' )
csvfile . close ( )
except ( OSError , IOError ) :
logger . error ( " Could not locate " + str ( except_file ) + " file. Make sure it ' s in datadir: " + mylar . DATA_DIR + " with proper permissions. " )
return
with open ( str ( except_file ) , ' a ' ) as f :
2013-01-11 21:20:51 +00:00
f . write ( ' %s , %s , %s , %s \n ' % ( " 99 " , str ( comicid ) , str ( gcdid ) , str ( except_info ) ) )
logger . info ( " re-loading csv file so it ' s all nice and current. " )
mylar . csv_load ( )
threading . Thread ( target = importer . addComictoDB , args = [ comicid , mismatch ] ) . start ( )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
from_Exceptions . exposed = True
2012-10-16 08:16:29 +00:00
def GCDaddComic ( self , comicid , comicname = None , comicyear = None , comicissues = None , comiccover = None , comicpublisher = None ) :
#since we already know most of the info, let's add it to the db so we can reference it later.
myDB = db . DBConnection ( )
gcomicid = " G " + str ( comicid )
comicyear_len = comicyear . find ( ' ' , 2 )
comyear = comicyear [ comicyear_len + 1 : comicyear_len + 5 ]
2013-01-24 17:01:27 +00:00
if comyear . isdigit ( ) :
logger . fdebug ( " Series year set to : " + str ( comyear ) )
else :
logger . fdebug ( " Invalid Series year detected - trying to adjust from " + str ( comyear ) )
#comicyear_len above will trap wrong year if it's 10 October 2010 - etc ( 2000 AD)...
find_comicyear = comicyear . split ( )
for i in find_comicyear :
if len ( i ) == 4 :
logger . fdebug ( " Series year detected as : " + str ( i ) )
comyear = str ( i )
continue
logger . fdebug ( " Series year set to: " + str ( comyear ) )
2012-10-16 08:16:29 +00:00
controlValueDict = { ' ComicID ' : gcomicid }
newValueDict = { ' ComicName ' : comicname ,
' ComicYear ' : comyear ,
' ComicPublished ' : comicyear ,
' ComicPublisher ' : comicpublisher ,
' ComicImage ' : comiccover ,
' Total ' : comicissues }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
threading . Thread ( target = importer . GCDimport , args = [ gcomicid ] ) . start ( )
2012-10-16 15:12:44 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % gcomicid )
2012-10-16 08:16:29 +00:00
GCDaddComic . exposed = True
2012-10-30 10:43:01 +00:00
def post_process ( self , nzb_name , nzb_folder ) :
logger . info ( u " Starting postprocessing for : " + str ( nzb_name ) )
2012-12-27 15:04:03 +00:00
PostProcess = PostProcessor . PostProcessor ( nzb_name , nzb_folder )
result = PostProcess . Process ( )
2012-10-30 10:43:01 +00:00
#result = post_results.replace("\n","<br />\n")
return result
#log2screen = threading.Thread(target=PostProcessor.PostProcess, args=[nzb_name,nzb_folder]).start()
#return serve_template(templatename="postprocess.html", title="postprocess")
#raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)
post_process . exposed = True
2012-09-13 15:27:34 +00:00
def pauseArtist ( self , ComicID ) :
logger . info ( u " Pausing comic: " + ComicID )
myDB = db . DBConnection ( )
controlValueDict = { ' ComicID ' : ComicID }
newValueDict = { ' Status ' : ' Paused ' }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
pauseArtist . exposed = True
def resumeArtist ( self , ComicID ) :
logger . info ( u " Resuming comic: " + ComicID )
myDB = db . DBConnection ( )
controlValueDict = { ' ComicID ' : ComicID }
newValueDict = { ' Status ' : ' Active ' }
myDB . upsert ( " comics " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
resumeArtist . exposed = True
def deleteArtist ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * from comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
2012-09-24 05:17:29 +00:00
if comic [ ' ComicName ' ] is None : ComicName = " None "
else : ComicName = comic [ ' ComicName ' ]
logger . info ( u " Deleting all traces of Comic: " + str ( ComicName ) )
2012-09-13 15:27:34 +00:00
myDB . action ( ' DELETE from comics WHERE ComicID=? ' , [ ComicID ] )
myDB . action ( ' DELETE from issues WHERE ComicID=? ' , [ ComicID ] )
2013-02-06 19:55:23 +00:00
myDB . action ( ' DELETE from upcoming WHERE ComicID=? ' , [ ComicID ] )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " home " )
deleteArtist . exposed = True
def refreshArtist ( self , ComicID ) :
2012-10-16 08:16:29 +00:00
myDB = db . DBConnection ( )
mismatch = " no "
CV_EXcomicid = myDB . action ( " SELECT * from exceptions WHERE ComicID=? " , [ ComicID ] ) . fetchone ( )
if CV_EXcomicid is None : pass
else :
if CV_EXcomicid [ ' variloop ' ] == ' 99 ' :
mismatch = " yes "
if ComicID [ : 1 ] == " G " : threading . Thread ( target = importer . GCDimport , args = [ ComicID ] ) . start ( )
else : threading . Thread ( target = importer . addComictoDB , args = [ ComicID , mismatch ] ) . start ( )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
refreshArtist . exposed = True
def editIssue ( self , ComicID ) :
myDB = db . DBConnection ( )
comic = myDB . action ( ' SELECT * from comics WHERE ComicID=? ' , [ ComicID ] ) . fetchone ( )
title = ' Now Editing ' + comic [ ' ComicName ' ]
return serve_template ( templatename = " editcomic.html " , title = title , comic = comic )
#raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" & ComicID)
editIssue . exposed = True
2012-10-16 08:16:29 +00:00
def markissues ( self , action = None , * * args ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
2012-10-16 08:16:29 +00:00
issuesToAdd = [ ]
2012-10-30 10:43:01 +00:00
issuestoArchive = [ ]
2012-09-13 15:27:34 +00:00
if action == ' WantedNew ' :
newaction = ' Wanted '
else :
newaction = action
for IssueID in args :
2013-02-09 03:34:02 +00:00
#print ("issueID: " + str(IssueID) + "... " + str(newaction))
if IssueID is None or ' issue_table ' in IssueID :
continue
2012-09-13 15:27:34 +00:00
else :
2012-10-16 08:16:29 +00:00
mi = myDB . action ( " SELECT * FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
miyr = myDB . action ( " SELECT ComicYear FROM comics WHERE ComicID=? " , [ mi [ ' ComicID ' ] ] ) . fetchone ( )
2012-10-30 10:43:01 +00:00
if action == ' Downloaded ' :
if mi [ ' Status ' ] == " Skipped " or mi [ ' Status ' ] == " Wanted " :
logger . info ( u " Cannot change status to %s as comic is not Snatched or Downloaded " % ( newaction ) )
2013-02-09 03:34:02 +00:00
# continue
2012-10-30 10:43:01 +00:00
elif action == ' Archived ' :
logger . info ( u " Marking %s %s as %s " % ( mi [ ' ComicName ' ] , mi [ ' Issue_Number ' ] , newaction ) )
#updater.forceRescan(mi['ComicID'])
issuestoArchive . append ( IssueID )
elif action == ' Wanted ' :
logger . info ( u " Marking %s %s as %s " % ( mi [ ' ComicName ' ] , mi [ ' Issue_Number ' ] , newaction ) )
issuesToAdd . append ( IssueID )
2013-02-09 03:34:02 +00:00
elif action == ' Skipped ' :
logger . info ( u " Marking " + str ( IssueID ) + " as Skipped " )
2012-10-16 08:16:29 +00:00
controlValueDict = { " IssueID " : IssueID }
newValueDict = { " Status " : newaction }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
2012-10-30 10:43:01 +00:00
if len ( issuestoArchive ) > 0 :
updater . forceRescan ( mi [ ' ComicID ' ] )
2012-10-16 08:16:29 +00:00
if len ( issuesToAdd ) > 0 :
2013-02-09 03:34:02 +00:00
logger . debug ( " Marking issues: %s as Wanted " % ( issuesToAdd ) )
2012-10-16 08:16:29 +00:00
threading . Thread ( target = search . searchIssueIDList , args = [ issuesToAdd ] ) . start ( )
2012-10-16 15:53:46 +00:00
#if IssueID:
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % mi [ ' ComicID ' ] )
#else:
# raise cherrypy.HTTPRedirect("upcoming")
2012-09-13 15:27:34 +00:00
markissues . exposed = True
def addArtists ( self , * * args ) :
threading . Thread ( target = importer . artistlist_to_mbids , args = [ args , True ] ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
addArtists . exposed = True
2012-10-01 15:01:21 +00:00
def queueissue ( self , mode , ComicName = None , ComicID = None , ComicYear = None , ComicIssue = None , IssueID = None , new = False , redirect = None ) :
2012-12-31 16:52:16 +00:00
now = datetime . datetime . now ( )
2012-09-19 04:38:25 +00:00
myDB = db . DBConnection ( )
2012-09-13 15:27:34 +00:00
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
if ComicID is None and mode == ' series ' :
issue = None
raise cherrypy . HTTPRedirect ( " searchit?name= %s &issue= %s &mode= %s " % ( ComicName , ' None ' , ' series ' ) )
elif ComicID is None and mode == ' pullseries ' :
# we can limit the search by including the issue # and searching for
# comics that have X many issues
raise cherrypy . HTTPRedirect ( " searchit?name= %s &issue= %s &mode= %s " % ( ComicName , ' None ' , ' pullseries ' ) )
elif ComicID is None and mode == ' pullwant ' :
#this is for marking individual comics from the pullist to be downloaded.
#because ComicID and IssueID will both be None due to pullist, it's probably
#better to set both to some generic #, and then filter out later...
cyear = myDB . action ( " SELECT SHIPDATE FROM weekly " ) . fetchone ( )
ComicYear = str ( cyear [ ' SHIPDATE ' ] ) [ : 4 ]
2012-12-31 16:52:16 +00:00
if ComicYear == ' ' : ComicYear = now . year
2012-09-13 15:27:34 +00:00
logger . info ( u " Marking " + ComicName + " " + ComicIssue + " as wanted... " )
2013-02-06 19:55:23 +00:00
foundcom = search . search_init ( ComicName = ComicName , IssueNumber = ComicIssue , ComicYear = ComicYear , SeriesYear = None , IssueDate = cyear [ ' SHIPDATE ' ] , IssueID = IssueID , AlternateSearch = None , UseFuzzy = None )
2012-09-13 15:27:34 +00:00
if foundcom == " yes " :
logger . info ( u " Downloaded " + ComicName + " " + ComicIssue )
return
elif mode == ' want ' :
2012-10-01 15:01:21 +00:00
cdname = myDB . action ( " SELECT ComicName from comics where ComicID=? " , [ ComicID ] ) . fetchone ( )
ComicName = cdname [ ' ComicName ' ]
2012-09-13 15:27:34 +00:00
logger . info ( u " Marking " + ComicName + " issue: " + ComicIssue + " as wanted... " )
#---
#this should be on it's own somewhere
if IssueID is not None :
controlValueDict = { " IssueID " : IssueID }
newStatus = { " Status " : " Wanted " }
myDB . upsert ( " issues " , newStatus , controlValueDict )
#for future reference, the year should default to current year (.datetime)
2012-10-09 06:33:14 +00:00
issues = myDB . action ( " SELECT IssueDate FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
2012-09-13 15:27:34 +00:00
if ComicYear == None :
ComicYear = str ( issues [ ' IssueDate ' ] ) [ : 4 ]
2012-12-31 16:52:16 +00:00
miy = myDB . action ( " SELECT * FROM comics WHERE ComicID=? " , [ ComicID ] ) . fetchone ( )
SeriesYear = miy [ ' ComicYear ' ]
AlternateSearch = miy [ ' AlternateSearch ' ]
2013-02-06 19:55:23 +00:00
UseAFuzzy = miy [ ' UseFuzzy ' ]
foundcom = search . search_init ( ComicName , ComicIssue , ComicYear , SeriesYear , issues [ ' IssueDate ' ] , IssueID , AlternateSearch , UseAFuzzy )
2012-09-13 15:27:34 +00:00
if foundcom == " yes " :
# file check to see if issue exists and update 'have' count
if IssueID is not None :
return updater . foundsearch ( ComicID , IssueID )
if ComicID :
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
else :
raise cherrypy . HTTPRedirect ( redirect )
queueissue . exposed = True
def unqueueissue ( self , IssueID , ComicID ) :
myDB = db . DBConnection ( )
issue = myDB . action ( ' SELECT * FROM issues WHERE IssueID=? ' , [ IssueID ] ) . fetchone ( )
logger . info ( u " Marking " + issue [ ' ComicName ' ] + " issue # " + issue [ ' Issue_Number ' ] + " as skipped... " )
controlValueDict = { ' IssueID ' : IssueID }
newValueDict = { ' Status ' : ' Skipped ' }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
unqueueissue . exposed = True
2013-02-17 10:31:18 +00:00
def archiveissue ( self , IssueID ) :
myDB = db . DBConnection ( )
issue = myDB . action ( ' SELECT * FROM issues WHERE IssueID=? ' , [ IssueID ] ) . fetchone ( )
logger . info ( u " Marking " + issue [ ' ComicName ' ] + " issue # " + issue [ ' Issue_Number ' ] + " as archived... " )
controlValueDict = { ' IssueID ' : IssueID }
newValueDict = { ' Status ' : ' Archived ' }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % issue [ ' ComicID ' ] )
archiveissue . exposed = True
2012-09-13 15:27:34 +00:00
def pullist ( self ) :
myDB = db . DBConnection ( )
2013-01-28 20:31:43 +00:00
weeklyresults = [ ]
2012-09-13 15:27:34 +00:00
popit = myDB . select ( " SELECT * FROM sqlite_master WHERE name= ' weekly ' and type= ' table ' " )
if popit :
2013-01-28 20:31:43 +00:00
w_results = myDB . select ( " SELECT PUBLISHER, ISSUE, COMIC, STATUS from weekly " )
for weekly in w_results :
if weekly [ ' ISSUE ' ] . isdigit ( ) :
weeklyresults . append ( {
" PUBLISHER " : weekly [ ' PUBLISHER ' ] ,
" ISSUE " : weekly [ ' ISSUE ' ] ,
" COMIC " : weekly [ ' COMIC ' ] ,
" STATUS " : weekly [ ' STATUS ' ]
} )
weeklyresults = sorted ( weeklyresults , key = itemgetter ( ' PUBLISHER ' , ' COMIC ' ) , reverse = False )
2012-09-13 15:27:34 +00:00
pulldate = myDB . action ( " SELECT * from weekly " ) . fetchone ( )
2012-12-16 18:41:01 +00:00
if pulldate is None :
return self . manualpull ( )
#raise cherrypy.HTTPRedirect("home")
2012-09-13 15:27:34 +00:00
else :
return self . manualpull ( )
2013-01-28 20:31:43 +00:00
return serve_template ( templatename = " weeklypull.html " , title = " Weekly Pull " , weeklyresults = weeklyresults , pulldate = pulldate [ ' SHIPDATE ' ] , pullfilter = True )
2012-09-13 15:27:34 +00:00
pullist . exposed = True
def filterpull ( self ) :
myDB = db . DBConnection ( )
weeklyresults = myDB . select ( " SELECT * from weekly " )
pulldate = myDB . action ( " SELECT * from weekly " ) . fetchone ( )
if pulldate is None :
raise cherrypy . HTTPRedirect ( " home " )
2013-01-28 20:31:43 +00:00
return serve_template ( templatename = " weeklypull.html " , title = " Weekly Pull " , weeklyresults = weeklyresults , pulldate = pulldate [ ' SHIPDATE ' ] , pullfilter = True )
2012-09-13 15:27:34 +00:00
filterpull . exposed = True
def manualpull ( self ) :
from mylar import weeklypull
threading . Thread ( target = weeklypull . pullit ( ) ) . start ( )
raise cherrypy . HTTPRedirect ( " pullist " )
manualpull . exposed = True
def upcoming ( self ) :
myDB = db . DBConnection ( )
#upcoming = myDB.select("SELECT * from issues WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
2013-01-15 22:41:00 +00:00
upcoming = myDB . select ( " SELECT * from upcoming WHERE IssueDate > date( ' now ' ) AND IssueID is NULL order by IssueDate DESC " )
2012-09-13 15:27:34 +00:00
issues = myDB . select ( " SELECT * from issues WHERE Status= ' Wanted ' " )
#let's move any items from the upcoming table into the wanted table if the date has already passed.
2012-10-16 08:16:29 +00:00
#gather the list...
mvupcome = myDB . select ( " SELECT * from upcoming WHERE IssueDate < date( ' now ' ) order by IssueDate DESC " )
#get the issue ID's
for mvup in mvupcome :
2012-10-16 15:12:44 +00:00
myissue = myDB . action ( " SELECT * FROM issues WHERE Issue_Number=? " , [ mvup [ ' IssueNumber ' ] ] ) . fetchone ( )
2012-10-16 08:16:29 +00:00
if myissue is None : pass
else :
2012-10-16 15:12:44 +00:00
#print ("ComicName: " + str(myissue['ComicName']))
#print ("Issue number : " + str(myissue['Issue_Number']) )
2012-10-16 08:16:29 +00:00
mvcontroldict = { " IssueID " : myissue [ ' IssueID ' ] }
2012-10-16 15:12:44 +00:00
mvvalues = { " ComicID " : myissue [ ' ComicID ' ] ,
2012-10-16 08:16:29 +00:00
" Status " : " Wanted " }
2012-10-18 07:08:43 +00:00
myDB . upsert ( " issues " , mvvalues , mvcontroldict )
2012-10-16 08:16:29 +00:00
2012-10-21 15:30:26 +00:00
#remove old entry from upcoming so it won't try to continually download again.
deleteit = myDB . action ( " DELETE from upcoming WHERE ComicName=? AND IssueNumber=? " , [ mvup [ ' ComicName ' ] , mvup [ ' IssueNumber ' ] ] )
2012-09-13 15:27:34 +00:00
return serve_template ( templatename = " upcoming.html " , title = " Upcoming " , upcoming = upcoming , issues = issues )
upcoming . exposed = True
2012-09-24 05:17:29 +00:00
2013-01-23 07:34:50 +00:00
def skipped2wanted ( self , comicid ) :
# change all issues for a given ComicID that are Skipped, into Wanted.
issuestowanted = [ ]
issuesnumwant = [ ]
myDB = db . DBConnection ( )
skipped2 = myDB . select ( " SELECT * from issues WHERE ComicID=? AND Status= ' Skipped ' " , [ comicid ] )
for skippy in skipped2 :
mvcontroldict = { " IssueID " : skippy [ ' IssueID ' ] }
mvvalues = { " Status " : " Wanted " }
#print ("Changing issue " + str(skippy['Issue_Number']) + " to Wanted.")
myDB . upsert ( " issues " , mvvalues , mvcontroldict )
issuestowanted . append ( skippy [ ' IssueID ' ] )
issuesnumwant . append ( skippy [ ' Issue_Number ' ] )
if len ( issuestowanted ) > 0 :
logger . info ( " Marking issues: %s as Wanted " % issuesnumwant )
threading . Thread ( target = search . searchIssueIDList , args = [ issuestowanted ] ) . start ( )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % [ comicid ] )
skipped2wanted . exposed = True
2013-02-13 01:27:24 +00:00
def manualRename ( self , comicid ) :
print ( " entering. " )
if mylar . FILE_FORMAT == ' ' :
print ( " You haven ' t specified a File Format in Configuration/Advanced " )
print ( " Cannot rename files. " )
return
myDB = db . DBConnection ( )
comic = myDB . action ( " SELECT * FROM comics WHERE ComicID=? " , [ comicid ] ) . fetchone ( )
comicdir = comic [ ' ComicLocation ' ]
comicname = comic [ ' ComicName ' ]
extensions = ( ' .cbr ' , ' .cbz ' )
issues = myDB . action ( " SELECT * FROM issues WHERE ComicID=? " , [ comicid ] )
comfiles = [ ]
for root , dirnames , filenames in os . walk ( comicdir ) :
for filename in filenames :
if filename . lower ( ) . endswith ( extensions ) :
print ( " filename being checked is : " + str ( filename ) )
for issue in issues :
if issue [ ' Location ' ] == filename :
print ( " matched " + str ( filename ) + " to DB file " + str ( issue [ ' Location ' ] ) )
renameiss = helpers . rename_param ( comicid , comicname , issue [ ' Issue_Number ' ] , filename , comicyear = None , issueid = None )
nfilename = renameiss [ ' nfilename ' ]
srciss = os . path . join ( comicdir , filename )
dstiss = os . path . join ( comicdir , nfilename )
logger . info ( " Renaming " + str ( filename ) + " ... to " + str ( nfilename ) )
try :
shutil . move ( srciss , dstiss )
except ( OSError , IOError ) :
logger . error ( " Failed to move files - check directories and manually re-run. " )
continue
2013-01-28 20:31:43 +00:00
print ( " hello " )
2013-02-13 01:27:24 +00:00
manualRename . exposed = True
2013-01-28 20:31:43 +00:00
2012-09-24 05:17:29 +00:00
def searchScan ( self , name ) :
return serve_template ( templatename = " searchfix.html " , title = " Manage " , name = name )
searchScan . exposed = True
2012-09-13 15:27:34 +00:00
def manage ( self ) :
return serve_template ( templatename = " manage.html " , title = " Manage " )
manage . exposed = True
2012-09-18 13:13:42 +00:00
def manageComics ( self ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
comics = myDB . select ( ' SELECT * from comics order by ComicSortName COLLATE NOCASE ' )
2012-09-18 13:13:42 +00:00
return serve_template ( templatename = " managecomics.html " , title = " Manage Comics " , comics = comics )
manageComics . exposed = True
2012-09-13 15:27:34 +00:00
2012-09-18 04:00:43 +00:00
def manageIssues ( self ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
issues = myDB . select ( ' SELECT * from issues ' )
2012-09-24 05:17:29 +00:00
return serve_template ( templatename = " manageissues.html " , title = " Manage Issues " , issues = issues )
2012-09-18 13:13:42 +00:00
manageIssues . exposed = True
2012-09-13 15:27:34 +00:00
def manageNew ( self ) :
myDB = db . DBConnection ( )
newcomics = myDB . select ( ' SELECT * from newartists ' )
return serve_template ( templatename = " managenew.html " , title = " Manage New Artists " , newcomics = newcomics )
manageNew . exposed = True
2012-09-18 04:00:43 +00:00
def markComics ( self , action = None , * * args ) :
2012-09-13 15:27:34 +00:00
myDB = db . DBConnection ( )
2012-09-18 04:00:43 +00:00
comicsToAdd = [ ]
for ComicID in args :
2012-09-13 15:27:34 +00:00
if action == ' delete ' :
2012-09-18 04:00:43 +00:00
myDB . action ( ' DELETE from comics WHERE ComicID=? ' , [ ComicID ] )
myDB . action ( ' DELETE from issues WHERE ComicID=? ' , [ ComicID ] )
2012-09-13 15:27:34 +00:00
elif action == ' pause ' :
2012-09-18 04:00:43 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2012-09-13 15:27:34 +00:00
newValueDict = { ' Status ' : ' Paused ' }
2012-09-18 04:00:43 +00:00
myDB . upsert ( " comics " , newValueDict , controlValueDict )
2012-09-13 15:27:34 +00:00
elif action == ' resume ' :
2012-09-18 04:00:43 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2012-09-13 15:27:34 +00:00
newValueDict = { ' Status ' : ' Active ' }
2012-09-18 04:00:43 +00:00
myDB . upsert ( " comics " , newValueDict , controlValueDict )
2012-09-13 15:27:34 +00:00
else :
2012-09-18 04:00:43 +00:00
comicsToAdd . append ( ComicID )
if len ( comicsToAdd ) > 0 :
logger . debug ( " Refreshing comics: %s " % comicsToAdd )
threading . Thread ( target = importer . addComicIDListToDB , args = [ comicsToAdd ] ) . start ( )
2012-09-13 15:27:34 +00:00
raise cherrypy . HTTPRedirect ( " home " )
2012-09-18 04:00:43 +00:00
markComics . exposed = True
2012-09-13 15:27:34 +00:00
def forceUpdate ( self ) :
from mylar import updater
threading . Thread ( target = updater . dbUpdate ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
forceUpdate . exposed = True
def forceSearch ( self ) :
from mylar import search
threading . Thread ( target = search . searchforissue ) . start ( )
raise cherrypy . HTTPRedirect ( " home " )
forceSearch . exposed = True
def forceRescan ( self , ComicID ) :
threading . Thread ( target = updater . forceRescan , args = [ ComicID ] ) . start ( )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
forceRescan . exposed = True
def checkGithub ( self ) :
from mylar import versioncheck
versioncheck . checkGithub ( )
raise cherrypy . HTTPRedirect ( " home " )
checkGithub . exposed = True
def history ( self ) :
myDB = db . DBConnection ( )
history = myDB . select ( ''' SELECT * from snatched order by DateAdded DESC ''' )
return serve_template ( templatename = " history.html " , title = " History " , history = history )
return page
history . exposed = True
def logs ( self ) :
2013-02-06 19:55:23 +00:00
if mylar . LOG_LEVEL is None or mylar . LOG_LEVEL == ' ' :
mylar . LOG_LEVEL = ' info '
return serve_template ( templatename = " logs.html " , title = " Log " , lineList = mylar . LOG_LIST , log_level = mylar . LOG_LEVEL )
2012-09-13 15:27:34 +00:00
logs . exposed = True
2013-02-06 19:55:23 +00:00
def log_change ( self , * * args ) :
print ( " here: " + str ( args ) )
for loglevel in args :
if loglevel is None : continue
else :
print ( " changing logger to " + str ( loglevel ) )
LOGGER . setLevel ( loglevel )
return serve_template ( templatename = " logs.html " , title = " Log " , lineList = mylar . LOG_LIST )
log_change . exposed = True
2012-09-13 15:27:34 +00:00
def clearhistory ( self , type = None ) :
myDB = db . DBConnection ( )
if type == ' all ' :
logger . info ( u " Clearing all history " )
myDB . action ( ' DELETE from snatched ' )
else :
logger . info ( u " Clearing history where status is %s " % type )
myDB . action ( ' DELETE from snatched WHERE Status=? ' , [ type ] )
raise cherrypy . HTTPRedirect ( " history " )
clearhistory . exposed = True
2013-02-06 19:55:23 +00:00
def downloadLocal ( self , IssueID ) :
2013-02-17 10:31:18 +00:00
#print ("issueid: " + str(IssueID))
2013-02-06 19:55:23 +00:00
myDB = db . DBConnection ( )
issueDL = myDB . action ( " SELECT * FROM issues WHERE IssueID=? " , [ IssueID ] ) . fetchone ( )
comicid = issueDL [ ' ComicID ' ]
2013-02-17 10:31:18 +00:00
#print ("comicid: " + str(comicid))
2013-02-06 19:55:23 +00:00
comic = myDB . action ( " SELECT * FROM comics WHERE ComicID=? " , [ comicid ] ) . fetchone ( )
issueLOC = comic [ ' ComicLocation ' ]
2013-02-17 10:31:18 +00:00
#print ("IssueLOC: " + str(issueLOC))
2013-02-06 19:55:23 +00:00
issueFILE = issueDL [ ' Location ' ]
2013-02-17 10:31:18 +00:00
#print ("IssueFILE: "+ str(issueFILE))
2013-02-06 19:55:23 +00:00
issuePATH = os . path . join ( issueLOC , issueFILE )
2013-02-17 10:31:18 +00:00
#print ("IssuePATH: " + str(issuePATH))
2013-02-06 19:55:23 +00:00
dstPATH = os . path . join ( mylar . CACHE_DIR , issueFILE )
2013-02-17 10:31:18 +00:00
#print ("dstPATH: " + str(dstPATH))
try :
shutil . copy2 ( issuePATH , dstPATH )
except IOError as e :
logger . error ( " Could not copy " + str ( issuePATH ) + " to " + str ( dstPATH ) + " . Copy to Cache terminated. " )
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % comicid )
logger . debug ( " sucessfully copied to cache...Enabling Download link " )
controlValueDict = { ' IssueID ' : IssueID }
newValueDict = { ' inCacheDIR ' : ' True ' }
myDB . upsert ( " issues " , newValueDict , controlValueDict )
#print("DB updated - Download link now enabled.")
2013-02-06 19:55:23 +00:00
downloadLocal . exposed = True
2012-09-13 15:27:34 +00:00
2013-01-28 20:31:43 +00:00
#for testing.
def idirectory ( self ) :
return serve_template ( templatename = " idirectory.html " , title = " Import a Directory " )
idirectory . exposed = True
2013-02-13 01:27:24 +00:00
def confirmResult ( self , comicname , comicid ) :
#print ("here.")
mode = ' series '
sresults = mb . findComic ( comicname , mode , None )
#print sresults
type = ' comic '
return serve_template ( templatename = " searchresults.html " , title = ' Import Results for: " ' + comicname + ' " ' , searchresults = sresults , type = type , imported = ' confirm ' , ogcname = comicid )
confirmResult . exposed = True
2013-02-06 19:55:23 +00:00
def comicScan ( self , path , scan = 0 , redirect = None , autoadd = 0 , libraryscan = 0 , imp_move = 0 , imp_rename = 0 , imp_metadata = 0 ) :
2013-01-28 20:31:43 +00:00
mylar . LIBRARYSCAN = libraryscan
mylar . ADD_COMICS = autoadd
mylar . COMIC_DIR = path
mylar . IMP_MOVE = imp_move
mylar . IMP_RENAME = imp_rename
2013-02-06 19:55:23 +00:00
mylar . IMP_METADATA = imp_metadata
2013-01-28 20:31:43 +00:00
mylar . config_write ( )
if scan :
try :
2013-02-06 19:55:23 +00:00
soma , noids = librarysync . libraryScan ( )
2013-01-28 20:31:43 +00:00
except Exception , e :
logger . error ( ' Unable to complete the scan: %s ' % e )
if soma == " Completed " :
print ( " sucessfully completed import. " )
else :
2013-02-06 19:55:23 +00:00
logger . info ( u " Starting mass importing... " + str ( noids ) + " records. " )
2013-01-28 20:31:43 +00:00
#this is what it should do...
#store soma (the list of comic_details from importing) into sql table so import can be whenever
#display webpage showing results
#allow user to select comic to add (one at a time)
#call addComic off of the webpage to initiate the add.
#return to result page to finish or continue adding.
#....
#threading.Thread(target=self.searchit).start()
#threadthis = threadit.ThreadUrl()
#result = threadthis.main(soma)
myDB = db . DBConnection ( )
sl = 0
2013-02-06 19:55:23 +00:00
print ( " number of records: " + str ( noids ) )
while ( sl < int ( noids ) ) :
2013-01-28 20:31:43 +00:00
soma_sl = soma [ ' comic_info ' ] [ sl ]
2013-02-06 19:55:23 +00:00
print ( " soma_sl: " + str ( soma_sl ) )
print ( " comicname: " + soma_sl [ ' comicname ' ] )
print ( " filename: " + soma_sl [ ' comfilename ' ] )
controlValue = { " impID " : soma_sl [ ' impid ' ] }
2013-01-28 20:31:43 +00:00
newValue = { " ComicYear " : soma_sl [ ' comicyear ' ] ,
" Status " : " Not Imported " ,
2013-02-06 19:55:23 +00:00
" ComicName " : soma_sl [ ' comicname ' ] ,
" ComicFilename " : soma_sl [ ' comfilename ' ] ,
" ComicLocation " : soma_sl [ ' comlocation ' ] . encode ( ' utf-8 ' ) ,
2013-02-13 01:27:24 +00:00
" ImportDate " : helpers . today ( ) ,
" WatchMatch " : soma_sl [ ' watchmatch ' ] }
2013-01-28 20:31:43 +00:00
myDB . upsert ( " importresults " , newValue , controlValue )
sl + = 1
2013-02-06 19:55:23 +00:00
# because we could be adding volumes/series that span years, we need to account for this
# add the year to the db under the term, valid-years
# add the issue to the db under the term, min-issue
#locate metadata here.
# unzip -z filename.cbz will show the comment field of the zip which contains the metadata.
# unzip -z filename.cbz < /dev/null will remove the comment field, and thus the metadata.
2013-01-28 20:31:43 +00:00
self . importResults ( )
if redirect :
raise cherrypy . HTTPRedirect ( redirect )
else :
raise cherrypy . HTTPRedirect ( " home " )
comicScan . exposed = True
def importResults ( self ) :
myDB = db . DBConnection ( )
2013-02-13 01:27:24 +00:00
results = myDB . select ( " SELECT * FROM importresults WHERE WatchMatch is Null OR WatchMatch LIKE ' C % ' group by ComicName COLLATE NOCASE " )
watchresults = myDB . select ( " SELECT * FROM importresults WHERE WatchMatch is not Null AND WatchMatch NOT LIKE ' C % ' group by ComicName COLLATE NOCASE " )
return serve_template ( templatename = " importresults.html " , title = " Import Results " , results = results , watchresults = watchresults )
2013-01-28 20:31:43 +00:00
importResults . exposed = True
2013-02-06 19:55:23 +00:00
2013-02-09 03:34:02 +00:00
def deleteimport ( self , ComicName ) :
myDB = db . DBConnection ( )
logger . info ( " Removing import data for Comic: " + str ( ComicName ) )
myDB . action ( ' DELETE from importresults WHERE ComicName=? ' , [ ComicName ] )
raise cherrypy . HTTPRedirect ( " importResults " )
deleteimport . exposed = True
2013-02-13 01:27:24 +00:00
    def preSearchit(self, ComicName):
        """Prepare an import for one series: if the scan already matched it to
        the watchlist ('C'-prefixed WatchMatch) just move/rescan the files;
        otherwise derive a year range and issue count from the staged files
        and run a series search, auto-adding when exactly one result comes back.
        """
        myDB = db.DBConnection()
        results = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [ComicName])
        # build the valid year ranges and the minimum issue# here to pass to search.
        yearRANGE = []
        yearTOP = 0
        minISSUE = 0          # tracks the HIGHEST issue number seen (naming is historical)
        startISSUE = 10000000  # tracks the LOWEST issue number seen
        comicstoIMP = []
        for result in results:
            if result is None:
                break
            if result['WatchMatch']:
                watchmatched = result['WatchMatch']
            else:
                watchmatched = ''
            if watchmatched.startswith('C'):
                # 'C' prefix means the ComicID is already known -- auto mode.
                print ("Confirmed. ComicID already provided - initiating auto-magik mode for import.")
                comicid = result['WatchMatch'][1:]
                print (result['WatchMatch'] + ".to." + str(comicid))
                # since it's already in the watchlist, we just need to move the
                # files and re-run the filechecker.
                if mylar.IMP_MOVE:
                    logger.info("Mass import - Move files")
                    comloc = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
                    mylar.moveit.movefiles(comicid, comloc['ComicLocation'], ComicName)
                    # check for existing files...
                    updater.forceRescan(comicid)
                else:
                    print ("nothing to do if I'm not moving.")
                raise cherrypy.HTTPRedirect("importResults")
            else:
                comicstoIMP.append(result['ComicLocation'].decode(mylar.SYS_ENCODING, 'replace'))
                # the issue number is encoded after the last '-' of the impID
                getiss = result['impID'].rfind('-')
                getiss = result['impID'][getiss + 1:]
                print ("figured issue is : " + str(getiss))
                if (result['ComicYear'] not in yearRANGE) or (yearRANGE is None):
                    if result['ComicYear'] <> "0000":
                        print ("adding..." + str(result['ComicYear']))
                        yearRANGE.append(result['ComicYear'])
                        yearTOP = str(result['ComicYear'])
                if int(getiss) > int(minISSUE):
                    print ("issue now set to : " + str(getiss) + " ... it was : " + str(minISSUE))
                    minISSUE = str(getiss)
                if int(getiss) < int(startISSUE):
                    print ("issue now set to : " + str(getiss) + " ... it was : " + str(startISSUE))
                    startISSUE = str(getiss)
        # figure out # of issues and the year range allowable
        if yearTOP > 0:
            # assume 12 issues/year to estimate how far back the series starts
            # (Py2 integer division is intentional here)
            maxyear = int(yearTOP) - (int(minISSUE) / 12)
            yearRANGE.append(str(maxyear))
            print ("there is a " + str(maxyear) + " year variation based on the 12 issues/year")
            # determine a best-guess to # of issues in series
            # this needs to be reworked / refined ALOT more.
            # minISSUE = highest issue #, startISSUE = lowest issue #
            numissues = int(minISSUE) - int(startISSUE)
            # normally minissue would work if the issue #'s started at #1.
        # NOTE(review): if no usable year was found, numissues is never
        # assigned and the findComic() call below raises NameError -- confirm.
        print ("the years involved are : " + str(yearRANGE))
        print ("highest issue # is : " + str(minISSUE))
        print ("lowest issue # is : " + str(startISSUE))
        print ("approximate number of issues : " + str(numissues))
        print ("issues present on system : " + str(len(comicstoIMP)))
        print ("versioning checking:")
        # strip a trailing "v<digits>" token so the search uses the base title
        cnsplit = ComicName.split()
        cnwords = len(cnsplit)
        cnvers = cnsplit[cnwords - 1]
        ogcname = ComicName  # keep the original name for later issue matching
        if 'v' in cnvers:
            print ("possible versioning detected.")
            if cnvers[1:].isdigit():
                print (cnvers + " - assuming versioning. Removing from initial search pattern.")
                ComicName = ComicName[:-((len(cnvers)) + 1)]
                print ("new comicname is : " + str(ComicName))
        # we need to pass the original comicname here into the entire importer
        # module so that we can reference the correct issues later.
        mode = 'series'
        sresults = mb.findComic(ComicName, mode, issue=numissues, limityear=yearRANGE)
        type = 'comic'
        if len(sresults) == 1:
            sr = sresults[0]
            print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
            resultset = 1
            # #need to move the files here.
        elif len(sresults) == 0 or len(sresults) is None:
            print ("no results, removing the year from the agenda and re-querying.")
            sresults = mb.findComic(ComicName, mode, issue=numissues)
            if len(sresults) == 1:
                # NOTE(review): 'sr' is referenced here but never assigned from
                # the retry's sresults -- looks like a missing
                # "sr = sresults[0]"; confirm before relying on this branch.
                print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
                resultset = 1
            else:
                resultset = 0
        else:
            print ("returning results to screen - more than one possibility.")
            resultset = 0
        if resultset == 1:
            # single match: add it directly, carrying the original name along
            self.addComic(comicid=sr['comicid'], comicname=sr['name'], comicyear=sr['comicyear'], comicpublisher=sr['publisher'], comicimage=sr['comicimage'], comicissues=sr['issues'], imported='yes', ogcname=ogcname)
        else:
            # ambiguous: let the user pick from the search results page
            return serve_template(templatename="searchresults.html", title='Import Results for: "' + ComicName + '"', searchresults=sresults, type=type, imported='yes', ogcname=ogcname)
    preSearchit.exposed = True
2013-01-28 20:31:43 +00:00
#---
2012-09-13 15:27:34 +00:00
    def config(self):
        """Render the Settings page: available interface themes, library
        statistics, and every configurable option mapped to template-ready
        values (checkboxes via helpers.checked, radios via helpers.radio)."""
        interface_dir = os.path.join(mylar.PROG_DIR, 'data/interfaces/')
        interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))]

#        branch_history, err = mylar.versioncheck.runGit("log --oneline --pretty=format:'%h - %ar - %s' -n 4")
#        br_hist = branch_history.replace("\n", "<br />\n")

        myDB = db.DBConnection()
        # library statistics shown at the top of the config page
        CCOMICS = myDB.action("SELECT COUNT(*) FROM comics").fetchall()
        CHAVES = myDB.action("SELECT COUNT(*) FROM issues WHERE Status='Downloaded' OR Status='Archived'").fetchall()
        CISSUES = myDB.action("SELECT COUNT(*) FROM issues").fetchall()
        COUNT_COMICS = CCOMICS[0][0]
        COUNT_HAVES = CHAVES[0][0]
        COUNT_ISSUES = CISSUES[0][0]
        comicinfo = {"COUNT_COMICS": COUNT_COMICS,
                     "COUNT_HAVES": COUNT_HAVES,
                     "COUNT_ISSUES": COUNT_ISSUES}

        config = {
            # web server
            "http_host": mylar.HTTP_HOST,
            "http_user": mylar.HTTP_USERNAME,
            "http_port": mylar.HTTP_PORT,
            "http_pass": mylar.HTTP_PASSWORD,
            "launch_browser": helpers.checked(mylar.LAUNCH_BROWSER),
            "logverbose": helpers.checked(mylar.LOGVERBOSE),
            # scheduler intervals
            "download_scan_interval": mylar.DOWNLOAD_SCAN_INTERVAL,
            "nzb_search_interval": mylar.SEARCH_INTERVAL,
            "nzb_startup_search": helpers.checked(mylar.NZB_STARTUP_SEARCH),
            "libraryscan_interval": mylar.LIBRARYSCAN_INTERVAL,
            # SABnzbd
            "sab_host": mylar.SAB_HOST,
            "sab_user": mylar.SAB_USERNAME,
            "sab_api": mylar.SAB_APIKEY,
            "sab_pass": mylar.SAB_PASSWORD,
            "sab_cat": mylar.SAB_CATEGORY,
            "sab_priority": mylar.SAB_PRIORITY,
            "sab_directory": mylar.SAB_DIRECTORY,
            "use_blackhole": helpers.checked(mylar.BLACKHOLE),
            "blackhole_dir": mylar.BLACKHOLE_DIR,
            "usenet_retention": mylar.USENET_RETENTION,
            # NZB providers
            "use_nzbsu": helpers.checked(mylar.NZBSU),
            "nzbsu_api": mylar.NZBSU_APIKEY,
            "use_dognzb": helpers.checked(mylar.DOGNZB),
            "dognzb_api": mylar.DOGNZB_APIKEY,
            "use_nzbx": helpers.checked(mylar.NZBX),
            "use_experimental": helpers.checked(mylar.EXPERIMENTAL),
            "use_newznab": helpers.checked(mylar.NEWZNAB),
            "newznab_host": mylar.NEWZNAB_HOST,
            "newznab_api": mylar.NEWZNAB_APIKEY,
            "newznab_enabled": helpers.checked(mylar.NEWZNAB_ENABLED),
            "extra_newznabs": mylar.EXTRA_NEWZNABS,
            # file handling / renaming
            "destination_dir": mylar.DESTINATION_DIR,
            "replace_spaces": helpers.checked(mylar.REPLACE_SPACES),
            "replace_char": mylar.REPLACE_CHAR,
            "use_minsize": helpers.checked(mylar.USE_MINSIZE),
            "minsize": mylar.MINSIZE,
            "use_maxsize": helpers.checked(mylar.USE_MAXSIZE),
            "maxsize": mylar.MAXSIZE,
            "interface_list": interface_list,
            "autowant_all": helpers.checked(mylar.AUTOWANT_ALL),
            "autowant_upcoming": helpers.checked(mylar.AUTOWANT_UPCOMING),
            "comic_cover_local": helpers.checked(mylar.COMIC_COVER_LOCAL),
            # preferred quality is a radio group (0..3)
            "pref_qual_0": helpers.radio(mylar.PREFERRED_QUALITY, 0),
            "pref_qual_1": helpers.radio(mylar.PREFERRED_QUALITY, 1),
            "pref_qual_3": helpers.radio(mylar.PREFERRED_QUALITY, 3),
            "pref_qual_2": helpers.radio(mylar.PREFERRED_QUALITY, 2),
            "move_files": helpers.checked(mylar.MOVE_FILES),
            "rename_files": helpers.checked(mylar.RENAME_FILES),
            "folder_format": mylar.FOLDER_FORMAT,
            "file_format": mylar.FILE_FORMAT,
            "zero_level": helpers.checked(mylar.ZERO_LEVEL),
            "zero_level_n": mylar.ZERO_LEVEL_N,
            "add_to_csv": helpers.checked(mylar.ADD_TO_CSV),
            "cvinfo": helpers.checked(mylar.CVINFO),
            "lowercase_filenames": helpers.checked(mylar.LOWERCASE_FILENAMES),
            # notifications (Prowl / NotifyMyAndroid)
            "prowl_enabled": helpers.checked(mylar.PROWL_ENABLED),
            "prowl_onsnatch": helpers.checked(mylar.PROWL_ONSNATCH),
            "prowl_keys": mylar.PROWL_KEYS,
            "prowl_priority": mylar.PROWL_PRIORITY,
            "nma_enabled": helpers.checked(mylar.NMA_ENABLED),
            "nma_apikey": mylar.NMA_APIKEY,
            "nma_priority": int(mylar.NMA_PRIORITY),
            "nma_onsnatch": helpers.checked(mylar.NMA_ONSNATCH),
            # post-processing scripts
            "enable_extra_scripts": helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS),
            "extra_scripts": mylar.EXTRA_SCRIPTS,
            "post_processing": helpers.checked(mylar.POST_PROCESSING),
            # version / environment info
            "branch": version.MYLAR_VERSION,
            "br_type": mylar.INSTALL_TYPE,
            "br_version": mylar.versioncheck.getVersion(),
            "py_version": platform.python_version(),
            "data_dir": mylar.DATA_DIR,
            "prog_dir": mylar.PROG_DIR,
            "cache_dir": mylar.CACHE_DIR,
            "config_file": mylar.CONFIG_FILE,
#            "branch_history" : br_hist
            "enable_pre_scripts": helpers.checked(mylar.ENABLE_PRE_SCRIPTS),
            "pre_scripts": mylar.PRE_SCRIPTS,
            "log_dir": mylar.LOG_DIR
        }
        return serve_template(templatename="config.html", title="Settings", config=config, comicinfo=comicinfo)
    config.exposed = True
2013-01-11 21:20:51 +00:00
2013-01-29 09:02:23 +00:00
def error_change ( self , comicid , errorgcd , comicname ) :
# if comicname contains a "," it will break the exceptions import.
import urllib
b = urllib . unquote_plus ( comicname )
cname = b . decode ( " utf-8 " )
cname = re . sub ( " \ , " , " " , cname )
2013-01-11 21:20:51 +00:00
if errorgcd [ : 5 ] . isdigit ( ) :
2013-01-15 17:32:08 +00:00
print ( " GCD-ID detected : " + str ( errorgcd ) [ : 5 ] )
2013-01-29 09:02:23 +00:00
print ( " I ' m assuming you know what you ' re doing - going to force-match for " + cname . encode ( " utf-8 " ) )
self . from_Exceptions ( comicid = comicid , gcdid = errorgcd , comicname = cname )
2013-01-11 21:20:51 +00:00
else :
print ( " Assuming rewording of Comic - adjusting to : " + str ( errorgcd ) )
2013-01-18 09:18:31 +00:00
Err_Info = mylar . cv . getComic ( comicid , ' comic ' )
2013-01-21 18:11:37 +00:00
self . addComic ( comicid = comicid , comicname = str ( errorgcd ) , comicyear = Err_Info [ ' ComicYear ' ] , comicissues = Err_Info [ ' ComicIssues ' ] , comicpublisher = Err_Info [ ' ComicPublisher ' ] )
2013-01-11 21:20:51 +00:00
error_change . exposed = True
2012-09-13 15:27:34 +00:00
2013-01-18 07:32:05 +00:00
def comic_config ( self , com_location , ComicID , alt_search = None , fuzzy_year = None ) :
2012-09-28 15:39:44 +00:00
myDB = db . DBConnection ( )
2013-01-06 08:51:44 +00:00
#--- this is for multipe search terms............
#--- works, just need to redo search.py to accomodate multiple search terms
# ffs_alt = []
# if '+' in alt_search:
#find first +
# ffs = alt_search.find('+')
# ffs_alt.append(alt_search[:ffs])
# ffs_alt_st = str(ffs_alt[0])
# print("ffs_alt: " + str(ffs_alt[0]))
# split the entire string by the delimter +
# ffs_test = alt_search.split('+')
# if len(ffs_test) > 0:
# print("ffs_test names: " + str(len(ffs_test)))
# ffs_count = len(ffs_test)
# n=1
# while (n < ffs_count):
# ffs_alt.append(ffs_test[n])
# print("adding : " + str(ffs_test[n]))
#print("ffs_alt : " + str(ffs_alt))
# ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
# n+=1
# asearch = ffs_alt
# else:
# asearch = alt_search
asearch = str ( alt_search )
2012-09-28 15:39:44 +00:00
controlValueDict = { ' ComicID ' : ComicID }
2013-01-18 07:32:05 +00:00
newValues = { " ComicLocation " : com_location }
2012-09-28 15:39:44 +00:00
#"QUALalt_vers": qual_altvers,
#"QUALScanner": qual_scanner,
#"QUALtype": qual_type,
#"QUALquality": qual_quality
#}
2013-01-18 07:32:05 +00:00
if asearch is not None :
2013-02-06 19:55:23 +00:00
if re . sub ( r ' \ s ' , ' ' , asearch ) == ' ' :
2013-01-18 07:32:05 +00:00
newValues [ ' AlternateSearch ' ] = " None "
else :
newValues [ ' AlternateSearch ' ] = str ( asearch )
2013-02-06 19:55:23 +00:00
else :
newValues [ ' AlternateSearch ' ] = " None "
2013-01-06 08:51:44 +00:00
2013-01-15 22:41:00 +00:00
if fuzzy_year is None :
newValues [ ' UseFuzzy ' ] = " 0 "
else :
newValues [ ' UseFuzzy ' ] = str ( fuzzy_year )
2012-12-20 11:52:21 +00:00
#force the check/creation of directory com_location here
if os . path . isdir ( str ( com_location ) ) :
logger . info ( u " Validating Directory ( " + str ( com_location ) + " ). Already exists! Continuing... " )
else :
logger . fdebug ( " Updated Directory doesn ' t exist! - attempting to create now. " )
try :
os . makedirs ( str ( com_location ) )
logger . info ( u " Directory successfully created at: " + str ( com_location ) )
except OSError :
logger . error ( u " Could not create comicdir : " + str ( com_location ) )
2012-09-28 15:39:44 +00:00
myDB . upsert ( " comics " , newValues , controlValueDict )
2012-09-29 04:56:28 +00:00
raise cherrypy . HTTPRedirect ( " artistPage?ComicID= %s " % ComicID )
2012-09-28 15:39:44 +00:00
comic_config . exposed = True
2012-09-13 15:27:34 +00:00
2013-01-15 17:32:08 +00:00
    def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
                     sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None,
                     usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
                     raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
                     prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0,
                     preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0,
                     destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
        """Copy every submitted settings-form field onto the mylar module
        globals, rebuild the extra-newznab list from kwargs, sanity-check the
        search interval, write the config file and bounce back to Settings.

        Checkbox fields arrive in the POST only when ticked, hence the 0
        defaults; everything else defaults to None/its form default.
        """
        mylar.HTTP_HOST = http_host
        mylar.HTTP_PORT = http_port
        mylar.HTTP_USERNAME = http_username
        mylar.HTTP_PASSWORD = http_password
        mylar.LAUNCH_BROWSER = launch_browser
        mylar.LOGVERBOSE = logverbose
        mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
        mylar.SEARCH_INTERVAL = nzb_search_interval
        mylar.NZB_STARTUP_SEARCH = nzb_startup_search
        mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
        mylar.SAB_HOST = sab_host
        mylar.SAB_USERNAME = sab_username
        mylar.SAB_PASSWORD = sab_password
        mylar.SAB_APIKEY = sab_apikey
        mylar.SAB_CATEGORY = sab_category
        mylar.SAB_PRIORITY = sab_priority
        mylar.SAB_DIRECTORY = sab_directory
        mylar.BLACKHOLE = blackhole
        mylar.BLACKHOLE_DIR = blackhole_dir
        mylar.USENET_RETENTION = usenet_retention
        mylar.NZBSU = nzbsu
        mylar.NZBSU_APIKEY = nzbsu_apikey
        mylar.DOGNZB = dognzb
        mylar.DOGNZB_APIKEY = dognzb_apikey
        mylar.NZBX = nzbx
        mylar.RAW = raw
        mylar.RAW_PROVIDER = raw_provider
        mylar.RAW_USERNAME = raw_username
        mylar.RAW_PASSWORD = raw_password
        mylar.RAW_GROUPS = raw_groups
        mylar.EXPERIMENTAL = experimental
        mylar.NEWZNAB = newznab
        mylar.NEWZNAB_HOST = newznab_host
        mylar.NEWZNAB_APIKEY = newznab_apikey
        mylar.NEWZNAB_ENABLED = newznab_enabled
        mylar.PREFERRED_QUALITY = int(preferred_quality)
        mylar.MOVE_FILES = move_files
        mylar.RENAME_FILES = rename_files
        mylar.REPLACE_SPACES = replace_spaces
        mylar.REPLACE_CHAR = replace_char
        mylar.ZERO_LEVEL = zero_level
        mylar.ZERO_LEVEL_N = zero_level_n
        mylar.ADD_TO_CSV = add_to_csv
        mylar.CVINFO = cvinfo
        mylar.LOWERCASE_FILENAMES = lowercase_filenames
        mylar.PROWL_ENABLED = prowl_enabled
        mylar.PROWL_ONSNATCH = prowl_onsnatch
        mylar.PROWL_KEYS = prowl_keys
        mylar.PROWL_PRIORITY = prowl_priority
        mylar.NMA_ENABLED = nma_enabled
        mylar.NMA_APIKEY = nma_apikey
        mylar.NMA_PRIORITY = nma_priority
        mylar.NMA_ONSNATCH = nma_onsnatch
        mylar.USE_MINSIZE = use_minsize
        mylar.MINSIZE = minsize
        mylar.USE_MAXSIZE = use_maxsize
        mylar.MAXSIZE = maxsize
        mylar.FOLDER_FORMAT = folder_format
        mylar.FILE_FORMAT = file_format
        mylar.DESTINATION_DIR = destination_dir
        mylar.AUTOWANT_ALL = autowant_all
        mylar.AUTOWANT_UPCOMING = autowant_upcoming
        mylar.COMIC_COVER_LOCAL = comic_cover_local
        mylar.INTERFACE = interface
        mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts
        mylar.EXTRA_SCRIPTS = extra_scripts
        mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts
        mylar.POST_PROCESSING = post_processing
        mylar.PRE_SCRIPTS = pre_scripts
        mylar.LOG_DIR = log_dir
        mylar.LOG_LEVEL = log_level

        # Handle the variable config options. Note - keys with False values aren't getting passed
        mylar.EXTRA_NEWZNABS = []

        for kwarg in kwargs:
            if kwarg.startswith('newznab_host'):
                # each extra newznab arrives as a numbered triple of fields:
                # newznab_host<N> / newznab_api<N> / newznab_enabled<N>
                newznab_number = kwarg[12:]
                newznab_host = kwargs['newznab_host' + newznab_number]
                newznab_api = kwargs['newznab_api' + newznab_number]
                try:
                    newznab_enabled = int(kwargs['newznab_enabled' + newznab_number])
                except KeyError:
                    # unticked checkboxes are simply absent from the POST
                    newznab_enabled = 0
                mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_api, newznab_enabled))

        # Sanity checking
        if mylar.SEARCH_INTERVAL < 360:
            logger.info("Search interval too low. Resetting to 6 hour minimum")
            mylar.SEARCH_INTERVAL = 360

        # Write the config
        mylar.config_write()
        raise cherrypy.HTTPRedirect("config")
    configUpdate.exposed = True
def shutdown ( self ) :
mylar . SIGNAL = ' shutdown '
message = ' Shutting Down... '
return serve_template ( templatename = " shutdown.html " , title = " Shutting Down " , message = message , timer = 15 )
return page
shutdown . exposed = True
def restart ( self ) :
mylar . SIGNAL = ' restart '
message = ' Restarting... '
return serve_template ( templatename = " shutdown.html " , title = " Restarting " , message = message , timer = 30 )
restart . exposed = True
def update ( self ) :
mylar . SIGNAL = ' update '
2012-09-17 05:12:40 +00:00
message = ' Updating...<br/><small>Main screen will appear in 60s</small> '
2012-09-13 15:27:34 +00:00
return serve_template ( templatename = " shutdown.html " , title = " Updating " , message = message , timer = 30 )
return page
update . exposed = True
def getInfo ( self , ComicID = None , IssueID = None ) :
from mylar import cache
info_dict = cache . getInfo ( ComicID , IssueID )
return simplejson . dumps ( info_dict )
getInfo . exposed = True
def getComicArtwork ( self , ComicID = None , imageURL = None ) :
from mylar import cache
logger . info ( u " Retrieving image for : " + comicID )
return cache . getArtwork ( ComicID , imageURL )
getComicArtwork . exposed = True