Several bug fixes (hopefully without breaking anything else): new search, better adds, force-check now works, and other bugs fixed.

This commit is contained in:
evilhero 2012-09-24 01:17:29 -04:00
parent d3660049cc
commit 8482cba507
10 changed files with 228 additions and 219 deletions

View File

@ -128,8 +128,7 @@
</tr>
</table>
</div>
<div id="tabs-3">
<div id="tabs-3">
<table class="comictable" summary="Edit Settings">
<tr>
<td>
@ -141,34 +140,7 @@
</td>
<td>
<fieldset>
<form action="comic_configUpdate" method="post" class="form" id="comic_configUpdate">
<div class="row">
<label>Directory Location</label>
<input type="text" name="comiclocation" value="1" size="45">
<small>the directory where all the comics are for this comic</small>
</div>
<div class="row">
<label>Alternate versions</label>
<input type="text" name="qualaltvers" value="1" size="30">
<small>if the comic is v5 or whatever, enter 'v5' here</small>
</div>
<div class="row">
<label>Scanner</label>
<input type="text" name="qualscanner" value="1" size="20">
<small>list preference of scanner</small>
</div>
<div class="row">
<label>type</label>
<input type="text" name="qualtype" value="1" size="36">
<small>c2c / noads</small>
</div>
<div class="row">
<label>Quality</label>
<input type="text" name="qualquality" value="1" size="20">
<small>resolution of scan (ie.1440px)</small>
</div>
</fieldset>
</form>
</fieldset>
</td>
</tr>

View File

@ -101,7 +101,7 @@ FILE_FORMAT = None
REPLACE_SPACES = False
REPLACE_CHAR = None
AUTOWANT_UPCOMING = False
AUTOWANT_UPCOMING = True
AUTOWANT_ALL = False
SAB_HOST = None
@ -230,7 +230,7 @@ def initialize():
DOWNLOAD_SCAN_INTERVAL = check_setting_int(CFG, 'General', 'download_scan_interval', 5)
INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default')
AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1))
PREFERRED_QUALITY = check_setting_int(CFG, 'General', 'preferred_quality', 0)
CORRECT_METADATA = bool(check_setting_int(CFG, 'General', 'correct_metadata', 0))
MOVE_FILES = bool(check_setting_int(CFG, 'General', 'move_files', 0))
@ -303,7 +303,7 @@ def initialize():
CONFIG_VERSION = '2'
if 'http://' not in SAB_HOST[:7]:
if 'http://' not in SAB_HOST[:7] and 'https://' not in SAB_HOST[:8]:
SAB_HOST = 'http://' + SAB_HOST
#print ("SAB_HOST:" + SAB_HOST)

View File

@ -25,8 +25,7 @@ from bs4 import BeautifulSoup as Soup
def getComic(comicid,type):
comicapi='583939a3df0a25fc4e8b7a29934a13078002dc27'
#api
#http://api.comicvine.com/search/?api_key=583939a3df0a25fc4e8b7a29934a13078002dc27&resources=volume
PULLURL='http://api.comicvine.com/volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,description,count_of_issues,start_year,last_issue,site_detail_url,image,publisher'
PULLURL='http://api.comicvine.com/volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,start_year,last_issue,site_detail_url,image,publisher'
#import library to do http requests:
import urllib2
@ -63,45 +62,46 @@ def getComic(comicid,type):
if type == 'issue': return GetIssuesInfo(comicid,dom)
def GetComicInfo(comicid,dom):
#comicvine isn't as up-to-date with issue counts..
#so this can get really buggered, really fast.
tracks = dom.getElementsByTagName('name')
tracks = dom.getElementsByTagName('issue')
cntit = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
trackcnt = len(tracks)
if str(trackcnt) != str(int(cntit)+2):
cntit = int(cntit) + 1
n = 0
# if the two don't match, use trackcnt as count_of_issues might be not upto-date for some reason
if int(trackcnt) != int(cntit):
cntit = trackcnt
vari = "yes"
else: vari = "no"
#if str(trackcnt) != str(int(cntit)+2):
# cntit = int(cntit) + 1
comic = {}
comicchoice = []
cntit = int(cntit)
for track in tracks:
#retrieve the first xml tag (<tag>data</tag>)
#that the parser finds with name tagName:
comic['ComicName'] = dom.getElementsByTagName('name')[cntit].firstChild.wholeText
comic['ComicYear'] = dom.getElementsByTagName('start_year')[n].firstChild.wholeText
comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[n].firstChild.wholeText
comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[n].firstChild.wholeText
#comic['ComicDesc'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
comic['ComicImage'] = dom.getElementsByTagName('super_url')[n].firstChild.wholeText
comic['ComicPublisher'] = dom.getElementsByTagName('name')[cntit+1].firstChild.wholeText
#comic['description'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
#comdescst = comic['description'].find('</p>')
#comdesc = comic['description'][:comdescst]
#print ("Description: " + str(comdesc))
#retrieve the first xml tag (<tag>data</tag>)
#that the parser finds with name tagName:
comic['ComicName'] = dom.getElementsByTagName('name')[trackcnt].firstChild.wholeText
comic['ComicName'] = comic['ComicName'].rstrip()
comic['ComicYear'] = dom.getElementsByTagName('start_year')[0].firstChild.wholeText
comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
if vari == "yes":
comic['ComicIssues'] = str(cntit)
else:
comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
comic['ComicImage'] = dom.getElementsByTagName('super_url')[0].firstChild.wholeText
comic['ComicPublisher'] = dom.getElementsByTagName('name')[trackcnt+1].firstChild.wholeText
comicchoice.append({
'ComicName': comic['ComicName'],
'ComicYear': comic['ComicYear'],
'Comicid': comicid,
'ComicURL': comic['ComicURL'],
'ComicIssues': comic['ComicIssues'],
# 'ComicDesc': comic['ComicDesc'],
'ComicImage': comic['ComicImage'],
'ComicPublisher': comic['ComicPublisher']
})
comic['comicchoice'] = comicchoice
comicchoice.append({
'ComicName': comic['ComicName'],
'ComicYear': comic['ComicYear'],
'Comicid': comicid,
'ComicURL': comic['ComicURL'],
'ComicIssues': comic['ComicIssues'],
'ComicImage': comic['ComicImage'],
'ComicPublisher': comic['ComicPublisher']
})
comic['comicchoice'] = comicchoice
return comic
def GetIssuesInfo(comicid,dom):

View File

@ -41,7 +41,7 @@ def listFiles(dir,watchcomic):
subname = subname.replace('_', ' ')
if watchcomic.lower() in subname.lower():
if 'annual' in subname.lower():
print ("it's an annual - unsure how to proceed")
#print ("it's an annual - unsure how to proceed")
break
comicpath = os.path.join(basedir, item)
#print ( watchcomic + " - watchlist match on : " + comicpath)

View File

@ -45,8 +45,6 @@ def addComictoDB(comicid):
myDB = db.DBConnection()
# myDB.action('DELETE from blacklist WHERE ComicID=?', [comicid])
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
@ -88,7 +86,8 @@ def addComictoDB(comicid):
if gcdinfo == "No Match":
logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
updater.no_searchresults(comicid)
return
nomatch = "true"
return nomatch
logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] )
# print ("Series Published" + parseit.resultPublished)
#--End
@ -117,7 +116,6 @@ def addComictoDB(comicid):
if e.errno != errno.EEXIST:
raise
#print ("root dir for series: " + comlocation)
#try to account for CV not updating new issues as fast as GCD
#seems CV doesn't update total counts
#comicIssues = gcdinfo['totalissues']
@ -125,6 +123,7 @@ def addComictoDB(comicid):
comicIssues = str(int(comic['ComicIssues']) + 1)
else:
comicIssues = comic['ComicIssues']
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
@ -196,7 +195,7 @@ def addComictoDB(comicid):
except IndexError:
#account for gcd variation here
if gcdinfo['gcdvariation'] == 'gcd':
print ("gcd-variation accounted for.")
#print ("gcd-variation accounted for.")
issdate = '0000-00-00'
int_issnum = int ( issis / 1000 )
break
@ -277,6 +276,11 @@ def addComictoDB(comicid):
logger.info(u"Updating complete for: " + comic['ComicName'])
# lets' check the pullist for anyting at this time as well since we're here.
#if mylar.AUTOWANT_UPCOMING:
# logger.info(u"Checking this week's pullist for new issues of " + str(comic['ComicName']))
# updater.newpullcheck()
#here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])

View File

@ -98,12 +98,11 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicid: " + str(comicid) )
comicnm = re.sub(' ', '%20', comicnm)
comicnm = re.sub(' ', '+', comicnm)
#input = 'http://www.comics.org/series/name/' + str(comicnm) + '/sort/alpha/'
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
@ -161,10 +160,11 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
#as GCD does. Therefore, let's increase the CV count by 1 to get it
#to match, any more variation could cause incorrect matching.
#ie. witchblade on GCD says 159 issues, CV states 161.
if resultIssues[n] == Total or resultIssues[n] == str(int(Total)+1) or str(int(resultIssues[n])+1) == Total:
if resultIssues[n] == str(int(Total)+1):
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total)+1 or (int(resultIssues[n])+1) == int(Total):
#print ("initial issue match..continuing.")
if int(resultIssues[n]) == int(Total)+1:
issvariation = "cv"
elif str(int(resultIssues[n])+1) == Total:
elif int(resultIssues[n])+1 == int(Total):
issvariation = "gcd"
else:
issvariation = "no"
@ -181,7 +181,19 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
# it's possible that comicvine would return a comic name incorrectly, or gcd
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
# this section is to account for variations in spelling, punctuation, etc/
basnumbs = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
#print ("numbs:" + str(numbs))
if numbs in ComicName.lower():
numconv = basnumbs[numbs]
#print ("numconv: " + str(numconv))
ComicNm = re.sub(str(numbs), str(numconv), ComicName.lower())
#print ("comicname-reVISED:" + str(ComicNm))
return GCDScraper(ComicNm, ComicYear, Total, ComicID)
break
if ComicName.startswith('The '):
ComicName = ComicName[4:]
return GCDScraper(ComicName, ComicYear, Total, ComicID)
@ -193,7 +205,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if 'and' in ComicName.lower():
ComicName = ComicName.replace('and', '&')
return GCDScraper(ComicName, ComicYear, Total, ComicID)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
return 'No Match'
gcdinfo = {}
gcdchoice = []

View File

@ -34,37 +34,29 @@ import time
from datetime import datetime
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear):
#print ("ComicName:" + ComicName)
#print ("Issue:" + str(IssueNumber))
if ComicYear == None: ComicYear = '2012'
else: ComicYear = str(ComicYear)[:4]
#print ("ComicYear:" + str(ComicYear))
#print ("SeriesYear:" + str(SeriesYear))
##nzb provider selection##
##'dognzb' or 'nzb.su'
##'dognzb' or 'nzb.su' or 'experimental'
nzbprovider = []
nzbp = 0
if mylar.NZBSU == 1:
nzbprovider.append('nzb.su')
nzbp+=1
#print ("nzb.su search activated")
if mylar.DOGNZB == 1:
nzbprovider.append('dognzb')
nzbp+=1
#print ("dognzb search activated")
# --------
# Xperimental
if mylar.EXPERIMENTAL == 1:
nzbprovider.append('experimental')
nzbp+=1
#print ("Experimental raw search activated!")
# --------
nzbpr = nzbp-1
while (nzbpr >= 0 ):
if nzbprovider[nzbpr] == 'experimental':
#this is for experimental
nzbprov = 'experimental'
#print ("engaging experimental search for " + str(ComicName) + " " + str(IssueNumber))
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr)
if findit == 'yes':
break
@ -172,7 +164,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
if mylar.PREFERRED_QUALITY == 0: filetype = ""
elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz"
# search dognzb via api!
# figure out what was missed via rss feeds and do a manual search via api
#tsc = int(tot-1)
@ -194,9 +185,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
# replace whitespace in comic name with %20 for api search
cm = re.sub(" ", "%20", str(findcomic[findcount]))
#print (cmi)
#---issue problem
# if issue is '011' instead of '11' in nzb search results, will not have same
# results. '011' will return different than '11', as will '009' and '09'.
if len(str(findcomiciss[findcount])) == 1:
cmloopit = 3
elif len(str(findcomiciss[findcount])) == 2:
@ -251,7 +239,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
pass
elif (len(bb['entries']) == 0):
#print ("Nothing found for : " + str(findcomic[findloop]) + " Issue: #" + str(findcomiciss[findloop]))
#print ("Will try search again in 60 minutes...")
foundc = "no"
else:
#print ("Found for: " + str(findcomic[findloop]))
@ -265,52 +252,98 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
#let's narrow search down - take out year (2010), (2011), etc
#let's check for first occurance of '(' as generally indicates
#that the 'title' has ended
comlen = str(cleantitle).find(' (')
comsub = str(cleantitle)[:comlen]
#print("first bracket occurs at position: " + str(comlen))
#print("actual name with iss: " + str(comsub))
#we need to now determine the last position BEFORE the issue number
#take length of findcomic (add 1 for space) and subtract comlen
#will result in issue
comspos = comsub.rfind(" ")
#print ("last space @ position: " + str(comspos) )
#print ("COMLEN: " + str(comlen) )
comiss = comsub[comspos:comlen]
# -- we need to change if there is no space after issue #
# -- and bracket ie...star trek tng 1(c2c)(2012) etc
# --
#print ("the comic issue is actually: #" + str(comiss))
splitit = []
splitcomp = []
comyx = comsub[:comspos]
#print ("comyx: " + str(comyx))
splitchk = comyx.replace(" - ", " ")
splitit = splitchk.split(None)
#print (str(splitit))
splitcomp = findcomic[findloop].split(None)
#print ( "split length:" + str(len(splitit)) )
if len(splitit) != len(splitcomp):
ripperlist=['digital-',
'empire',
'dcp']
#this takes care of the brackets :)
# m = re.findall(r"\((\w+)\)", cleantitle)
m = re.findall('[^()]+', cleantitle)
lenm = len(m)
#print ("there are " + str(lenm) + " words.")
cnt = 0
while (cnt < lenm):
if m[cnt] is None: break
if m[cnt] == ' ': print ("space detected")
#print (str(cnt) + ". Bracket Word: " + m[cnt] )
if cnt == 0:
comic_andiss = m[cnt]
#print ("Comic:" + str(comic_andiss))
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
#print ("year detected!")
result_comyear = m[cnt]
if str(comyear) in result_comyear:
#print (str(comyear) + " - right - years match baby!")
yearmatch = "true"
else:
#print (str(comyear) + " - not right - years don't match ")
yearmatch = "false"
if 'digital' in m[cnt] and len(m[cnt]) == 7:
pass
#print ("digital edition")
if ' of ' in m[cnt]:
#print ("mini-series detected : " + str(m[cnt]))
result_of = m[cnt]
if 'cover' in m[cnt]:
#print ("covers detected")
result_comcovers = m[cnt]
for ripper in ripperlist:
if ripper in m[cnt]:
#print ("Scanner detected:" + str(m[cnt]))
result_comscanner = m[cnt]
cnt+=1
if yearmatch == "false": break
splitit = []
watchcomic_split = []
comic_iss = re.sub('[\-\:\,]', '', str(comic_andiss))
splitit = comic_iss.split(None)
watchcomic_split = findcomic[findloop].split(None)
bmm = re.findall('v\d', comic_iss)
#print ("vers - " + str(bmm))
if len(bmm) > 0: splitst = len(splitit) - 2
else: splitst = len(splitit) - 1
if (splitst) != len(watchcomic_split):
#print ("incorrect comic lengths...not a match")
if str(comyx[:3]).lower() == "the":
if str(splitit[0]).lower() == "the":
#print ("THE word detected...attempting to adjust pattern matching")
splitMOD = splitchk[4:]
splitit = splitMOD.split(None)
splitit[0] = splitit[4:]
else:
#print ("length match..proceeding")
n = 0
scount = 0
while ( n <= (len(splitit)-1) ):
#print ("Comparing: " + splitcomp[n] + " .to. " + splitit[n] )
if str(splitcomp[n].lower()) in str(splitit[n].lower()):
#print ("word matched on : " + splitit[n])
scount+=1
elif ':' in splitit[n] or '-' in splitit[n]:
splitrep = splitit[n].replace('-', '')
#print ("non-character keyword...skipped on " + splitit[n])
pass
#print ("length:" + str(len(splitit)))
while ( n <= len(splitit)-1 ):
if n < len(splitit)-1:
#print ( str(n) + ". Comparing: " + watchcomic_split[n] + " .to. " + splitit[n] )
if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
#print ("word matched on : " + splitit[n])
scount+=1
#elif ':' in splitit[n] or '-' in splitit[n]:
# splitrep = splitit[n].replace('-', '')
# print ("non-character keyword...skipped on " + splitit[n])
elif len(splitit[n]) < 3 or (splitit[n][1:]) == "v":
#print ("possible verisoning..checking")
#we hit a versioning # - account for it
if splitit[n][2:].isdigit():
comicversion = str(splitit[n])
#print ("version found:" + str(comicversion))
else:
#print ("non-match for: " + splitit[n])
pass
if splitit[n].isdigit():
#print ("issue detected")
comiss = splitit[n]
comicNAMER = n - 1
comNAME = splitit[0]
cmnam = 1
while (cmnam < comicNAMER):
comNAME = str(comNAME) + " " + str(splitit[cmnam])
cmnam+=1
#print ("comic: " + str(comNAME))
else:
#print ("non-match for: " + splitit[n])
pass
n+=1
spercent = ( scount/int(len(splitit)) ) * 100
#print (str(spercent) + "% match")
@ -318,52 +351,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
#if spercent < 75: print ("failure - we only got " + str(spercent) + "% right!")
#print ("this should be a match!")
#issue comparison now as well
#print ("comiss:" + str(comiss))
#print ("findcomiss:" + str(findcomiciss[findloop]))
if int(findcomiciss[findloop]) == int(comiss):
#print ("issues match!")
#check for 'extra's - ie. Year
comex = str(cleantitle)[comlen:]
comspl = comex.split()
LENcomspl = len(comspl)
n = 0
while (LENcomspl > n):
if str(comyear) not in comspl[n]:
#print (str(comyear) + " - not right year baby!")
yearmatch = "false"
break
else:
#print (str(comyear) + " - years match baby!")
yearmatch = "true"
break
n+=1
if yearmatch == "false": break
## -- start.
## -- start.
## -- inherit issue. Comic year is non-standard. nzb year is the year
## -- comic was printed, not the start year of the comic series and
## -- thus the deciding component if matches are correct or not
## -- check to see if directory exists for given comic
#splitcom = ComicName.replace(' ', '_')
# here we should decide if adding year or not and format
#comyear = '_(2012)'
#compath = '/mount/mediavg/Comics/Comics/' + str(splitcom) + str(comyear)
#print ("The directory should be: " + str(compath))
#if os.path.isdir(str(compath)):
# print("Directory exists!")
#else:
# print ("Directory doesn't exist!")
# try:
# os.makedirs(str(compath))
# print ("Directory successfully created at: " + str(compath))
# except OSError.e:
# if e.errno != errno.EEXIST:
# raise
## -- end.
linkstart = os.path.splitext(entry['link'])[0]
print ("linkstart:" + str(linkstart))
#following is JUST for nzb.su
if nzbprov == 'nzb.su':
linkit = os.path.splitext(entry['link'])[1]
@ -375,7 +369,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
linkstart = linkstart.replace("&", "%26")
linkapi = str(linkstart)
#here we distinguish between rename and not.
#blackhole functionality---
#blackhole functinality---
#let's download the file to a temporary cache.
if mylar.BLACKHOLE:
@ -386,24 +380,32 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
#end blackhole
else:
tmppath = "cache/"
if nzbprov == 'nzb.su':
filenzb = linkstart[21:]
elif nzbprov == 'experimental':
filenzb = os.path.splitext(linkapi)[0][31:]
elif nzbprov == 'dognzb':
filenamenzb = os.path.splitext(linkapi)[0][23:]
lenfilenzb = filenamenzb.find('/', 23)
filenzb = str(filenamenzb)[:lenfilenzb]
tmppath = mylar.CACHE_DIR
if os.path.exists(tmppath):
savefile = str(mylar.PROG_DIR) + "/" + str(tmppath) + str(filenzb) + ".nzb"
filenamenzb = os.path.split(linkapi)[1]
#filenzb = os.path.join(tmppath,filenamenzb)
if nzbprov == 'nzb.su':
filenzb = linkstart[21:]
if nzbprov == 'experimental':
filenzb = filenamenzb[6:]
if nzbprov == 'dognzb':
filenzb == str(filenamenzb)
savefile = str(tmppath) + "/" + str(filenzb) + ".nzb"
else:
savefile = str(mylar.PROG_DIR) + "/" + str(filenzb) + ".nzb"
print ("savefile:" + str(savefile))
#let's make the dir.
try:
os.makedirs(str(mylar.CACHE_DIR))
logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
savefile = str(mylar.CACHE_DIR) + "/" + str(filenzb) + ".nzb"
except OSError.e:
if e.errno != errno.EEXIST:
raise
urllib.urlretrieve(linkapi, str(savefile))
print ("retrieved file ")
#check sab for current pause status
#print (str(mylar.RENAME_FILES))
#check sab for current pause status
sabqstatusapi = str(mylar.SAB_HOST) + "/api?mode=qstatus&output=xml&apikey=" + str(mylar.SAB_APIKEY)
from xml.dom.minidom import parseString
import urllib2
@ -412,41 +414,46 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
file.close()
dom = parseString(data)
for node in dom.getElementsByTagName('paused'):
pausestatus = node.firstChild.wholeText
print pausestatus
pausestatus = node.firstChild.wholeText
#print pausestatus
if pausestatus != 'True':
#pause sab first because it downloads too quick (cbr's are small!)
#pause sab first because it downloads too quick (cbr's are small!)
pauseapi = str(mylar.SAB_HOST) + "/api?mode=pause&apikey=" + str(mylar.SAB_APIKEY)
urllib.urlopen(pauseapi);
#print "Queue paused"
#else:
#print "Queue already paused"
#print "Queue already paused"
if mylar.RENAME_FILES == 1:
#print ("Saved file to: " + str(savefile))
tmpapi = str(mylar.SAB_HOST) + "/api?mode=addlocalfile&name=" + str(savefile) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
else:
tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
#print (str(tmpapi))
time.sleep(5)
urllib.urlopen(tmpapi);
print ("sent file to sab:" + str(tmpapi))
if mylar.RENAME_FILES == 1:
#let's give it 5 extra seconds to retrieve the nzb data...
time.sleep(5)
outqueue = str(mylar.SAB_HOST) + "/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=" + str(mylar.SAB_APIKEY)
#print ("outqueue line generated")
urllib.urlopen(outqueue);
time.sleep(5)
#print ("passed api request to SAB")
#<slots><slot><filename>.nzb filename
#chang nzbfilename to include series(SAB will auto rename based on this)
#api?mode=queue&name=rename&value=<filename_nzi22ks>&value2=NEWNAME
from xml.dom.minidom import parseString
import urllib2
file = urllib2.urlopen(outqueue);
data = file.read()
file.close()
dom = parseString(data)
queue_slots = dom.getElementsByTagName('filename')
queue_cnt = len(queue_slots)
print ("there are " + str(queue_cnt) + " things in SABnzbd's queue")
#print ("there are " + str(queue_cnt) + " things in SABnzbd's queue")
que = 0
slotmatch = "no"
for queue in queue_slots:
@ -461,27 +468,33 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
file.close()
dom = parseString(data)
queue_file = dom.getElementsByTagName('filename')[que].firstChild.wholeText
print ("queue File:" + str(queue_file))
print ("nzb File: " + str(filenzb))
queue_file = queue_file.replace("_", " ")
#print (str(queue_file))
#print (str(filenzb))
queue_file = queue_file.replace("_", " ")
if str(queue_file) in str(filenzb):
print ("matched")
#print ("matched")
slotmatch = "yes"
slot_nzoid = dom.getElementsByTagName('nzo_id')[que].firstChild.wholeText
print ("slot_nzoid: " + str(slot_nzoid))
#print ("slot_nzoid: " + str(slot_nzoid))
break
que+=1
if slotmatch == "yes":
renameit = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(SeriesYear) + ")" + "_" + "(" + str(comyear) + ")"
if mylar.REPLACE_SPACES:
repchar = mylar.REPLACE_CHAR
else:
repchar = ' '
#let's make sure there's no crap in the ComicName since it's O.G.
ComicNM = re.sub('[\:\,]', '', str(ComicName))
renameit = str(ComicNM) + " " + str(IssueNumber) + " (" + str(SeriesYear) + ")" + " " + "(" + str(comyear) + ")"
renameit = renameit.replace(' ', repchar)
nzo_ren = str(mylar.SAB_HOST) + "/api?mode=queue&name=rename&apikey=" + str(mylar.SAB_APIKEY) + "&value=" + str(slot_nzoid) + "&value2=" + str(renameit)
print ("attempting to rename queue to " + str(nzo_ren))
#print ("attempting to rename queue to " + str(nzo_ren))
urllib2.urlopen(nzo_ren);
print ("renamed!")
#print ("renamed!")
#delete the .nzb now.
#delnzb = str(mylar.PROG_DIR) + "/" + str(filenzb) + ".nzb"
if mylar.PROG_DIR is not "/":
os.remove(savefile)
print ("removed :" + str(savefile))
#if mylar.PROG_DIR is not "/":
#os.remove(delnzb)
#we need to track nzo_id to make sure finished downloaded with SABnzbd.
#controlValueDict = {"nzo_id": str(slot_nzoid)}
#newValueDict = {"ComicName": str(ComicName),
@ -508,27 +521,19 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
else:
#print ("issues don't match..")
foundc = "no"
#else:
#print ("this probably isn't the right match as the titles don't match")
#foundcomic.append("no")
#foundc = "no"
if done == True: break
cmloopit-=1
findloop+=1
if foundc == "yes":
foundcomic.append("yes")
#print ("we just found Issue: " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + ")" )
logger.info(u"Found :" + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov))
break
elif foundc == "no" and nzbpr <> 0:
logger.info(u"More than one search provider given - trying next one.")
#print ("Couldn't find with " + str(nzbprov) + ". More than one search provider listed, trying next option" )
elif foundc == "no" and nzbpr == 0:
foundcomic.append("no")
#print ("couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + ")" )
logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
break
#print (foundc)
return foundc
def searchforissue(issueid=None, new=False):
@ -560,7 +565,6 @@ def searchforissue(issueid=None, new=False):
pass
#print ("not found!")
else:
#print ("attempting to configure search parameters...")
result = myDB.action('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
ComicID = result['ComicID']
comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
@ -572,7 +576,6 @@ def searchforissue(issueid=None, new=False):
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
#print ("entering search parameters...")
foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'])
if foundNZB == "yes":
#print ("found!")

View File

@ -69,6 +69,11 @@ def weekly_update(ComicName):
newValue = {"STATUS": "Skipped"}
myDB.upsert("weekly", newValue, controlValue)
def newpullcheck():
# When adding a new comic, let's check for new issues on this week's pullist and update.
weeklypull.pullitcheck()
return
def no_searchresults(ComicID):
# when there's a mismatch between CV & GCD - let's change the status to
# something other than 'Loaded'
@ -88,7 +93,7 @@ def foundsearch(ComicID, IssueID):
#fixed and addressed in search.py and follow-thru here!
#check sab history for completion here :)
CYear = issue['IssueDate'][:4]
print ("year:" + str(CYear))
#print ("year:" + str(CYear))
#slog = myDB.action('SELECT * FROM sablog WHERE ComicName=? AND ComicYEAR=?', [issue['ComicName'], str(CYear)]).fetchone()
#this checks the active queue for downloading/non-existant jobs
#--end queue check
@ -136,13 +141,19 @@ def forceRescan(ComicID):
fcnew = []
n = 0
reissues = myDB.action('SELECT * FROM issues WHERE ComicID=?', [ComicID]).fetchall()
while (n < iscnt):
reiss = reissues[n]
while (n <= iscnt):
try:
reiss = reissues[n]
except IndexError:
break
int_iss = reiss['Int_IssueNumber']
fn = 0
haveissue = "no"
while (fn < fccnt):
tmpfc = fc['comiclist'][fn]
while (fn < fccnt):
try:
tmpfc = fc['comiclist'][fn]
except IndexError:
break
temploc = tmpfc['ComicFilename'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
if 'annual' not in temploc:

View File

@ -118,7 +118,9 @@ class WebInterface(object):
def deleteArtist(self, ComicID):
myDB = db.DBConnection()
comic = myDB.action('SELECT * from comics WHERE ComicID=?', [ComicID]).fetchone()
logger.info(u"Deleting all traces of Comic: " + comic['ComicName'])
if comic['ComicName'] is None: ComicName = "None"
else: ComicName = comic['ComicName']
logger.info(u"Deleting all traces of Comic: " + str(ComicName))
myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
raise cherrypy.HTTPRedirect("home")
@ -297,6 +299,10 @@ class WebInterface(object):
#mvcontroldict = {"ComicID": mvupcome['ComicID']}
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues)
upcoming.exposed = True
def searchScan(self, name):
return serve_template(templatename="searchfix.html", title="Manage", name=name)
searchScan.exposed = True
def manage(self):
return serve_template(templatename="manage.html", title="Manage")
@ -311,7 +317,7 @@ class WebInterface(object):
def manageIssues(self):
myDB = db.DBConnection()
issues = myDB.select('SELECT * from issues')
return serve_template(templatename="manageissus.html", title="Manage Issues", issues=issues)
return serve_template(templatename="manageissues.html", title="Manage Issues", issues=issues)
manageIssues.exposed = True
def manageNew(self):

View File

@ -123,6 +123,7 @@ def pullit():
#print ("today: " + str(pulldate))
if pulldate == shipdaterep:
logger.info(u"No new pull-list available - will re-check again in 24 hours.")
pullitcheck()
return
break
else: