2012-09-13 15:27:34 +00:00
|
|
|
# This file is part of Mylar.
|
|
|
|
#
|
|
|
|
# Mylar is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Mylar is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2013-01-01 20:09:28 +00:00
|
|
|
from __future__ import division
|
2012-09-13 15:27:34 +00:00
|
|
|
|
|
|
|
import mylar
|
2012-12-16 17:57:02 +00:00
|
|
|
from mylar import logger, db, updater, helpers, parseit, findcomicfeed
|
2012-09-13 15:27:34 +00:00
|
|
|
|
|
|
|
# Module-level aliases copied from the global mylar configuration at import
# time.  NOTE(review): these are snapshots -- they will not track later
# runtime changes to mylar.NZBSU_APIKEY / mylar.DOGNZB_APIKEY.
nzbsu_APIkey = mylar.NZBSU_APIKEY
dognzb_APIkey = mylar.DOGNZB_APIKEY

# Directory mylar writes its log files to.
LOG = mylar.LOG_DIR
|
|
|
|
|
2012-12-16 17:57:02 +00:00
|
|
|
import pickle
|
2012-09-13 15:27:34 +00:00
|
|
|
import lib.feedparser as feedparser
|
|
|
|
import urllib
|
|
|
|
import os, errno
|
|
|
|
import string
|
|
|
|
import sqlite3 as lite
|
|
|
|
import sys
|
|
|
|
import getopt
|
|
|
|
import re
|
|
|
|
import time
|
2012-09-27 16:11:10 +00:00
|
|
|
from xml.dom.minidom import parseString
|
|
|
|
import urllib2
|
2012-09-13 15:27:34 +00:00
|
|
|
from datetime import datetime
|
|
|
|
|
2012-12-31 16:52:16 +00:00
|
|
|
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID, AlternateSearch=None):
    """Search every enabled nzb provider for a single issue of a series.

    Providers (nzb.su, dognzb, experimental, newznab) are tried one at a
    time until one reports a hit.  When a provider misses and the series has
    an AlternateSearch name, the search is retried once against that
    provider with the alternate name before moving on.

    Parameters
    ----------
    ComicName : str        series name as held on the watchlist
    IssueNumber : str      issue number being hunted (may contain a decimal)
    ComicYear : str/None   publication year of the issue; None falls back to
                           a default year
    SeriesYear : str       start year of the series
    IssueDate : str        issue store date (YYYY-MM-DD); month is used to
                           widen year matching across the Nov-Dec/Jan rollover
    IssueID : str          internal id of the issue (passed through to
                           NZB_SEARCH for post-processing bookkeeping)
    AlternateSearch : str  optional alternate series name to retry with

    Returns
    -------
    str : 'yes' as soon as any provider finds the issue, otherwise 'no'.
    """
    # Normalize the year: default when unknown, otherwise keep YYYY only.
    if ComicYear is None:
        ComicYear = '2013'
    else:
        ComicYear = str(ComicYear)[:4]

    ##nzb provider selection##
    ##'dognzb' or 'nzb.su' or 'experimental'
    nzbprovider = []
    nzbp = 0
    if mylar.NZBSU == 1:
        nzbprovider.append('nzb.su')
        nzbp += 1
    if mylar.DOGNZB == 1:
        nzbprovider.append('dognzb')
        nzbp += 1
    # --------
    # Xperimental
    if mylar.EXPERIMENTAL == 1:
        nzbprovider.append('experimental')
        nzbp += 1

    # newznab: one optional primary host plus any number of extra hosts.
    # Bind these unconditionally -- the original only assigned them inside
    # the NEWZNAB branch, so 'providercount' (and the newznab search branch)
    # raised NameError whenever newznab was switched off.
    newznab_hosts = []
    newznabs = 0
    if mylar.NEWZNAB == 1:
        logger.fdebug("mylar.newznab:" + str(mylar.NEWZNAB))
        if mylar.NEWZNAB_ENABLED:
            newznab_hosts = [(mylar.NEWZNAB_HOST, mylar.NEWZNAB_APIKEY, mylar.NEWZNAB_ENABLED)]
            logger.fdebug("newznab_hosts:" + str(newznab_hosts))
            logger.fdebug("newznab_enabled:" + str(mylar.NEWZNAB_ENABLED))
            newznabs = 1
        else:
            logger.fdebug("initial newznab provider not enabled...checking for additional newznabs.")

        logger.fdebug("mylar.EXTRA_NEWZNABS:" + str(mylar.EXTRA_NEWZNABS))
        for newznab_host in mylar.EXTRA_NEWZNABS:
            # the enabled flag may be persisted as str or int depending on
            # the age of the config file -- accept both.
            if newznab_host[2] == '1' or newznab_host[2] == 1:
                newznab_hosts.append(newznab_host)
                newznabs += 1
                logger.fdebug("newznab hosts:" + str(newznab_host))

        # Register the newznab provider if ANY host (primary or extra) is
        # enabled.  The original keyed this solely on NEWZNAB_ENABLED, so a
        # config with only extra hosts enabled never searched them.
        if newznab_hosts and 'newznab' not in nzbprovider:
            nzbprovider.append('newznab')
            nzbp += 1

    # --------
    # NOTE(review): 'newznab' is already counted once in nzbp, so this
    # total double-counts when extra hosts exist; kept as-is since it only
    # feeds the log line below.
    providercount = int(nzbp + newznabs)
    logger.fdebug("there are : " + str(providercount) + " search providers you have selected.")
    nzbpr = nzbp - 1
    findit = 'no'

    #fix for issue dates between Nov-Dec/Jan
    IssDt = str(IssueDate)[5:7]
    if IssDt == "12" or IssDt == "11":
        IssDateFix = "yes"
    else:
        IssDateFix = "no"

    # Walk the provider list back-to-front until one of them scores a hit.
    while (nzbpr >= 0):
        if nzbprovider[nzbpr] == 'newznab':
            #this is for newznab
            nzbprov = 'newznab'
            for newznab_host in newznab_hosts:
                logger.fdebug("using newznab_host: " + str(newznab_host))
                findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host)
                if findit == 'yes':
                    logger.fdebug("findit = found!")
                    break
                else:
                    if AlternateSearch is not None:
                        logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear))
                        findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host)
                        if findit == 'yes':
                            break
            if findit == 'yes':
                # Stop the provider loop too.  The original only broke out
                # of the host loop, then fell through and kept querying the
                # remaining providers even after a successful grab.
                break
            nzbpr -= 1

        elif nzbprovider[nzbpr] == 'experimental':
            #this is for experimental
            nzbprov = 'experimental'
            findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
            if findit == 'yes':
                logger.fdebug("findit = found!")
                break
            else:
                if AlternateSearch is not None:
                    logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear))
                    findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
                    if findit == 'yes':
                        break
            nzbpr -= 1

        elif nzbprovider[nzbpr] == 'nzb.su':
            # this is for nzb.su
            nzbprov = 'nzb.su'
            findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
            if findit == 'yes':
                logger.fdebug("findit = found!")
                break
            else:
                if AlternateSearch is not None:
                    logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear))
                    findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
                    if findit == 'yes':
                        break
            nzbpr -= 1

        # ----
        elif nzbprovider[nzbpr] == 'dognzb':
            # this is for dognzb.com
            nzbprov = 'dognzb'
            findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
            if findit == 'yes':
                logger.fdebug("findit = found!")
                break
            else:
                if AlternateSearch is not None:
                    # NOTE(review): this message historically had no space
                    # between the name and year -- preserved verbatim.
                    logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + str(ComicYear))
                    findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
                    if findit == 'yes':
                        break
            nzbpr -= 1

        if nzbpr >= 0 and findit != 'yes':
            logger.info(u"More than one search provider given - trying next one.")
    # ----
    # (the original returned findit twice in a row; one return suffices)
    return findit
|
|
|
|
|
2012-12-16 17:57:02 +00:00
|
|
|
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host=None):
|
|
|
|
logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + "(" + str(ComicYear) + ") using " + str(nzbprov))
|
2012-09-13 15:27:34 +00:00
|
|
|
if nzbprov == 'nzb.su':
|
|
|
|
apikey = mylar.NZBSU_APIKEY
|
|
|
|
elif nzbprov == 'dognzb':
|
|
|
|
apikey = mylar.DOGNZB_APIKEY
|
|
|
|
elif nzbprov == 'experimental':
|
|
|
|
apikey = 'none'
|
2012-12-16 17:57:02 +00:00
|
|
|
elif nzbprov == 'newznab':
|
|
|
|
host_newznab = newznab_host[0]
|
|
|
|
apikey = newznab_host[1]
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("using Newznab host of : " + str(host_newznab))
|
2012-09-13 15:27:34 +00:00
|
|
|
|
|
|
|
if mylar.PREFERRED_QUALITY == 0: filetype = ""
|
|
|
|
elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
|
|
|
|
elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz"
|
|
|
|
|
2012-09-30 13:52:59 +00:00
|
|
|
if mylar.SAB_PRIORITY:
|
|
|
|
if mylar.SAB_PRIORITY == 1: sabpriority = "-100"
|
|
|
|
elif mylar.SAB_PRIORITY == 2: sabpriority = "-1"
|
|
|
|
elif mylar.SAB_PRIORITY == 3: sabpriority = "0"
|
|
|
|
elif mylar.SAB_PRIORITY == 4: sabpriority = "1"
|
|
|
|
elif mylar.SAB_PRIORITY == 5: sabpriority = "-2"
|
|
|
|
else:
|
|
|
|
#if sab priority isn't selected, default to Normal (0)
|
|
|
|
sabpriority = "0"
|
|
|
|
|
2012-09-13 15:27:34 +00:00
|
|
|
# figure out what was missed via rss feeds and do a manual search via api
|
|
|
|
#tsc = int(tot-1)
|
|
|
|
findcomic = []
|
|
|
|
findcomiciss = []
|
|
|
|
findcount = 0
|
|
|
|
ci = ""
|
|
|
|
comsearch = []
|
|
|
|
isssearch = []
|
|
|
|
comyear = str(ComicYear)
|
|
|
|
|
|
|
|
#print ("-------SEARCH FOR MISSING------------------")
|
|
|
|
findcomic.append(str(ComicName))
|
2012-12-31 16:52:16 +00:00
|
|
|
# this should be called elsewhere..redudant code.
|
|
|
|
if '.' in IssueNumber:
|
|
|
|
isschk_find = IssueNumber.find('.')
|
|
|
|
isschk_b4dec = IssueNumber[:isschk_find]
|
|
|
|
isschk_decval = IssueNumber[isschk_find+1:]
|
|
|
|
logger.fdebug("IssueNumber: " + str(IssueNumber))
|
|
|
|
logger.fdebug("..before decimal: " + str(isschk_b4dec))
|
|
|
|
logger.fdebug("...after decimal: " + str(isschk_decval))
|
|
|
|
#--let's make sure we don't wipe out decimal issues ;)
|
|
|
|
if int(isschk_decval) == 0:
|
|
|
|
iss = isschk_b4dec
|
|
|
|
intdec = int(isschk_decval)
|
|
|
|
else:
|
|
|
|
if len(isschk_decval) == 1:
|
|
|
|
iss = isschk_b4dec + "." + isschk_decval
|
|
|
|
intdec = int(isschk_decval) * 10
|
|
|
|
else:
|
|
|
|
iss = isschk_b4dec + "." + isschk_decval.rstrip('0')
|
|
|
|
intdec = int(isschk_decval.rstrip('0')) * 10
|
|
|
|
|
|
|
|
logger.fdebug("let's search with this issue value: " + str(iss))
|
|
|
|
#Issue_Number = carry-over with decimals
|
|
|
|
#iss = clean issue number (no decimals)
|
|
|
|
intIss = (int(isschk_b4dec) * 1000) + intdec
|
|
|
|
logger.fdebug("int.issue :" + str(intIss))
|
|
|
|
logger.fdebug("int.issue_b4: " + str(isschk_b4dec))
|
|
|
|
logger.fdebug("int.issue_dec: " + str(intdec))
|
|
|
|
IssueNumber = iss
|
|
|
|
#issue_decimal = re.compile(r'[^\d.]+')
|
|
|
|
#issue = issue_decimal.sub('', str(IssueNumber))
|
|
|
|
findcomiciss.append(iss)
|
|
|
|
|
2012-09-13 15:27:34 +00:00
|
|
|
#print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount]))
|
|
|
|
# replace whitespace in comic name with %20 for api search
|
2012-10-22 07:08:55 +00:00
|
|
|
cm1 = re.sub(" ", "%20", str(findcomic[findcount]))
|
|
|
|
cm = re.sub("\&", "%26", str(cm1))
|
2012-09-13 15:27:34 +00:00
|
|
|
#print (cmi)
|
2013-01-01 20:09:28 +00:00
|
|
|
if '.' in findcomiciss[findcount]:
|
|
|
|
if len(str(isschk_b4dec)) == 3:
|
|
|
|
cmloopit = 1
|
|
|
|
elif len(str(isschk_b4dec)) == 2:
|
|
|
|
cmloopit = 2
|
|
|
|
elif len(str(isschk_b4dec)) == 1:
|
|
|
|
cmloopit = 3
|
2012-09-13 15:27:34 +00:00
|
|
|
else:
|
2013-01-01 20:09:28 +00:00
|
|
|
if len(str(findcomiciss[findcount])) == 1:
|
|
|
|
cmloopit = 3
|
|
|
|
elif len(str(findcomiciss[findcount])) == 2:
|
|
|
|
cmloopit = 2
|
|
|
|
else:
|
|
|
|
cmloopit = 1
|
2012-09-13 15:27:34 +00:00
|
|
|
isssearch.append(str(findcomiciss[findcount]))
|
|
|
|
comsearch.append(cm)
|
|
|
|
findcount+=1
|
|
|
|
|
|
|
|
# ----
|
|
|
|
|
|
|
|
#print ("------RESULTS OF SEARCH-------------------")
|
|
|
|
findloop = 0
|
|
|
|
foundcomic = []
|
2013-01-01 20:09:28 +00:00
|
|
|
done = False
|
2012-09-13 15:27:34 +00:00
|
|
|
#---issue problem
|
|
|
|
# if issue is '011' instead of '11' in nzb search results, will not have same
|
|
|
|
# results. '011' will return different than '11', as will '009' and '09'.
|
|
|
|
|
|
|
|
while (findloop < (findcount) ):
|
|
|
|
comsrc = comsearch[findloop]
|
|
|
|
while (cmloopit >= 1 ):
|
2013-01-01 20:09:28 +00:00
|
|
|
if done is True:
|
|
|
|
logger.fdebug("we should break out now - sucessful search previous")
|
|
|
|
findloop == 99
|
|
|
|
break
|
2012-09-13 15:27:34 +00:00
|
|
|
# here we account for issue pattern variations
|
|
|
|
if cmloopit == 3:
|
|
|
|
comsearch[findloop] = comsrc + "%2000" + isssearch[findloop] + "%20" + str(filetype)
|
|
|
|
elif cmloopit == 2:
|
|
|
|
comsearch[findloop] = comsrc + "%200" + isssearch[findloop] + "%20" + str(filetype)
|
|
|
|
elif cmloopit == 1:
|
|
|
|
comsearch[findloop] = comsrc + "%20" + isssearch[findloop] + "%20" + str(filetype)
|
2013-01-01 20:09:28 +00:00
|
|
|
logger.fdebug("comsearch: " + str(comsearch))
|
|
|
|
logger.fdebug("cmloopit: " + str(cmloopit))
|
|
|
|
logger.fdebug("done: " + str(done))
|
2012-09-13 15:27:34 +00:00
|
|
|
if nzbprov != 'experimental':
|
|
|
|
if nzbprov == 'dognzb':
|
|
|
|
findurl = "http://dognzb.cr/api?t=search&apikey=" + str(apikey) + "&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030"
|
|
|
|
elif nzbprov == 'nzb.su':
|
2012-12-22 03:33:29 +00:00
|
|
|
findurl = "http://www.nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
|
2012-12-16 17:57:02 +00:00
|
|
|
elif nzbprov == 'newznab':
|
2012-12-20 10:39:37 +00:00
|
|
|
findurl = str(host_newznab) + "api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
|
|
|
|
logger.fdebug("search-url: " + str(findurl))
|
2012-09-13 15:27:34 +00:00
|
|
|
bb = feedparser.parse(findurl)
|
|
|
|
elif nzbprov == 'experimental':
|
2012-12-16 17:57:02 +00:00
|
|
|
#bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
|
|
|
|
bb = findcomicfeed.Startit(cm, isssearch[findloop], comyear)
|
|
|
|
# since the regexs in findcomicfeed do the 3 loops, lets force the exit after
|
|
|
|
cmloopit == 1
|
2012-09-13 15:27:34 +00:00
|
|
|
done = False
|
|
|
|
foundc = "no"
|
2012-12-16 17:57:02 +00:00
|
|
|
log2file = ""
|
2012-09-13 15:27:34 +00:00
|
|
|
if bb == "no results":
|
|
|
|
pass
|
|
|
|
foundc = "no"
|
|
|
|
else:
|
|
|
|
for entry in bb['entries']:
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("checking search result: " + str(entry['title']))
|
2012-12-16 17:57:02 +00:00
|
|
|
thisentry = str(entry['title'])
|
|
|
|
logger.fdebug("Entry: " + str(thisentry))
|
2012-12-20 10:39:37 +00:00
|
|
|
cleantitle = re.sub('[_/.]', ' ', str(entry['title']))
|
2012-09-27 16:11:10 +00:00
|
|
|
cleantitle = helpers.cleanName(str(cleantitle))
|
2012-10-30 10:43:01 +00:00
|
|
|
nzbname = cleantitle
|
|
|
|
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("Cleantitle: " + str(cleantitle))
|
2012-09-27 16:11:10 +00:00
|
|
|
if len(re.findall('[^()]+', cleantitle)) == 1: cleantitle = "abcdefghijk 0 (1901).cbz"
|
2012-09-13 15:27:34 +00:00
|
|
|
if done:
|
|
|
|
break
|
|
|
|
#let's narrow search down - take out year (2010), (2011), etc
|
|
|
|
#let's check for first occurance of '(' as generally indicates
|
|
|
|
#that the 'title' has ended
|
2012-09-24 05:17:29 +00:00
|
|
|
|
|
|
|
ripperlist=['digital-',
|
|
|
|
'empire',
|
|
|
|
'dcp']
|
|
|
|
#this takes care of the brackets :)
|
|
|
|
m = re.findall('[^()]+', cleantitle)
|
|
|
|
lenm = len(m)
|
2012-09-27 16:11:10 +00:00
|
|
|
|
2012-09-24 05:17:29 +00:00
|
|
|
#print ("there are " + str(lenm) + " words.")
|
|
|
|
cnt = 0
|
2012-10-16 17:26:28 +00:00
|
|
|
yearmatch = "false"
|
|
|
|
|
2012-09-24 05:17:29 +00:00
|
|
|
while (cnt < lenm):
|
|
|
|
if m[cnt] is None: break
|
2012-12-20 10:39:37 +00:00
|
|
|
if m[cnt] == ' ':
|
|
|
|
pass
|
|
|
|
else:
|
|
|
|
logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt]))
|
2012-09-24 05:17:29 +00:00
|
|
|
if cnt == 0:
|
|
|
|
comic_andiss = m[cnt]
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("Comic: " + str(comic_andiss))
|
2012-09-24 05:17:29 +00:00
|
|
|
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("year detected: " + str(m[cnt]))
|
2012-09-24 05:17:29 +00:00
|
|
|
result_comyear = m[cnt]
|
|
|
|
if str(comyear) in result_comyear:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug(str(comyear) + " - right years match baby!")
|
2012-09-24 05:17:29 +00:00
|
|
|
yearmatch = "true"
|
|
|
|
else:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug(str(comyear) + " - not right - years do not match")
|
2012-09-24 05:17:29 +00:00
|
|
|
yearmatch = "false"
|
2013-01-01 20:09:28 +00:00
|
|
|
#let's do this hear and save a few extra loops ;)
|
|
|
|
#fix for issue dates between Nov-Dec/Jan
|
|
|
|
if IssDateFix == "yes":
|
|
|
|
ComicYearFix = int(ComicYear) + 1
|
|
|
|
if str(ComicYearFix) in result_comyear:
|
|
|
|
logger.fdebug("further analysis reveals this was published inbetween Nov-Jan, incrementing year to " + str(ComicYearFix) + " has resulted in a match!")
|
|
|
|
yearmatch = "true"
|
|
|
|
|
2012-09-24 05:17:29 +00:00
|
|
|
if 'digital' in m[cnt] and len(m[cnt]) == 7:
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("digital edition detected")
|
2012-09-24 05:17:29 +00:00
|
|
|
pass
|
|
|
|
if ' of ' in m[cnt]:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("mini-series detected : " + str(m[cnt]))
|
2012-09-24 05:17:29 +00:00
|
|
|
result_of = m[cnt]
|
|
|
|
if 'cover' in m[cnt]:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("covers detected: " + str(m[cnt]))
|
2012-09-24 05:17:29 +00:00
|
|
|
result_comcovers = m[cnt]
|
|
|
|
for ripper in ripperlist:
|
|
|
|
if ripper in m[cnt]:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("Scanner detected: " + str(m[cnt]))
|
2012-09-24 05:17:29 +00:00
|
|
|
result_comscanner = m[cnt]
|
|
|
|
cnt+=1
|
|
|
|
|
2012-09-27 16:11:10 +00:00
|
|
|
if yearmatch == "false": continue
|
2012-09-24 05:17:29 +00:00
|
|
|
|
|
|
|
splitit = []
|
|
|
|
watchcomic_split = []
|
2012-12-23 18:16:25 +00:00
|
|
|
logger.fdebug("original nzb comic and issue: " + str(comic_andiss))
|
|
|
|
#changed this from '' to ' '
|
|
|
|
comic_iss_b4 = re.sub('[\-\:\,]', ' ', str(comic_andiss))
|
2012-12-16 17:57:02 +00:00
|
|
|
comic_iss = comic_iss_b4.replace('.',' ')
|
|
|
|
logger.fdebug("adjusted nzb comic and issue: " + str(comic_iss))
|
2012-09-24 05:17:29 +00:00
|
|
|
splitit = comic_iss.split(None)
|
2012-12-16 17:57:02 +00:00
|
|
|
#something happened to dognzb searches or results...added a '.' in place of spaces
|
|
|
|
#screwed up most search results with dognzb. Let's try to adjust.
|
2012-12-23 18:16:25 +00:00
|
|
|
#watchcomic_split = findcomic[findloop].split(None)
|
2012-12-31 16:52:16 +00:00
|
|
|
|
|
|
|
if splitit[(len(splitit)-1)].isdigit():
|
|
|
|
#compares - if the last digit and second last digit are #'s seperated by spaces assume decimal
|
|
|
|
comic_iss = splitit[(len(splitit)-1)]
|
|
|
|
splitst = len(splitit) - 1
|
2013-01-01 20:09:28 +00:00
|
|
|
if splitit[(len(splitit)-2)].isdigit():
|
|
|
|
# for series that have a digit at the end, it screws up the logistics.
|
|
|
|
i = 1
|
|
|
|
chg_comic = splitit[0]
|
|
|
|
while (i < (len(splitit)-1)):
|
|
|
|
chg_comic = chg_comic + " " + splitit[i]
|
|
|
|
i+=1
|
|
|
|
logger.fdebug("chg_comic:" + str(chg_comic))
|
|
|
|
if chg_comic.upper() == findcomic[findloop].upper():
|
|
|
|
logger.fdebug("series contains numerics...adjusting..")
|
|
|
|
else:
|
|
|
|
changeup = "." + splitit[(len(splitit)-1)]
|
|
|
|
logger.fdebug("changeup to decimal: " + str(changeup))
|
|
|
|
comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss
|
|
|
|
splitst = len(splitit) - 2
|
2013-01-06 08:51:44 +00:00
|
|
|
else:
|
|
|
|
# if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb
|
|
|
|
logger.fdebug("invalid naming format of nzb detected - cannot properly determine issue")
|
|
|
|
continue
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
|
2012-12-31 16:52:16 +00:00
|
|
|
#bmm = re.findall('v\d', comic_iss)
|
|
|
|
#if len(bmm) > 0: splitst = len(splitit) - 2
|
|
|
|
#else: splitst = len(splitit) - 1
|
|
|
|
|
2012-12-23 18:16:25 +00:00
|
|
|
# make sure that things like - in watchcomic are accounted for when comparing to nzb.
|
2013-01-06 08:51:44 +00:00
|
|
|
watchcomic_split = re.sub('[\-\:\,\.]', ' ', findcomic[findloop]).split(None)
|
2013-01-01 20:09:28 +00:00
|
|
|
|
2012-12-23 18:16:25 +00:00
|
|
|
logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst))
|
|
|
|
logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
|
2012-09-24 05:17:29 +00:00
|
|
|
if (splitst) != len(watchcomic_split):
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("incorrect comic lengths...not a match")
|
2012-09-24 05:17:29 +00:00
|
|
|
if str(splitit[0]).lower() == "the":
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("THE word detected...attempting to adjust pattern matching")
|
2012-09-24 05:17:29 +00:00
|
|
|
splitit[0] = splitit[4:]
|
2012-09-13 15:27:34 +00:00
|
|
|
else:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("length match..proceeding")
|
2012-09-13 15:27:34 +00:00
|
|
|
n = 0
|
|
|
|
scount = 0
|
2013-01-03 10:01:07 +00:00
|
|
|
logger.fdebug("search-length: " + str(splitst))
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
|
2013-01-03 10:01:07 +00:00
|
|
|
while ( n <= (splitst)-1 ):
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("splitit: " + str(splitit[n]))
|
2013-01-03 10:01:07 +00:00
|
|
|
if n < (splitst) and n < len(watchcomic_split):
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
|
2012-09-24 05:17:29 +00:00
|
|
|
if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("word matched on : " + str(splitit[n]))
|
2012-09-24 05:17:29 +00:00
|
|
|
scount+=1
|
|
|
|
#elif ':' in splitit[n] or '-' in splitit[n]:
|
|
|
|
# splitrep = splitit[n].replace('-', '')
|
|
|
|
# print ("non-character keyword...skipped on " + splitit[n])
|
2012-10-21 15:31:42 +00:00
|
|
|
elif str(splitit[n].lower()).startswith('v'):
|
2013-01-06 08:51:44 +00:00
|
|
|
logger.fdebug("possible versioning..checking")
|
2012-10-21 15:31:42 +00:00
|
|
|
#we hit a versioning # - account for it
|
|
|
|
if splitit[n][1:].isdigit():
|
|
|
|
comicversion = str(splitit[n])
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("version found: " + str(comicversion))
|
2012-09-13 15:27:34 +00:00
|
|
|
else:
|
2012-12-31 16:52:16 +00:00
|
|
|
logger.fdebug("Comic / Issue section")
|
2012-09-24 05:17:29 +00:00
|
|
|
if splitit[n].isdigit():
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("issue detected")
|
2012-12-31 16:52:16 +00:00
|
|
|
#comiss = splitit[n]
|
2012-09-24 05:17:29 +00:00
|
|
|
comicNAMER = n - 1
|
|
|
|
comNAME = splitit[0]
|
|
|
|
cmnam = 1
|
2012-12-16 17:57:02 +00:00
|
|
|
while (cmnam <= comicNAMER):
|
2012-09-24 05:17:29 +00:00
|
|
|
comNAME = str(comNAME) + " " + str(splitit[cmnam])
|
|
|
|
cmnam+=1
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("comic: " + str(comNAME))
|
2012-09-24 05:17:29 +00:00
|
|
|
else:
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("non-match for: "+ str(splitit[n]))
|
2012-09-24 05:17:29 +00:00
|
|
|
pass
|
2012-09-13 15:27:34 +00:00
|
|
|
n+=1
|
2012-12-16 17:57:02 +00:00
|
|
|
#set the match threshold to 80% (for now)
|
|
|
|
# if it's less than 80% consider it a non-match and discard.
|
2013-01-01 20:09:28 +00:00
|
|
|
#splitit has to splitit-1 because last position is issue.
|
|
|
|
wordcnt = int(scount)
|
|
|
|
logger.fdebug("scount:" + str(wordcnt))
|
2013-01-03 10:01:07 +00:00
|
|
|
totalcnt = int(splitst)
|
2013-01-01 20:09:28 +00:00
|
|
|
logger.fdebug("splitit-len:" + str(totalcnt))
|
|
|
|
spercent = (wordcnt/totalcnt) * 100
|
|
|
|
logger.fdebug("we got " + str(spercent) + " percent.")
|
|
|
|
if int(spercent) >= 80:
|
|
|
|
logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
|
|
|
|
if int(spercent) < 80:
|
|
|
|
logger.fdebug("failure - we only got " + str(spercent) + "% right!")
|
|
|
|
continue
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug("this should be a match!")
|
2012-12-31 16:52:16 +00:00
|
|
|
logger.fdebug("issue we are looking for is : " + str(findcomiciss[findloop]))
|
|
|
|
logger.fdebug("integer value of issue we are looking for : " + str(intIss))
|
|
|
|
|
|
|
|
#redudant code - should be called elsewhere...
|
|
|
|
if '.' in comic_iss:
|
|
|
|
comisschk_find = comic_iss.find('.')
|
|
|
|
comisschk_b4dec = comic_iss[:comisschk_find]
|
|
|
|
comisschk_decval = comic_iss[comisschk_find+1:]
|
|
|
|
logger.fdebug("Found IssueNumber: " + str(comic_iss))
|
|
|
|
logger.fdebug("..before decimal: " + str(comisschk_b4dec))
|
|
|
|
logger.fdebug("...after decimal: " + str(comisschk_decval))
|
|
|
|
#--let's make sure we don't wipe out decimal issues ;)
|
|
|
|
if int(comisschk_decval) == 0:
|
|
|
|
ciss = comisschk_b4dec
|
|
|
|
cintdec = int(comisschk_decval)
|
|
|
|
else:
|
|
|
|
if len(comisschk_decval) == 1:
|
|
|
|
ciss = comisschk_b4dec + "." + comisschk_decval
|
|
|
|
cintdec = int(comisschk_decval) * 10
|
|
|
|
else:
|
|
|
|
ciss = comisschk_b4dec + "." + comisschk_decval.rstrip('0')
|
|
|
|
cintdec = int(comisschk_decval.rstrip('0')) * 10
|
|
|
|
comintIss = (int(comisschk_b4dec) * 1000) + cintdec
|
|
|
|
else:
|
|
|
|
comintIss = int(comic_iss) * 1000
|
|
|
|
logger.fdebug("issue we found for is : " + str(comic_iss))
|
|
|
|
logger.fdebug("integer value of issue we are found : " + str(comintIss))
|
|
|
|
|
2012-09-13 15:27:34 +00:00
|
|
|
#issue comparison now as well
|
2012-12-31 16:52:16 +00:00
|
|
|
if int(intIss) == int(comintIss):
|
2012-12-16 17:57:02 +00:00
|
|
|
logger.fdebug('issues match!')
|
2012-09-27 16:13:48 +00:00
|
|
|
logger.info(u"Found " + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) )
|
2012-09-13 15:27:34 +00:00
|
|
|
## -- inherit issue. Comic year is non-standard. nzb year is the year
|
|
|
|
## -- comic was printed, not the start year of the comic series and
|
|
|
|
## -- thus the deciding component if matches are correct or not
|
|
|
|
linkstart = os.path.splitext(entry['link'])[0]
|
|
|
|
#following is JUST for nzb.su
|
2012-12-16 17:57:02 +00:00
|
|
|
if nzbprov == 'nzb.su' or nzbprov == 'newznab':
|
2012-09-13 15:27:34 +00:00
|
|
|
linkit = os.path.splitext(entry['link'])[1]
|
|
|
|
linkit = linkit.replace("&", "%26")
|
|
|
|
linkapi = str(linkstart) + str(linkit)
|
|
|
|
else:
|
|
|
|
# this should work for every other provider
|
|
|
|
linkstart = linkstart.replace("&", "%26")
|
|
|
|
linkapi = str(linkstart)
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("link given by: " + str(nzbprov))
|
|
|
|
logger.fdebug("link: " + str(linkstart))
|
|
|
|
logger.fdebug("linkforapi: " + str(linkapi))
|
2012-09-13 15:27:34 +00:00
|
|
|
#here we distinguish between rename and not.
|
2012-09-24 05:17:29 +00:00
|
|
|
#blackhole functinality---
|
2012-09-13 15:27:34 +00:00
|
|
|
#let's download the file to a temporary cache.
|
|
|
|
|
|
|
|
if mylar.BLACKHOLE:
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("using blackhole directory at : " + str(mylar.BLACKHOLE_DIR))
|
2012-09-13 15:27:34 +00:00
|
|
|
if os.path.exists(mylar.BLACKHOLE_DIR):
|
|
|
|
filenamenzb = str(ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ").nzb"
|
2012-09-30 07:50:27 +00:00
|
|
|
urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb))
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb))
|
2012-09-13 15:27:34 +00:00
|
|
|
logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) )
|
2013-01-01 20:38:54 +00:00
|
|
|
nzbname = str(ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ")"
|
2012-09-13 15:27:34 +00:00
|
|
|
#end blackhole
|
|
|
|
|
|
|
|
else:
|
2012-12-22 03:33:29 +00:00
|
|
|
if nzbprov != 'nzb.su':
|
|
|
|
tmppath = mylar.CACHE_DIR
|
|
|
|
if os.path.exists(tmppath):
|
|
|
|
logger.fdebug("cache directory successfully found at : " + str(tmppath))
|
|
|
|
pass
|
2012-10-30 10:43:01 +00:00
|
|
|
else:
|
2012-12-22 03:33:29 +00:00
|
|
|
#let's make the dir.
|
|
|
|
logger.fdebug("couldn't locate cache directory, attempting to create at : " + str(mylar.CACHE_DIR))
|
|
|
|
try:
|
|
|
|
os.makedirs(str(mylar.CACHE_DIR))
|
|
|
|
logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
|
|
|
|
|
|
|
|
except OSError.e:
|
|
|
|
if e.errno != errno.EEXIST:
|
|
|
|
raise
|
|
|
|
|
|
|
|
filenamenzb = os.path.split(linkapi)[1]
|
|
|
|
#filenzb = os.path.join(tmppath,filenamenzb)
|
|
|
|
logger.fdebug("unalterted nzb name: " + str(filenamenzb))
|
|
|
|
#let's send a clean copy to SAB because the names are random characters and/or could be stupid.
|
|
|
|
ComicName = re.sub('[\:\,]', '', ComicName)
|
|
|
|
filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
|
|
|
|
logger.fdebug("prettified nzb name: " + str(filenzb))
|
|
|
|
|
|
|
|
if mylar.RENAME_FILES == 1:
|
|
|
|
logger.fdebug("Rename Files enabled..")
|
|
|
|
filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
|
|
|
|
logger.fdebug("this should be the same as prettified nzb name:" + str(filenzb))
|
|
|
|
if mylar.REPLACE_SPACES:
|
|
|
|
logger.fdebug("Replace spaces option enabled")
|
|
|
|
logger.fdebug("replace character: " + str(mylar.REPLACE_CHAR))
|
|
|
|
repchar = mylar.REPLACE_CHAR
|
|
|
|
repurlchar = mylar.REPLACE_CHAR
|
|
|
|
else:
|
|
|
|
logger.fdebug("Replace spaces option NOT enabled")
|
|
|
|
repchar = ' '
|
|
|
|
repurlchar = "%20"
|
|
|
|
#let's make sure there's no crap in the ComicName since it's O.G.
|
|
|
|
logger.fdebug("original Name of comic: " + str(ComicName))
|
|
|
|
ComicNM = re.sub('[\:\,]', '', str(ComicName))
|
|
|
|
logger.fdebug("altered Name of comic: " + str(ComicNM))
|
|
|
|
renameit = str(ComicNM) + " " + str(IssueNumber) + " (" + str(SeriesYear) + ")" + " " + "(" + str(comyear) + ")"
|
|
|
|
logger.fdebug("altered Name with additional info: " + str(renameit))
|
|
|
|
renamethis = renameit.replace(' ', repchar)
|
|
|
|
logger.fdebug("...with replace spaces: " + str(renamethis))
|
|
|
|
renamer1 = renameit.replace(' ', repurlchar)
|
|
|
|
renamer = re.sub("\&", "%26", str(renamer1))
|
|
|
|
logger.fdebug("...adjusting for url restrictions: " + str(renamer))
|
|
|
|
|
|
|
|
filenext = str(filenzb) + ".nzb"
|
|
|
|
savefile = os.path.join(tmppath, filenext)
|
|
|
|
logger.fdebug("nzb file to be saved: " + str(savefile))
|
2012-10-30 10:43:01 +00:00
|
|
|
|
2012-12-22 03:33:29 +00:00
|
|
|
try:
|
|
|
|
urllib.urlretrieve(linkapi, str(savefile))
|
|
|
|
except urllib.URLError:
|
|
|
|
logger.fdebug(u"Unable to retrieve nzb using link: " + str(linkapi))
|
|
|
|
logger.fdebug(u"Possibly unable to save nzb: " + str(savefile))
|
|
|
|
logger.error(u"Unable to retrieve nzb file.")
|
|
|
|
return
|
|
|
|
|
|
|
|
if os.path.getsize(str(savefile)) == 0:
|
|
|
|
logger.error(u"nzb size detected as zero bytes.")
|
|
|
|
continue
|
|
|
|
|
|
|
|
logger.info(u"Sucessfully retrieved nzb file using " + str(nzbprov))
|
|
|
|
nzbname = str(filenzb)
|
|
|
|
|
|
|
|
elif nzbprov == 'nzb.su':
|
|
|
|
logger.fdebug("NZB.SU - linkapi:" + str(linkapi))
|
|
|
|
nzbname = re.sub(" ", "_", str(entry['title']))
|
2012-10-08 07:13:18 +00:00
|
|
|
|
2012-12-23 18:16:25 +00:00
|
|
|
logger.fdebug("nzbname used for post-processing:" + str(nzbname))
|
|
|
|
|
|
|
|
# let's build the send-to-SAB string now:
|
|
|
|
tmpapi = str(mylar.SAB_HOST)
|
|
|
|
logger.fdebug("send-to-SAB host string: " + str(tmpapi))
|
|
|
|
# nzb.su only works with direct links for some reason...
|
|
|
|
if nzbprov == 'nzb.su':
|
|
|
|
SABtype = "/api?mode=addurl&name="
|
|
|
|
savefileURL = str(linkapi)
|
|
|
|
else:
|
|
|
|
SABtype = "/api?mode=addlocalfile&name="
|
|
|
|
# if the savefile location has spaces in the path, could cause problems.
|
2012-12-27 15:04:03 +00:00
|
|
|
# if the savefile has a &, escape it otherwise will botch up send-to-SAB link
|
2012-12-23 18:16:25 +00:00
|
|
|
# let's adjust.
|
2012-12-27 15:04:03 +00:00
|
|
|
saveF = re.sub("\&", "%26", str(savefile))
|
|
|
|
savefileURL = re.sub(" ","%20", str(saveF))
|
2012-12-23 18:16:25 +00:00
|
|
|
tmpapi = tmpapi + str(SABtype)
|
|
|
|
logger.fdebug("...selecting API type: " + str(tmpapi))
|
|
|
|
tmpapi = tmpapi + str(savefileURL)
|
|
|
|
logger.fdebug("...attaching nzbfile: " + str(tmpapi))
|
|
|
|
# determine SAB priority
|
|
|
|
if mylar.SAB_PRIORITY:
|
|
|
|
tmpapi = tmpapi + "&priority=" + str(sabpriority)
|
|
|
|
logger.fdebug("...setting priority: " + str(tmpapi))
|
|
|
|
# if category is blank, let's adjust
|
|
|
|
if mylar.SAB_CATEGORY:
|
|
|
|
tmpapi = tmpapi + "&cat=" + str(mylar.SAB_CATEGORY)
|
|
|
|
logger.fdebug("...attaching category: " + str(tmpapi))
|
|
|
|
if mylar.RENAME_FILES == 1:
|
|
|
|
tmpapi = tmpapi + "&script=ComicRN.py"
|
|
|
|
logger.fdebug("...attaching rename script: " + str(tmpapi))
|
|
|
|
#final build of send-to-SAB
|
|
|
|
tmpapi = tmpapi + "&apikey=" + str(mylar.SAB_APIKEY)
|
|
|
|
|
2012-12-21 20:09:00 +00:00
|
|
|
logger.fdebug("Completed send-to-SAB link: " + str(tmpapi))
|
|
|
|
|
2012-09-27 16:11:10 +00:00
|
|
|
try:
|
|
|
|
urllib2.urlopen(tmpapi)
|
|
|
|
except urllib2.URLError:
|
|
|
|
logger.error(u"Unable to send nzb file to SABnzbd")
|
|
|
|
return
|
|
|
|
|
|
|
|
logger.info(u"Successfully sent nzb file to SABnzbd")
|
2012-12-22 03:33:29 +00:00
|
|
|
#delete the .nzb now.
|
|
|
|
if mylar.PROG_DIR is not "/" and nzbprov != 'nzb.su':
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("preparing to remove temporary nzb file at: " + str(savefile))
|
2012-10-30 10:43:01 +00:00
|
|
|
os.remove(savefile)
|
|
|
|
logger.info(u"Removed temporary save file")
|
2012-09-13 15:27:34 +00:00
|
|
|
#raise an exception to break out of loop
|
|
|
|
foundc = "yes"
|
|
|
|
done = True
|
|
|
|
break
|
|
|
|
else:
|
2012-12-16 17:57:02 +00:00
|
|
|
log2file = log2file + "issues don't match.." + "\n"
|
2012-09-13 15:27:34 +00:00
|
|
|
foundc = "no"
|
2013-01-01 20:09:28 +00:00
|
|
|
if done == True:
|
|
|
|
cmloopit == 1 #let's make sure it STOPS searching after a sucessful match.
|
|
|
|
break
|
2012-09-13 15:27:34 +00:00
|
|
|
cmloopit-=1
|
|
|
|
findloop+=1
|
|
|
|
if foundc == "yes":
|
|
|
|
foundcomic.append("yes")
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
|
2012-10-30 10:43:01 +00:00
|
|
|
updater.nzblog(IssueID, nzbname)
|
2012-12-16 17:57:02 +00:00
|
|
|
nzbpr == 0
|
2013-01-02 17:56:46 +00:00
|
|
|
#break
|
|
|
|
return foundc
|
2012-09-13 15:27:34 +00:00
|
|
|
elif foundc == "no" and nzbpr == 0:
|
|
|
|
foundcomic.append("no")
|
2012-12-20 10:39:37 +00:00
|
|
|
logger.fdebug("couldn't find a matching comic")
|
2012-10-09 06:33:14 +00:00
|
|
|
if IssDateFix == "no":
|
|
|
|
logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
|
|
|
|
break
|
2012-09-13 15:27:34 +00:00
|
|
|
return foundc
|
|
|
|
|
|
|
|
def searchforissue(issueid=None, new=False):
    """Search the configured nzb providers for one issue, or for every
    issue currently marked Wanted.

    issueid -- IssueID of a single issue to search for; when None, all
               issues with Status="Wanted" are searched.
    new     -- forced to True on a full "Wanted" sweep (kept for callers
               that pass it explicitly).

    A successful find is handed to updater.foundsearch() so the issue's
    status gets updated; nothing is returned.
    """
    myDB = db.DBConnection()

    # Searching only makes sense if at least one provider AND a SAB host
    # are configured.
    providers_ok = (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB) and (mylar.SAB_HOST)

    if not issueid:
        # Sweep everything flagged as Wanted.
        results = myDB.select('SELECT * from issues WHERE Status="Wanted"')

        new = True

        for result in results:
            comic = myDB.action('SELECT * from comics WHERE ComicID=?', [result['ComicID']]).fetchone()
            foundNZB = "none"
            SeriesYear = comic['ComicYear']
            AlternateSearch = comic['AlternateSearch']
            IssueDate = result['IssueDate']
            # Fall back to the series year when the issue has no date.
            if result['IssueDate'] is None:
                ComicYear = comic['ComicYear']
            else:
                ComicYear = str(result['IssueDate'])[:4]

            if providers_ok:
                foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch)
                if foundNZB == "yes":
                    updater.foundsearch(result['ComicID'], result['IssueID'])
    else:
        # Targeted search for a single issue.
        result = myDB.action('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
        ComicID = result['ComicID']
        comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
        SeriesYear = comic['ComicYear']
        AlternateSearch = comic['AlternateSearch']
        IssueDate = result['IssueDate']
        # Fall back to the series year when the issue has no date.
        if result['IssueDate'] is None:
            IssueYear = comic['ComicYear']
        else:
            IssueYear = str(result['IssueDate'])[:4]

        foundNZB = "none"
        if providers_ok:
            foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch)
            if foundNZB == "yes":
                updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'])
2012-10-16 08:16:29 +00:00
|
|
|
|
|
|
|
def searchIssueIDList(issuelist):
    """Search the configured nzb providers for each IssueID in issuelist.

    issuelist -- iterable of IssueID values to look up and search for.

    Each hit is passed to updater.foundsearch() to update the issue's
    status; nothing is returned.
    """
    myDB = db.DBConnection()
    for issueid in issuelist:
        issue = myDB.action('SELECT * from issues WHERE IssueID=?', [issueid]).fetchone()
        comic = myDB.action('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone()
        print ("Checking for issue: " + str(issue['Issue_Number']))
        foundNZB = "none"
        SeriesYear = comic['ComicYear']
        AlternateSearch = comic['AlternateSearch']
        # Fall back to the series year when the issue has no date.
        if issue['IssueDate'] is None:
            ComicYear = comic['ComicYear']
        else:
            ComicYear = str(issue['IssueDate'])[:4]
        # Only search when at least one provider and a SAB host are set.
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB) and (mylar.SAB_HOST):
            foundNZB = search_init(comic['ComicName'], issue['Issue_Number'], str(ComicYear), comic['ComicYear'], issue['IssueDate'], issue['IssueID'], AlternateSearch)
            if foundNZB == "yes":
                updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'])