mirror of https://github.com/evilhero/mylar
FIX:(#2179) Post-processing item would fail if a match occurred during the story-arc check, FIX: Fixed some sub-directory problems when performing various types of scans, IMP: Added booktype to filechecker parsing results, FIX: When downloading via DDL, booktype was not being honoured as a search-result constraint during matching
This commit is contained in:
parent ccbe865f68
commit 43ca4825e5
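The last two hunks below carry the DDL booktype fix: a search result is only accepted when its parsed booktype agrees with the series' booktype, with a 'Print' series treated as matching results parsed as plain issues. A minimal standalone sketch of that predicate (the function name and sample values are illustrative, not mylar API):

def booktype_matches(booktype, parsed_booktype):
    # 'Print' series accept results parsed as plain issues; any other
    # booktype (TPB, GN, ...) must match the parsed result exactly.
    if booktype == 'Print' and parsed_booktype == 'issue':
        return True
    return booktype == parsed_booktype

print(booktype_matches('Print', 'issue'))  # True
print(booktype_matches('Print', 'TPB'))    # False - result is skipped
print(booktype_matches('TPB', 'TPB'))      # True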
@@ -345,35 +345,35 @@ class PostProcessor(object):
     def Process(self):
         module = self.module
-        self._log("nzb name: " + self.nzb_name)
-        self._log("nzb folder: " + self.nzb_folder)
-        logger.fdebug(module + ' nzb name: ' + self.nzb_name)
-        logger.fdebug(module + ' nzb folder: ' + self.nzb_folder)
+        self._log('nzb name: %s' % self.nzb_name)
+        self._log('nzb folder: %s' % self.nzb_folder)
+        logger.fdebug('%s nzb name: %s' % (module, self.nzb_name))
+        logger.fdebug('%s nzb folder: %s' % (module, self.nzb_folder))
+        if self.ddl is False:
             if mylar.USE_SABNZBD==1:
                 if self.nzb_name != 'Manual Run':
-                    logger.fdebug(module + ' Using SABnzbd')
-                    logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name)
+                    logger.fdebug('%s Using SABnzbd' % module)
+                    logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name))

                 if self.nzb_name == 'Manual Run':
-                    logger.fdebug(module + ' Manual Run Post-Processing enabled.')
+                    logger.fdebug('%s Manual Run Post-Processing enabled.' % module)
                 else:
                     # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                     if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']):
                         self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
-                        logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder)
+                        logger.fdebug('%s SABnzbd Download folder option enabled. Directory set to : %s' % (module, self.nzb_folder))

             if mylar.USE_NZBGET==1:
                 if self.nzb_name != 'Manual Run':
-                    logger.fdebug(module + ' Using NZBGET')
-                    logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name)
+                    logger.fdebug('%s Using NZBGET' % module)
+                    logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name))
                 # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
                 if self.nzb_name == 'Manual Run':
-                    logger.fdebug(module + ' Manual Run Post-Processing enabled.')
+                    logger.fdebug('%s Manual Run Post-Processing enabled.' % module)
                 elif all([mylar.CONFIG.NZBGET_DIRECTORY is not None, mylar.CONFIG.NZBGET_DIRECTORY is not 'None']):
-                    logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name)
+                    logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name))
                     self.nzb_folder = os.path.join(mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
-                    logger.fdebug(module + ' NZBGET Download folder option enabled. Directory set to : ' + self.nzb_folder)
+                    logger.fdebug('%s NZBGET Download folder option enabled. Directory set to : %s' % (module, self.nzb_folder))
+        else:
+            logger.fdebug('%s Now performing post-processing of %s sent from DDL' % (module, self.nzb_name))
@@ -806,6 +806,13 @@ class PostProcessor(object):
             if watchmatch['sub']:
                 logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, watchmatch['sub'], watchmatch['comiclocation']))
                 clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
+                if not os.path.exists(clocation):
+                    scrubs = re.sub(watchmatch['comiclocation'], '', watchmatch['sub']).strip()
+                    if scrubs[:2] == '//' or scrubs[:2] == '\\':
+                        scrubs = scrubs[1:]
+                    if os.path.exists(scrubs):
+                        logger.fdebug('[MODIFIED CLOCATION] %s' % scrubs)
+                        clocation = scrubs
             else:
                 logger.fdebug('%s[CLOCATION] %s' % (module, watchmatch['comiclocation']))
             if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']):
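The fallback added above reduces to: strip the series' comic location out of the sub-directory value, drop any leading path separator, and use the scrubbed path if it exists on disk. A standalone sketch of that scrub step (scrub_subpath is a hypothetical helper name, and re.escape is added here for safety; the hunk itself passes the location to re.sub unescaped):

import re

def scrub_subpath(comiclocation, sub):
    # remove the parent comic location from the sub-directory value...
    scrubs = re.sub(re.escape(comiclocation), '', sub).strip()
    # ...then drop a leading slash/backslash so the remainder joins cleanly
    if scrubs[:1] in ('/', '\\'):
        scrubs = scrubs[1:]
    return scrubs

print(scrub_subpath('/comics', '/comics/DC/Batman (2016)'))  # DC/Batman (2016)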
@@ -938,6 +945,7 @@ class PostProcessor(object):
                     "WatchValues": {"SeriesYear":   av['SeriesYear'],
                                     "LatestDate":   av['IssueDate'],
                                     "ComicVersion": 'v' + str(av['SeriesYear']),
                                     "ComicID":      av['ComicID'],
                                     "Publisher":    av['IssuePublisher'],
                                     "Total":        av['TotalIssues'],   # this will return the total issues in the arc (not needed for this)
+                                    "Type":         av['Type'],
@@ -504,7 +504,7 @@ def dbcheck():
         c.execute('SELECT ReleaseDate from storyarcs')
     except sqlite3.OperationalError:
         try:
-            c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
+            c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)')
             c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
             c.execute('DROP TABLE readinglist')
         except sqlite3.OperationalError:
@@ -527,7 +527,8 @@ def dbcheck():
     c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
-    c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
+    c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT)')
     conn.commit
     c.close
@@ -1037,6 +1038,11 @@ def dbcheck():
     except sqlite3.OperationalError:
         c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT')

+    try:
+        c.execute('SELECT Type from storyarcs')
+    except sqlite3.OperationalError:
+        c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT')
+
     ## -- searchresults Table --
     try:
         c.execute('SELECT SRID from searchresults')
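All three dbcheck hunks above use the probe-and-alter migration idiom that runs throughout dbcheck: try to SELECT the column, and only ALTER the table when sqlite3 reports it missing. A self-contained sketch of the pattern (in-memory database and minimal schema for illustration):

import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS storyarcs (StoryArcID TEXT, StoryArc TEXT)')

try:
    # probe: raises OperationalError when the column does not exist yet
    c.execute('SELECT Type from storyarcs')
except sqlite3.OperationalError:
    c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT')

conn.commit()
conn.close()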
@@ -130,7 +130,8 @@ class FileChecker(object):
                          'issue_year': runresults['issue_year'],
                          'issue_number': runresults['issue_number'],
                          'scangroup': runresults['scangroup'],
-                         'reading_order': runresults['reading_order']
+                         'reading_order': runresults['reading_order'],
+                         'booktype': runresults['booktype']
                          }
         else:
             filelist = self.traverse_directories(self.dir)
@@ -168,7 +169,8 @@ class FileChecker(object):
                          'issue_year': runresults['issue_year'],
                          'issue_number': runresults['issue_number'],
                          'scangroup': runresults['scangroup'],
-                         'reading_order': runresults['reading_order']
+                         'reading_order': runresults['reading_order'],
+                         'booktype': runresults['booktype']
                          })
                 else:
                     comiclist.append({
@@ -182,7 +184,8 @@ class FileChecker(object):
                          'JusttheDigits': runresults['justthedigits'],
                          'AnnualComicID': runresults['annual_comicid'],
                          'issueid': runresults['issueid'],
-                         'scangroup': runresults['scangroup']
+                         'scangroup': runresults['scangroup'],
+                         'booktype': runresults['booktype']
                          })
                     comiccnt +=1
                 else:
@@ -198,7 +201,8 @@ class FileChecker(object):
                          'issue_year': runresults['issue_year'],
                          'issue_number': runresults['issue_number'],
                          'issueid': runresults['issueid'],
-                         'scangroup': runresults['scangroup']
+                         'scangroup': runresults['scangroup'],
+                         'booktype': runresults['booktype']
                          })

         watchmatch['comiccount'] = comiccnt
@@ -233,9 +237,9 @@ class FileChecker(object):
         tmppath = re.sub(path, '', subpath).strip()

         path_list = os.path.normpath(tmppath)
-        if '/' == path_list[0] or '\\' == path_list[0]:
-            #need to remove any leading slashes so the os join can properly join the components
-            path_list = path_list[1:]
+        #if '/' == path_list[0] or '\\' == path_list[0]:
+        #    #need to remove any leading slashes so the os join can properly join the components
+        #    path_list = path_list[1:]
         #path_list = tmppath.split(os.sep)[-1]
         logger.fdebug('[SUB-PATH] subpath set to : ' + path_list)
@@ -1083,6 +1087,7 @@ class FileChecker(object):
                 'issue_year': issue_year,
                 'annual_comicid': None,
                 'scangroup': scangroup,
+                'booktype': booktype,
                 'reading_order': None}

         if self.justparse:
@@ -1101,6 +1106,7 @@ class FileChecker(object):
                 'issue_year': issue_year,
                 'issue_number': issue_number,
                 'scangroup': scangroup,
+                'booktype': booktype,
                 'reading_order': reading_order}

         series_info = {}
@@ -1116,7 +1122,8 @@ class FileChecker(object):
                 'series_volume': issue_volume,
                 'issue_year': issue_year,
                 'issue_number': issue_number,
-                'scangroup': scangroup}
+                'scangroup': scangroup,
+                'booktype': booktype}

         return self.matchIT(series_info)
@@ -1279,7 +1286,8 @@ class FileChecker(object):
                 'issueid': series_info['issueid'],
                 'justthedigits': justthedigits,
                 'annual_comicid': annual_comicid,
-                'scangroup': series_info['scangroup']}
+                'scangroup': series_info['scangroup'],
+                'booktype': series_info['booktype']}

         else:
             #logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']')
@@ -1294,7 +1302,8 @@ class FileChecker(object):
                 'series_volume': series_info['series_volume'],
                 'issue_year': series_info['issue_year'],
                 'issueid': series_info['issueid'],
-                'scangroup': series_info['scangroup']}
+                'scangroup': series_info['scangroup'],
+                'booktype': series_info['booktype']}


     def char_file_position(self, file, findchar, lastpos):
@@ -1113,7 +1113,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 parsed_comic = p_comic.listFiles()

                 logger.fdebug('parsed_info: %s' % parsed_comic)
-                if parsed_comic['parse_status'] == 'success':
+                if parsed_comic['parse_status'] == 'success' and (all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']):
                     try:
                         fcomic = filechecker.FileChecker(watchcomic=ComicName)
                         filecomic = fcomic.matchIT(parsed_comic)
@@ -1122,8 +1122,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                             continue
                         else:
                             logger.fdebug('match_check: %s' % filecomic)
+                elif booktype != parsed_comic['booktype']:
+                    logger.fdebug('Booktypes do not match. Looking for %s, this is a %s. Ignoring this result.' % (booktype, parsed_comic['booktype']))
+                    continue
                 else:
-                    logger.fdebug('Unable to parse name properly: %s' % filecomic)
+                    logger.fdebug('Unable to parse name properly: %s. Ignoring this result' % filecomic)
                     continue

                 #adjust for covers only by removing them entirely...
                 vers4year = "no"