FIX:(#1658) Retries from history should now be working again (with post-processing), IMP: When refreshing a series, an inaccessible/unavailable series directory would previously cause the old issue data to be deleted, resulting in a blank issue table; issue data is now only cleared after a successful refresh, FIX: Fixed some encoding problems with the auto-snatch script, FIX: Removed Failed as an available option for status changes on the Manage Issues tab, IMP: Changed some logger lines from info level to debug.

evilhero 2017-09-14 14:49:24 -04:00
parent 3671a81096
commit 595ed45511
10 changed files with 117 additions and 61 deletions
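
Note on the series-refresh change: the dbUpdate hunk near the end of this commit moves the DELETE of a series' issue rows so that it only runs once the refresh has actually returned data. A minimal sketch of that ordering, with hypothetical helper names (refresh_from_cv, delete_issue_rows, reinsert_issues) standing in for Mylar's importer/db calls:

import logging

logger = logging.getLogger(__name__)

def refresh_series(comic_id, refresh_from_cv, delete_issue_rows, reinsert_issues):
    # Pull the fresh issue data first; nothing has been deleted yet.
    issuedata = refresh_from_cv(comic_id)
    if not issuedata:
        # Refresh failed (e.g. the series directory was inaccessible):
        # keep the existing issue table instead of leaving it blank.
        logger.warning('Refresh failed for %s - keeping existing issue data', comic_id)
        return False
    # Only now is it safe to clear and repopulate the issue rows.
    delete_issue_rows(comic_id)
    reinsert_issues(issuedata)
    return True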

View File

@@ -42,7 +42,6 @@
<option value="Archived">Archived</option>
<option value="Skipped">Skipped</option>
<option value="Ignored">Ignored</option>
<option value="Failed">Failed</option>
</select>
selected issues
<input type="hidden" value="Go">

View File

@@ -1026,27 +1026,45 @@ class PostProcessor(object):
comicname = None
issuenumber = None
if tmpiss is not None:
ppinfo.append({'comicid': tmpiss['ComicID'],
'issueid': issueid,
'comicname': tmpiss['ComicName'],
'issuenumber': tmpiss['Issue_Number'],
'publisher': None,
'sarc': sarc,
'oneoff': self.oneoff})
ppinfo.append({'comicid': tmpiss['ComicID'],
'issueid': issueid,
'comicname': tmpiss['ComicName'],
'issuenumber': tmpiss['Issue_Number'],
'comiclocation': None,
'publisher': None,
'sarc': sarc,
'oneoff': self.oneoff})
elif all([self.oneoff is not None, mylar.ALT_PULL == 2]):
oneinfo = myDB.selectone('SELECT * FROM weekly WHERE IssueID=?', [issueid]).fetchone()
if oneinfo is not None:
ppinfo.append({'comicid': oneinfo['ComicID'],
'comicname': oneinfo['COMIC'],
'issuenumber': oneinfo['ISSUE'],
'publisher': oneinfo['PUBLISHER'],
'issueid': issueid,
'sarc': None,
'oneoff': True})
if oneinfo is None:
oneinfo = myDB.selectone('SELECT * FROM oneoffhistory WHERE IssueID=?', [issueid]).fetchone()
if oneinfo is None:
logger.warn('Unable to locate issue as previously snatched one-off')
self._log('Unable to locate issue as previously snatched one-off')
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
OComicname = oneinfo['ComicName']
OIssue = oneinfo['IssueNumber']
OPublisher = None
else:
OComicname = oneinfo['COMIC']
OIssue = oneinfo['ISSUE']
OPublisher = oneinfo['PUBLISHER']
self.oneoff = True
#logger.info(module + ' Discovered %s # %s by %s [comicid:%s][issueid:%s]' % (comicname, issuenumber, publisher, comicid, issueid))
ppinfo.append({'comicid': oneinfo['ComicID'],
'comicname': OComicname,
'issuenumber': OIssue,
'publisher': OPublisher,
'comiclocation': None,
'issueid': issueid,
'sarc': None,
'oneoff': True})
self.oneoff = True
#logger.info(module + ' Discovered %s # %s by %s [comicid:%s][issueid:%s]' % (comicname, issuenumber, publisher, comicid, issueid))
#use issueid to get publisher, series, year, issue number
else:
for x in oneoff_issuelist:
@@ -1386,7 +1404,7 @@ class PostProcessor(object):
publisher = tinfo['publisher']
sarc = tinfo['sarc']
oneoff = tinfo['oneoff']
if oneoff is True:
if all([oneoff is True, tinfo['comiclocation'] is not None]):
location = os.path.abspath(os.path.join(tinfo['comiclocation'], os.pardir))
else:
location = self.nzb_folder
@@ -1438,19 +1456,31 @@ class PostProcessor(object):
grdst = mylar.GRABBAG_DIR
odir = location
if odir is None:
odir = self.nzb_folder
ofilename = tinfo['comiclocation']
path, ext = os.path.splitext(ofilename)
if ofilename is not None:
path, ext = os.path.splitext(ofilename)
else:
#os.walk the location to get the filename...(coming from sab kinda thing) where it just passes the path.
for root, dirnames, filenames in os.walk(odir, followlinks=True):
for filename in filenames:
if filename.lower().endswith(self.extensions):
ofilename = filename
logger.fdebug(module + ' Valid filename located as : ' + ofilename)
path, ext = os.path.splitext(ofilename)
break
if ofilename is None:
logger.error(module + ' Unable to post-process file as it is not in a valid cbr/cbz format. PostProcessing aborted.')
logger.error(module + ' Unable to post-process file as it is not in a valid cbr/cbz format or cannot be located in path. PostProcessing aborted.')
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
if odir is None:
odir = self.nzb_folder
if sandwich is not None and 'S' in sandwich:
issuearcid = re.sub('S', '', issueid)
logger.fdebug(module + ' issuearcid:' + str(issuearcid))

View File

@@ -806,7 +806,7 @@ def GetImportList(results):
def drophtml(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)
soup = BeautifulSoup(html, "html.parser")
text_parts = soup.findAll(text=True)
#print ''.join(text_parts)

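The BeautifulSoup calls in this commit now name the parser explicitly. A small illustration, assuming bs4 is installed, of why that matters:

from bs4 import BeautifulSoup

html = "<div><p>hello</p></div>"
# Without the second argument bs4 guesses a parser (lxml, html5lib or
# html.parser, whichever happens to be installed) and emits a warning;
# passing "html.parser" pins the behaviour to the stdlib parser on every install.
soup = BeautifulSoup(html, "html.parser")
print(soup.findAll(text=True))
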
View File

@@ -881,7 +881,7 @@ def cleanhtml(raw_html):
VALID_TAGS = ['div', 'p']
soup = BeautifulSoup(raw_html)
soup = BeautifulSoup(raw_html, "html.parser")
for tag in soup.findAll('p'):
if tag.name not in VALID_TAGS:
@@ -2705,7 +2705,7 @@ def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
downlocation = torrent_folder.encode('utf-8')
else:
if mylar.USE_DELUGE:
downlocation = os.path.join(torrent_folder, torrent_info['files'][0]['path'])
downlocation = os.path.join(torrent_folder.encode('utf-8'), torrent_info['files'][0]['path'])
else:
downlocation = torrent_info['files'][0].encode('utf-8')
@@ -2745,7 +2745,7 @@ def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
else:
try:
new_filepath = os.path.join(torrent_path, '.copy')
logger.info('New_Filepath: %s' % new_filepath)
logger.fdebug('New_Filepath: %s' % new_filepath)
shutil.copy(torrent_path, new_filepath)
torrent_info['copied_filepath'] = new_filepath
except:

View File

@@ -429,7 +429,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
if not os.path.exists(os.path.join(comlocation, "cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
text_file.write(str(comic['ComicURL']))
logger.info('Updating complete for: ' + comic['ComicName'])
if calledfrom == 'weekly':
@@ -439,7 +439,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
elif calledfrom == 'dbupdate':
logger.info('returning to dbupdate module')
return #issuedata # this should be the issuedata data from updateissuedata function
return issuedata # this should be the issuedata data from updateissuedata function
elif calledfrom == 'weeklycheck':
logger.info('Successfully refreshed ' + comic['ComicName'] + ' (' + str(SeriesYear) + '). Returning to Weekly issue update.')
@@ -1275,7 +1275,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issue_collection(issuedata, nostatus='False')
else:
logger.fdebug('initiating issue updating - just the info')
issue_collection(issuedata, nostatus='True')
#issue_collection(issuedata, nostatus='True')
styear = str(SeriesYear)

View File

@@ -27,7 +27,7 @@ def sabnzbd(sabhost=mylar.SAB_HOST, sabusername=mylar.SAB_USERNAME, sabpassword=
sabhost = sabhost + '/'
sabline = sabhttp + sabusername + ':' + sabpassword + '@' + sabhost
r = requests.get(sabline + 'config/general/')
soup = BeautifulSoup(r.content)
soup = BeautifulSoup(r.content, "html.parser")
#lenlinks = len(cntlinks)
cnt1 = len(soup.findAll("div", {"class": "field-pair alt"}))
cnt2 = len(soup.findAll("div", {"class": "field-pair"}))

View File

@@ -48,7 +48,7 @@ class RTorrent(object):
torrent = self.client.find_torrent(torrent_hash)
if torrent:
if check:
logger.info('Successfully located torrent %s by hash on client. Detailed statistics to follow', torrent_hash)
logger.fdebug('Successfully located torrent %s by hash on client. Detailed statistics to follow', torrent_hash)
else:
logger.warn("%s Torrent already exists. Not downloading at this time.", torrent_hash)
return
@@ -56,7 +56,7 @@ class RTorrent(object):
if check:
logger.warn('Unable to locate torrent with a hash value of %s', torrent_hash)
return
if filepath:
loadit = self.client.load_torrent(filepath)
if loadit:
@@ -80,21 +80,21 @@ class RTorrent(object):
return torrent_info
if torrent_info['completed']:
logger.info("Directory: %s", torrent_info['folder'])
logger.info("Name: %s", torrent_info['name'])
logger.info("FileSize: %s", helpers.human_size(torrent_info['total_filesize']))
logger.info("Completed: %s", torrent_info['completed'])
logger.info("Downloaded: %s", helpers.human_size(torrent_info['download_total']))
logger.info("Uploaded: %s", helpers.human_size(torrent_info['upload_total']))
logger.info("Ratio: %s", torrent_info['ratio'])
logger.fdebug("Directory: %s", torrent_info['folder'])
logger.fdebug("Name: %s", torrent_info['name'])
logger.fdebug("FileSize: %s", helpers.human_size(torrent_info['total_filesize']))
logger.fdebug("Completed: %s", torrent_info['completed'])
logger.fdebug("Downloaded: %s", helpers.human_size(torrent_info['download_total']))
logger.fdebug("Uploaded: %s", helpers.human_size(torrent_info['upload_total']))
logger.fdebug("Ratio: %s", torrent_info['ratio'])
#logger.info("Time Started: %s", torrent_info['time_started'])
logger.info("Seeding Time: %s", helpers.humanize_time(int(time.time()) - torrent_info['time_started']))
logger.fdebug("Seeding Time: %s", helpers.humanize_time(int(time.time()) - torrent_info['time_started']))
if torrent_info['label']:
logger.info("Torrent Label: %s", torrent_info['label'])
logger.fdebug("Torrent Label: %s", torrent_info['label'])
#logger.info(torrent_info)
return torrent_info
return torrent_info
def get_the_hash(self, filepath):
# Open torrent file
@@ -102,5 +102,5 @@ class RTorrent(object):
metainfo = bencode.decode(torrent_file.read())
info = metainfo['info']
thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
logger.info('Hash: ' + thehash)
logger.fdebug('Hash: ' + thehash)
return thehash

View File

@@ -42,10 +42,9 @@ class TorrentClient(object):
if parsed.scheme in ['http', 'https']:
url += mylar.RTORRENT_RPC_URL
logger.info(url)
#logger.fdebug(url)
if username and password:
logger.info('username: %s / password: %s' % (username, 'redacted'))
try:
self.conn = RTorrent(
url,(auth, username, password),
@@ -56,7 +55,7 @@ class TorrentClient(object):
logger.error('Failed to connect to rTorrent: %s', err)
return False
else:
logger.info('NO username %s / NO password %s' % (username, password))
logger.fdebug('NO username %s / NO password %s' % (username, password))
try:
self.conn = RTorrent(
url, (auth, username, password),
@@ -107,7 +106,7 @@ class TorrentClient(object):
start = bool(mylar.RTORRENT_STARTONLOAD)
if filepath.startswith('magnet'):
logger.info('torrent magnet link set to : ' + filepath)
logger.fdebug('torrent magnet link set to : ' + filepath)
torrent_hash = re.findall('urn:btih:([\w]{32,40})', filepath)[0].upper()
# Send request to rTorrent
try:
@@ -122,7 +121,7 @@ class TorrentClient(object):
else:
logger.info('Torrent successfully loaded into rtorrent using magnet link as source.')
else:
logger.info('filepath to torrent file set to : ' + filepath)
logger.fdebug('filepath to torrent file set to : ' + filepath)
try:
torrent = self.conn.load_torrent(filepath, verify_load=True)
if not torrent:
@@ -142,12 +141,12 @@ class TorrentClient(object):
if mylar.RTORRENT_LABEL:
torrent.set_custom(1, mylar.RTORRENT_LABEL)
logger.info('Setting label for torrent to : ' + mylar.RTORRENT_LABEL)
logger.fdebug('Setting label for torrent to : ' + mylar.RTORRENT_LABEL)
if mylar.RTORRENT_DIRECTORY:
torrent.set_directory(mylar.RTORRENT_DIRECTORY)
logger.info('Setting directory for torrent to : ' + mylar.RTORRENT_DIRECTORY)
logger.fdebug('Setting directory for torrent to : ' + mylar.RTORRENT_DIRECTORY)
logger.info('Successfully loaded torrent.')
#note that if set_directory is enabled, the torrent has to be started AFTER it's loaded or else it will give chunk errors and not seed

View File

@@ -171,18 +171,28 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
#print 'added annual'
issues += annual_load #myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID])
#store the issues' status for a given comicid, after deleting and readding, flip the status back to$
logger.fdebug("Deleting all issue data.")
myDB.action('DELETE FROM issues WHERE ComicID=?', [ComicID])
myDB.action('DELETE FROM annuals WHERE ComicID=?', [ComicID])
#logger.fdebug("Deleting all issue data.")
#myDB.action('DELETE FROM issues WHERE ComicID=?', [ComicID])
#myDB.action('DELETE FROM annuals WHERE ComicID=?', [ComicID])
logger.fdebug("Refreshing the series and pulling in new data using only CV.")
if whack == False:
cchk = mylar.importer.addComictoDB(ComicID, mismatch, calledfrom='dbupdate', annload=annload, csyear=csyear)
#reload the annuals here.
if cchk:
#delete the data here if it's all valid.
logger.fdebug("Deleting all old issue data to make sure new data is clean...")
myDB.action('DELETE FROM issues WHERE ComicID=?', [ComicID])
myDB.action('DELETE FROM annuals WHERE ComicID=?', [ComicID])
mylar.importer.issue_collection(cchk, nostatus='True')
else:
logger.warn('There was an error when refreshing this series - Make sure directories are writable/exist, etc')
return
issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID])
annuals = []
ann_list = []
#reload the annuals here.
if mylar.ANNUALS_ON:
annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID])
ann_list += annuals_list

View File

@@ -1231,7 +1231,9 @@
logger.error("Unable to send torrent - check logs and settings.")
continue
else:
if mylar.ENABLE_SNATCH_SCRIPT:
if any([mylar.USE_RTORRENT, mylar.USE_DELUGE]) and mylar.AUTO_SNATCH:
mylar.SNATCHED_QUEUE.put(rcheck['hash'])
elif mylar.ENABLE_SNATCH_SCRIPT:
#packs not supported on retry atm - Volume and Issuedate also not included due to limitations...
snatch_vars = {'comicinfo': {'comicname': ComicName,
'issuenumber': IssueNumber,
@@ -1255,22 +1257,38 @@
logger.info('Successfully retried issue.')
break
else:
oneoff = False
chkthis = myDB.selectone('SELECT a.ComicID, a.ComicName, a.ComicVersion, a.ComicYear, b.IssueID, b.Issue_Number, b.IssueDate FROM comics as a INNER JOIN annuals as b ON a.ComicID = b.ComicID WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
chkthis = myDB.selectone('SELECT a.ComicID, a.ComicName, a.ComicVersion, a.ComicYear, b.IssueID, b.Issue_Number, b.IssueDate FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
chkthis = myDB.selectone('SELECT ComicID, ComicName, year as ComicYear, IssueID, IssueNumber as Issue_number, weeknumber, year from oneoffhistory WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
logger.warn('Unable to locate previous snatch details (checked issues/annuals/one-offs). Retrying the snatch for this issue is unavailable.')
continue
else:
logger.fdebug('Successfully located issue as a one-off download initiated via pull-list. Let\'s do this....')
oneoff = True
modcomicname = chkthis['ComicName']
else:
modcomicname = chkthis['ComicName'] + ' Annual'
if oneoff is True:
weekchk = helpers.weekly_info(chkthis['weeknumber'], chkthis['year'])
IssueDate = weekchk['midweek']
ComicVersion = None
else:
IssueDate = chkthis['IssueDate']
ComicVersion = chkthis['ComicVersion']
comicinfo = []
comicinfo.append({"ComicName": chkthis['ComicName'],
"ComicVolume": chkthis['ComicVersion'],
"ComicVolume": ComicVersion,
"IssueNumber": chkthis['Issue_Number'],
"comyear": chkthis['ComicYear'],
"IssueDate": chkthis['IssueDate'],
"IssueDate": IssueDate,
"pack": False,
"modcomicname": modcomicname,
"oneoff": False})
"oneoff": oneoff})
newznabinfo = None
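
For the history-retry fix in the hunk above, the snatch details are looked up with a fallback chain: annuals first, then regular issues, then the one-off pull-list history. A condensed sketch of that order, reusing the tables and columns from the hunk (the myDB handle and the exact joins/columns are simplified):

def locate_snatch(myDB, issue_id):
    # Try annuals, then issues, then one-offs recorded in oneoffhistory.
    queries = (
        "SELECT a.ComicName, b.Issue_Number FROM comics AS a INNER JOIN annuals AS b ON a.ComicID = b.ComicID WHERE b.IssueID=?",
        "SELECT a.ComicName, b.Issue_Number FROM comics AS a INNER JOIN issues AS b ON a.ComicID = b.ComicID WHERE b.IssueID=?",
        "SELECT ComicName, IssueNumber AS Issue_Number FROM oneoffhistory WHERE IssueID=?",
    )
    for query in queries:
        row = myDB.selectone(query, [issue_id]).fetchone()
        if row is not None:
            return row
    # Nothing found in any table: the retry cannot proceed.
    return None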