FIX: When the alt_pull 2 method was offline, it would fail to fall back to the alt_pull 0/1 method and continue checking for new releases
FIX: (#1402) Fixed adding directly to the uTorrent client
FIX: Added some exception traps for when the Wanted tab is inaccessible
FIX: Updated the public tracker demonoid to use the new url for rss feeds
FIX: Should now account for unicode issue numbers when filechecking
FIX: Attempt to improve unicode handling when importing
FIX: When importing a series, it would not flip into Manual Intervention status when more than one search result came back in some instances

evilhero 2016-10-05 10:12:58 -04:00
parent d57741515e
commit e971620b43
12 changed files with 301 additions and 300 deletions

View File

@ -0,0 +1,24 @@
<%inherit file="base.html"/>
<%def name="headIncludes()">
</%def>
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix">${title}</h1>
</div>
<div>
<table><tr>
%for ti in torrent_info:
<td>Completed: ${ti['completed']}</td>
<td>Download: ${ti['download']}</td>
<td>Upload: ${ti['upload']}</td>
<td>Ratio: ${ti['ratio']}</td>
<td>Seedtime: ${ti['seedtime']}</td>
%endfor
</tr></table>
</div>
</%def>
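For reference, the torrent_info structure this new template iterates over is a list of per-torrent dicts; a minimal render context might look like the sketch below (the field names are taken from the template itself; the values, and the assumption that the human-readable totals come from helpers.human_size() as in the webserve.py hunk further down, are illustrative only):

    # hypothetical context for the template above; values are made up
    torrent_info = [{
        'completed': True,
        'download': '142.3 MB',       # human-readable totals, as produced
        'upload': '310.7 MB',         # by helpers.human_size() elsewhere
        'ratio': 2.18,
        'seedtime': '3 days, 4 hours',
    }]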

View File

@ -261,7 +261,7 @@ class PostProcessor(object):
alt_db = myDB.select("SELECT * FROM Comics WHERE AlternateSearch != 'None'")
if alt_db is not None:
for aldb in alt_db:
as_d = filechecker.FileChecker(AlternateSearch=aldb['AlternateSearch'].decode('utf-8'))
as_d = filechecker.FileChecker(AlternateSearch=helpers.conversion(aldb['AlternateSearch']))
as_dinfo = as_d.altcheck()
alt_list.append({'AS_Alt': as_dinfo['AS_Alt'],
'AS_Tuple': as_dinfo['AS_Tuple'],
@ -270,8 +270,8 @@ class PostProcessor(object):
manual_list = []
for fl in filelist['comiclist']:
as_d = filechecker.FileChecker()#watchcomic=fl['series_name'].decode('utf-8'))
as_dinfo = as_d.dynamic_replace(fl['series_name'])
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(helpers.conversion(fl['series_name']))
mod_seriesname = as_dinfo['mod_seriesname']
loopchk = []
for x in alt_list:
@ -279,6 +279,8 @@ class PostProcessor(object):
for ab in x['AS_Alt']:
tmp_ab = re.sub(' ', '', ab)
tmp_mod_seriesname = re.sub(' ', '', mod_seriesname)
logger.info(tmp_mod_seriesname)
logger.info(tmp_ab.lower())
if re.sub('\|', '', tmp_mod_seriesname.lower()).strip() == re.sub('\|', '', tmp_ab.lower()).strip():
if not any(re.sub('[\|\s]', '', cname.lower()) == x for x in loopchk):
loopchk.append(re.sub('[\|\s]', '', cname.lower()))
@ -494,9 +496,9 @@ class PostProcessor(object):
if datematch == 'True':
if watchmatch['sub']:
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], watchmatch['comicfilename'].decode('utf-8'))
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
else:
clocation = os.path.join(watchmatch['comiclocation'],watchmatch['comicfilename'].decode('utf-8'))
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
manual_list.append({"ComicLocation": clocation,
"ComicID": cs['ComicID'],
"IssueID": issuechk['IssueID'],
@ -509,7 +511,7 @@ class PostProcessor(object):
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
continue
#ccnt+=1
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + fl['comicfilename'].decode('utf-8'))
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
break
logger.fdebug(module + ' There are ' + str(len(manual_list)) + ' files found that match on your watchlist, ' + str(int(filelist['comiccount'] - len(manual_list))) + ' do not match anything and will be ignored.')
@ -522,8 +524,8 @@ class PostProcessor(object):
#mod_seriesname = '%' + re.sub(' ', '%', fl['series_name']).strip() + '%'
#arc_series = myDB.select("SELECT * FROM readinglist WHERE ComicName LIKE?", [fl['series_name']]) # by StoryArcID")
as_d = filechecker.FileChecker(watchcomic=fl['series_name'].decode('utf-8'))
as_dinfo = as_d.dynamic_replace(fl['series_name'])
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(helpers.conversion(fl['series_name']))
mod_seriesname = as_dinfo['mod_seriesname']
arcloopchk = []
for x in alt_list:
@ -672,9 +674,9 @@ class PostProcessor(object):
passit = True
if passit == False:
if arcmatch['sub']:
clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], arcmatch['comicfilename'].decode('utf-8'))
clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], helpers.conversion(arcmatch['comicfilename']))
else:
clocation = os.path.join(arcmatch['comiclocation'], arcmatch['comicfilename'].decode('utf-8'))
clocation = os.path.join(arcmatch['comiclocation'], helpers.conversion(arcmatch['comicfilename']))
logger.info('[' + k + ' #' + issuechk['IssueNumber'] + '] MATCH: ' + clocation + ' / ' + str(issuechk['IssueID']) + ' / ' + str(v[i]['ArcValues']['IssueID']))
manual_arclist.append({"ComicLocation": clocation,
"ComicID": v[i]['WatchValues']['ComicID'],

View File

@ -116,6 +116,7 @@ DONATEBUTTON = True
PULLNEW = None
ALT_PULL = 0
PULLBYFILE = None
LOCAL_IP = None
EXT_IP = None
@ -474,7 +475,7 @@ def initialize():
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, UMASK
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, PULLBYFILE, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, UMASK
if __INITIALIZED__:
return False

View File

@ -376,29 +376,16 @@ class FileChecker(object):
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
try:
sf.decode('ascii')
except:
logger.fdebug('Unicode character detected: ' + sf)
if '\xbd' in sf: #.encode('utf-8'):
logger.fdebug('[SPECIAL-CHARACTER ISSUE] Possible issue # : ' + sf)
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
if '\xe2' in sf: #(maybe \u221e)
logger.fdebug('[SPECIAL-CHARACTER ISSUE] Possible issue # : ' + sf)
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
#if '\xbc' in sf:
# '0.25'
#if '\xbe' in sf::
# '0.75'
if sf == 'XCV':
# new 2016-09-19 \ attempt to check for XCV which replaces any unicode above
for x in list(wrds):
if x != '':
tmpissue_number = re.sub('XCV', x, split_file[split_file.index(sf)])
logger.info('[SPECIAL-CHARACTER ISSUE] Possible issue # : ' + tmpissue_number)
possible_issuenumbers.append({'number': tmpissue_number,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
count = None
found = False
@ -676,7 +663,6 @@ class FileChecker(object):
else:
issue_number = possible_issuenumbers[0]['number']
issue_number_position = possible_issuenumbers[0]['position']
logger.fdebug('issue verified as : ' + issue_number)
if highest_series_pos > possible_issuenumbers[0]['position']: highest_series_pos = possible_issuenumbers[0]['position']
if issue_number:
@ -697,6 +683,16 @@ class FileChecker(object):
issue_number = fin_num
if highest_series_pos > fin_pos: highest_series_pos = fin_pos
#--- this is new - 2016-09-18 / account for unicode in the issue number when the issue number is not detected above
logger.fdebug('issue_position: ' + str(issue_number_position))
if all([issue_number_position == highest_series_pos, 'XCV' in split_file, issue_number is None]):
for x in list(wrds):
if x != '':
issue_number = re.sub('XCV', x, split_file[issue_number_position-1])
highest_series_pos -=1
issue_number_position -=1
logger.fdebug('issue verified as : ' + issue_number)
issue_volume = None
if len(volume_found) > 0:
issue_volume = 'v' + str(volume_found['volume'])
@ -759,7 +755,7 @@ class FileChecker(object):
logger.fdebug('series title possibly: ' + series_name)
#if the filename is unicoded, it won't match due to the unicode translation. Keep the unicode as well as the decoded.
series_name_decoded= unicodedata.normalize('NFKD', series_name.decode('utf-8')).encode('ASCII', 'ignore')
series_name_decoded = unicodedata.normalize('NFKD', helpers.conversion(series_name)).encode('ASCII', 'ignore')
#check for annual in title(s) here.
if mylar.ANNUALS_ON:
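To make the XCV placeholder handling above concrete, here is a hedged sketch of the substitution it performs, assuming (as the hunks imply) that unicode characters in the filename were swapped for the literal token 'XCV' earlier in the parse and that the original characters were kept in wrds:

    # -*- coding: utf-8 -*-
    # illustrative parse of 'Gold Digger ½ (2016).cbz'; split_file/wrds
    # mirror the variables used in the hunks above
    import re

    split_file = ['Gold', 'Digger', 'XCV', '(2016)']
    wrds = [u'\xbd']   # the unicode character that was replaced by 'XCV'

    issue_number = None
    for x in list(wrds):
        if x != '':
            issue_number = re.sub('XCV', x, split_file[2])

    print issue_number   # u'\xbd' -- the half issue, matched downstream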

View File

@ -1918,7 +1918,7 @@ def torrent_create(site, linkid, alt=None):
else:
url = 'http://torrentproject.se/torrent/' + str(linkid) + '.torrent'
elif site == 'DEM':
url = 'https://www.demonoid.pw/files/download/' + str(linkid) + '/'
url = 'https://www.demonoid.cc/files/download/' + str(linkid) + '/'
elif site == 'WWT':
url = 'https://worldwidetorrents.eu/download.php'
@ -1955,7 +1955,7 @@ def parse_32pfeed(rssfeedline):
return KEYS_32P
def humanize_time(self, amount, units = 'seconds'):
def humanize_time(amount, units = 'seconds'):
def process_time(amount, units):
@ -2095,6 +2095,13 @@ def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
issues['valid'] = valid
return issues
def conversion(value):
if type(value) == str:
try:
value = value.decode('utf-8')
except:
value = value.decode('windows-1252')
return value
#def file_ops(path,dst):
# # path = source path + filename
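As a quick sanity check of the new conversion() helper above (under the project's Python 2 runtime): byte strings are decoded to unicode, trying utf-8 first and falling back to windows-1252, while values that are already unicode pass through untouched:

    name_utf8 = 'Uncanny Avengers \xc2\xbd'    # '½' encoded as utf-8 bytes
    name_cp1252 = 'Spawn \xbd'                 # '½' encoded as windows-1252 bytes

    print conversion(name_utf8)                # u'Uncanny Avengers \xbd'
    print conversion(name_cp1252)              # utf-8 decode fails, so u'Spawn \xbd'
    print conversion(u'already unicode')       # returned unchanged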

View File

@ -41,7 +41,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
if not os.path.isdir(dir):
logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace'))
return
return "Fail"
logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace'))
@ -421,8 +421,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
logger.fdebug('[' + mod_series + '] Adding to the import-queue!')
isd = filechecker.FileChecker(watchcomic=mod_series.decode('utf-8'))
is_dyninfo = isd.dynamic_replace(mod_series)
isd = filechecker.FileChecker()
is_dyninfo = isd.dynamic_replace(helpers.conversion(mod_series))
logger.fdebug('Dynamic-ComicName: ' + is_dyninfo['mod_seriesname'])
#impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
@ -464,7 +464,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
"issuenumber": issuenumber, #issuenumber,
"volume": issuevolume,
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
"comlocation": helpers.conversion(comlocation)
})
cnt+=1
#logger.fdebug('import_by_ids: ' + str(import_by_comicids))
@ -588,9 +588,16 @@ def scanLibrary(scan=None, queue=None):
except Exception, e:
logger.error('[IMPORT] Unable to complete the scan: %s' % e)
mylar.IMPORT_STATUS = None
return
valreturn.append({"somevalue": 'self.ie',
"result": 'error'})
return queue.put(valreturn)
if soma == "Completed":
logger.info('[IMPORT] Successfully completed import.')
elif soma == "Fail":
mylar.IMPORT_STATUS = 'Failure'
valreturn.append({"somevalue": 'self.ie',
"result": 'error'})
return queue.put(valreturn)
else:
mylar.IMPORT_STATUS = 'Now adding the completed results to the DB.'
logger.info('[IMPORT] Parsing/Reading of files completed!')
@ -613,55 +620,37 @@ def scanLibrary(scan=None, queue=None):
#these all have related ComicID/IssueID's...just add them as is.
controlValue = {"impID": ghi['impid']}
newValue = {"Status": "Not Imported",
"ComicName": i['ComicName'],
"DisplayName": i['ComicName'],
"DynamicName": nspace_dynamicname,
"ComicName": helpers.conversion(i['ComicName']),
"DisplayName": helpers.conversion(i['ComicName']),
"DynamicName": helpers.conversion(nspace_dynamicname),
"ComicID": i['ComicID'],
"IssueID": i['IssueID'],
"IssueNumber": i['Issue_Number'],
"IssueNumber": helpers.conversion(i['Issue_Number']),
"Volume": ghi['volume'],
"ComicYear": ghi['comicyear'],
"ComicFilename": ghi['comfilename'].decode('utf-8'),
"ComicLocation": ghi['comlocation'],
"ComicFilename": helpers.conversion(ghi['comfilename']),
"ComicLocation": helpres.conversion(ghi['comlocation']),
"ImportDate": helpers.today(),
"WatchMatch": None} #i['watchmatch']}
myDB.upsert("importresults", newValue, controlValue)
if int(soma['import_count']) > 0:
for ss in soma['import_by_comicids']:
if type(ss['issuenumber']) == str:
try:
theissuenumber = ss['issuenumber'].decode('utf-8')
except:
theissuenumber = ss['issuenumber'].decode('windows-1252').encode('utf-8')#mylar.SYS_ENCODING)
theissuenumber = unicode(theissuenumber, mylar.SYS_ENCODING)
else:
theissuenumber = ss['issuenumber']
thefilename = ss['comfilename']
thelocation = ss['comlocation']
if type(ss['comfilename']) != unicode:
thefilename = thefilename.decode('utf-8')
if type(ss['comlocation']) != unicode:
thelocation = thelocation.decode('utf-8')
nspace_dynamicname = re.sub('[\|\s]', '', ss['dynamicname'].lower()).strip()
if type(nspace_dynamicname) != unicode:
nspace_dynamicname = nspace_dynamicname.decode('utf-8')
controlValue = {"impID": ss['impid']}
newValue = {"ComicYear": ss['comicyear'],
"Status": "Not Imported",
"ComicName": ss['comicname'].decode('utf-8'),
"DisplayName": ss['displayname'].decode('utf-8'),
"DynamicName": nspace_dynamicname,
"ComicName": helpers.conversion(ss['comicname']),
"DisplayName": helpers.conversion(ss['displayname']),
"DynamicName": helpers.conversion(nspace_dynamicname),
"ComicID": ss['comicid'], #if it's been scanned in for cvinfo, this will be the CID - otherwise it's None
"IssueID": None,
"Volume": ss['volume'],
"IssueNumber": theissuenumber,
"ComicFilename": thefilename,#.decode('utf-8'), #ss['comfilename'].encode('utf-8'),
"ComicLocation": thelocation,
"IssueNumber": helpers.conversion(ss['issuenumber']),
"ComicFilename": helpers.conversion(ss['comfilename']),
"ComicLocation": helpers.conversion(ss['comlocation']),
"ImportDate": helpers.today(),
"WatchMatch": ss['watchmatch']}
myDB.upsert("importresults", newValue, controlValue)

View File

@ -110,10 +110,10 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
continue
return
elif pickfeed == "5" and srchterm is not None: # demonoid search / non-RSS
feed = 'https://www.demonoid.pw/' + "files/?category=10&subcategory=All&language=0&seeded=2&external=2&query=" + str(srchterm) + "&uid=0&out=rss"
feed = 'https://www.demonoid.cc/' + "files/?category=10&subcategory=All&language=0&seeded=2&external=2&query=" + str(srchterm) + "&uid=0&out=rss"
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "6": # demonoid rss feed
feed = 'https://www.demonoid.pw/rss/10.xml'
feed = 'https://www.demonoid.cc/rss/10.xml'
feedtype = ' from the New Releases RSS Feed from Demonoid'
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "999": #WWT rss feed
@ -853,9 +853,9 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
url = helpers.torrent_create('DEM', linkit)
if url.startswith('https'):
dem_referrer = 'https://www.demonoid.pw/files/download/'
dem_referrer = 'https://www.demonoid.cc/files/download/'
else:
dem_referrer = 'http://www.demonoid.pw/files/download/'
dem_referrer = 'http://www.demonoid.cc/files/download/'
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT),
@ -980,16 +980,10 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.fdebug('[' + site + '] Saved torrent file to : ' + filepath)
if mylar.USE_UTORRENT:
utorrent.addTorrent(url)
if mylar.UTORRENT_LABEL:
torfile = open(filepath, 'rb')
tordata = torfile.read()
torfile.close()
hash = utorrent.calculate_torrent_hash(url, tordata)
time.sleep(10)
utorrent.labelTorrent(hash)
return "pass"
uTC = utorrent.utorrentclient()
resp = uTC.addfile(filepath, filename)
return resp #resp = pass / fail
elif mylar.USE_WATCHDIR:
if mylar.TORRENT_LOCAL:
return "pass"

View File

@ -1,3 +1,18 @@
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
@ -25,13 +40,20 @@ class RTorrent(object):
logger.error('could not connect to %s, exiting', mylar.RTORRENT_HOST)
sys.exit(-1)
def main(self, torrent_hash=None, filepath=None):
def main(self, torrent_hash=None, filepath=None, check=False):
torrent = self.client.find_torrent(torrent_hash)
if torrent:
logger.warn("%s Torrent already exists. Not downloading at this time.", torrent_hash)
return
if check:
logger.info('Successfully located torrent %s by hash on client. Detailed statistics to follow', torrent_hash)
else:
logger.warn("%s Torrent already exists. Not downloading at this time.", torrent_hash)
return
else:
if check:
logger.warn('Unable to locate torrent with a hash value of %s', torrent_hash)
return
if filepath:
loadit = self.client.load_torrent(filepath)
if loadit:
@ -45,6 +67,9 @@ class RTorrent(object):
sys.exit(-1)
torrent_info = self.client.get_torrent(torrent)
if check:
return torrent_info
if torrent_info['completed']:
logger.info("Directory: %s", torrent_info['folder'])
logger.info("Name: %s", torrent_info['name'])
@ -59,7 +84,7 @@ class RTorrent(object):
if torrent_info['label']:
logger.info("Torrent Label: %s", torrent_info['label'])
logger.info(torrent_info)
#logger.info(torrent_info)
return torrent_info
def get_the_hash(self, filepath):

View File

@ -308,7 +308,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
else:
if CV_EXcomicid['variloop'] == '99':
mismatch = "yes"
if mylar.ALT_PULL != 2:
if mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True:
lastupdatechk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
if lastupdatechk is None:
pullupd = "yes"
@ -352,7 +352,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if issuechk is None:
if futurepull is None:
og_status = None
if mylar.ALT_PULL != 2:
if mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True:
logger.fdebug(adjComicName + ' Issue: ' + str(IssueNumber) + ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.')
# we need to either decrease the total issue count, OR indicate that an issue is upcoming.
upco_results = myDB.select("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?", [ComicID])
@ -565,7 +565,7 @@ def weekly_update(ComicName, IssueNumber, CStatus, CID, weeknumber, year, altiss
def newpullcheck(ComicName, ComicID, issue=None):
# When adding a new comic, let's check for new issues on this week's pullist and update.
if mylar.ALT_PULL != '2':
if mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True:
mylar.weeklypull.pullitcheck(comic1off_name=ComicName, comic1off_id=ComicID, issue=issue)
else:
mylar.weeklypull.new_pullcheck(weeknumber=mylar.CURRENT_WEEKNUMBER, pullyear=mylar.CURRENT_YEAR, comic1off_name=ComicName, comic1off_id=ComicID, issue=issue)
@ -1028,7 +1028,7 @@ def forceRescan(ComicID, archive=None, module=None):
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = tmpfc['ComicFilename'].decode('utf-8')
isslocation = helpers.conversion(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + isslocation)
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
@ -1166,7 +1166,7 @@ def forceRescan(ComicID, archive=None, module=None):
logger.fdebug(module + ' Matched...annual issue: ' + rescan['ComicName'] + '#' + str(reann['Issue_Number']) + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = tmpfc['ComicFilename'].decode('utf-8')
isslocation = helpers.conversion(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + isslocation)
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))

View File

@ -1,29 +1,33 @@
# This file is part of Mylar and is adapted from Headphones.
import hashlib
import urllib
import json
import time
from collections import namedtuple
import urllib2
import urlparse
import cookielib
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import mylar
from mylar import logger
import requests
from bencode import bencode, bdecode
from hashlib import sha1
from cStringIO import StringIO
import mylar
from mylar import logger, upload
class utorrentclient(object):
TOKEN_REGEX = "<div id='token' style='display:none;'>([^<>]+)</div>"
UTSetting = namedtuple("UTSetting", ["name", "int", "str", "access"])
def __init__(self, base_url=None, username=None, password=None, ):
def __init__(self):
host = mylar.UTORRENT_HOST
host = mylar.UTORRENT_HOST #has to be in the format of URL:PORT
if not host.startswith('http'):
host = 'http://' + host
@ -36,183 +40,111 @@ class utorrentclient(object):
self.base_url = host
self.username = mylar.UTORRENT_USERNAME
self.password = mylar.UTORRENT_PASSWORD
self.opener = self._make_opener('uTorrent', self.base_url, self.username, self.password)
self.token = self._get_token()
# TODO refresh token, when necessary
def _make_opener(self, realm, base_url, username, password):
"""uTorrent API need HTTP Basic Auth and cookie support for token verify."""
auth = urllib2.HTTPBasicAuthHandler()
auth.add_password(realm=realm, uri=base_url, user=username, passwd=password)
opener = urllib2.build_opener(auth)
urllib2.install_opener(opener)
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
handlers = [auth, cookie_handler]
opener = urllib2.build_opener(*handlers)
return opener
self.utorrent_url = '%s/gui/' % (self.base_url)
self.auth = requests.auth.HTTPBasicAuth(self.username, self.password)
self.token, self.cookies = self._get_token()
def _get_token(self):
url = urlparse.urljoin(self.base_url, 'gui/token.html')
TOKEN_REGEX = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>'
utorrent_url_token = '%stoken.html' % self.utorrent_url
try:
response = self.opener.open(url)
except urllib2.HTTPError as err:
logger.debug('URL: ' + str(url))
r = requests.get(utorrent_url_token, auth=self.auth)
except requests.exceptions.RequestException as err:
logger.debug('URL: ' + str(utorrent_url_token))
logger.debug('Error getting Token. uTorrent responded with error: ' + str(err))
return
match = re.search(utorrentclient.TOKEN_REGEX, response.read())
return match.group(1)
return 'fail'
def list(self, **kwargs):
params = [('list', '1')]
params += kwargs.items()
return self._action(params)
def add_url(self, url):
# can receive magnet or normal .torrent link
params = [('action', 'add-url'), ('s', url)]
return self._action(params)
def start(self, *hashes):
params = [('action', 'start'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def stop(self, *hashes):
params = [('action', 'stop'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def pause(self, *hashes):
params = [('action', 'pause'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def forcestart(self, *hashes):
params = [('action', 'forcestart'), ]
for hash in hashes:
params.append(('hash', hash))
return self._action(params)
def getfiles(self, hash):
params = [('action', 'getfiles'), ('hash', hash)]
return self._action(params)
def getprops(self, hash):
params = [('action', 'getprops'), ('hash', hash)]
return self._action(params)
def setprops(self, hash, s, val):
params = [('action', 'setprops'), ('hash', hash), ("s", s), ("v", val)]
logger.debug('Params: ' + str(params))
return self._action(params)
def setprio(self, hash, priority, *files):
params = [('action', 'setprio'), ('hash', hash), ('p', str(priority))]
for file_index in files:
params.append(('f', str(file_index)))
return self._action(params)
def get_settings(self, key=None):
params = [('action', 'getsettings'), ]
status, value = self._action(params)
settings = {}
for args in value['settings']:
settings[args[0]] = self.UTSetting(*args)
if key:
return settings[key]
return settings
def remove(self, hash, remove_data=False):
if remove_data:
params = [('action', 'removedata'), ('hash', hash)]
else:
params = [('action', 'remove'), ('hash', hash)]
return self._action(params)
def _action(self, params, body=None, content_type=None):
if not self.token:
return
url = self.base_url + '/gui/' + '?token=' + self.token + '&' + urllib.urlencode(params)
request = urllib2.Request(url)
if body:
request.add_data(body)
request.add_header('Content-length', len(body))
if content_type:
request.add_header('Content-type', content_type)
token = re.search(TOKEN_REGEX, r.text).group(1)
guid = r.cookies['GUID']
cookies = dict(GUID = guid)
return token, cookies
def addfile(self, filepath=None, filename=None, bytes=None):
params = {'action': 'add-file', 'token': self.token}
try:
response = self.opener.open(request)
return response.code, json.loads(response.read())
except urllib2.HTTPError as err:
logger.debug('URL: ' + str(url))
logger.debug('uTorrent webUI raised the following error: ' + str(err))
d = open(filepath, 'rb')
tordata = d.read()
d.close()
except:
logger.warn('Unable to load torrent file. Aborting at this time.')
return 'fail'
files = {'torrent_file': tordata}
try:
r = requests.post(url=self.utorrent_url, auth=self.auth, cookies=self.cookies, params=params, files=files)
except requests.exceptions.RequestException as err:
logger.debug('URL: ' + str(self.utorrent_url))
logger.debug('Error sending to uTorrent Client. uTorrent responded with error: ' + str(err))
return 'fail'
def labelTorrent(hash):
label = mylar.UTORRENT_LABEL
uTorrentClient = utorrentclient()
if label:
uTorrentClient.setprops(hash, 'label', str(label))
# (to-do) verify the hash in order to ensure it's loaded here
if str(r.status_code) == '200':
logger.info('Successfully added torrent to uTorrent client.')
if mylar.UTORRENT_LABEL:
try:
hash = self.calculate_torrent_hash(data=tordata)
self.setlabel(hash)
except:
logger.warn('Unable to set label for torrent.')
return 'pass'
else:
return 'fail'
def removeTorrent(hash, remove_data=False):
uTorrentClient = utorrentclient()
status, torrentList = uTorrentClient.list()
torrents = torrentList['torrents']
for torrent in torrents:
if torrent[0].upper() == hash.upper():
if torrent[21] == 'Finished':
logger.info('%s has finished seeding, removing torrent and data' % torrent[2])
uTorrentClient.remove(hash, remove_data)
return True
def setlabel(self, hash):
params = {'token': self.token, 'action': 'setprops', 'hash': hash, 's': 'label', 'v': str(mylar.UTORRENT_LABEL)}
r = requests.post(url=self.utorrent_url, auth=self.auth, cookies=self.cookies, params=params)
if str(r.status_code) == '200':
logger.info('label ' + str(mylar.UTORRENT_LABEL) + ' successfully applied')
else:
logger.info('Unable to label torrent')
return
def calculate_torrent_hash(link=None, filepath=None, data=None):
thehash = None
if not link:
if filepath:
torrent_file = open(filepath, "rb")
metainfo = bdecode(torrent_file.read())
else:
logger.info(
'%s has not finished seeding yet, torrent will not be removed, will try again on next run' %
torrent[2])
return False
return False
metainfo = bdecode(data)
info = metainfo['info']
thehash = hashlib.sha1(bencode(info)).hexdigest().upper()
logger.info('Hash: ' + thehash)
else:
if link.startswith("magnet:"):
torrent_hash = re.findall("urn:btih:([\w]{32,40})", link)[0]
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).lower()
thehash = torrent_hash.upper()
if thehash is None:
logger.warn('Cannot calculate torrent hash without magnet link or data')
def setSeedRatio(hash, ratio):
uTorrentClient = utorrentclient()
uTorrentClient.setprops(hash, 'seed_override', '1')
if ratio != 0:
uTorrentClient.setprops(hash, 'seed_ratio', ratio * 10)
else:
# TODO passing -1 should be unlimited
uTorrentClient.setprops(hash, 'seed_ratio', -10)
return thehash
def addTorrent(link):
uTorrentClient = utorrentclient()
uTorrentClient.add_url(link)
# not implemented yet #
# def load_torrent(self, filepath):
# start = bool(mylar.UTORRENT_STARTONLOAD)
# logger.info('filepath to torrent file set to : ' + filepath)
#
# torrent = self.addfile(filepath, verify_load=True)
#torrent should return the hash if it's valid and loaded (verify_load checks)
# if not torrent:
# return False
def calculate_torrent_hash(link, data=None):
"""
Calculate the torrent hash from a magnet link or data. Raises a ValueError
when it cannot create a torrent hash given the input data.
"""
# if mylar.UTORRENT_LABEL:
# self.setlabel(torrent)
# logger.info('Setting label for torrent to : ' + mylar.UTORRENT_LABEL)
if link.startswith("magnet:"):
torrent_hash = re.findall("urn:btih:([\w]{32,40})", link)[0]
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).lower()
elif data:
info = bdecode(data)["info"]
torrent_hash = sha1(bencode(info)).hexdigest()
else:
raise ValueError("Cannot calculate torrent hash without magnet link " \
"or data")
logger.debug("Torrent hash: " + torrent_hash)
return torrent_hash.upper()
# logger.info('Successfully loaded torrent.')
# #note that if set_directory is enabled, the torrent has to be started AFTER it's loaded or else it will give chunk errors and not seed
# if start:
# logger.info('[' + str(start) + '] Now starting torrent.')
# torrent.start()
# else:
# logger.info('[' + str(start) + '] Not starting torrent due to configuration setting.')
# return True
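Tying the rewritten client back to the rsscheck.py hunk above, the uTorrent hand-off is now reduced to two calls; a minimal sketch, assuming a .torrent file has already been saved to filepath (the wrapper name is illustrative, not part of the commit):

    import mylar
    from mylar import utorrent, logger

    def send_to_utorrent(filepath, filename):
        uTC = utorrent.utorrentclient()          # host/user/pass come from mylar config
        resp = uTC.addfile(filepath, filename)   # returns 'pass' or 'fail'
        if resp != 'pass':
            logger.warn('Unable to add ' + filename + ' to the uTorrent client')
        return resp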

View File

@ -1763,7 +1763,6 @@ class WebInterface(object):
filterpull.exposed = True
def manualpull(self,weeknumber=None,year=None):
from mylar import weeklypull
if weeknumber:
#threading.Thread(target=mylar.locg.locg,args=[None,weeknumber,year]).start()
mylar.locg.locg(weeknumber=weeknumber,year=year)
@ -1774,7 +1773,6 @@ class WebInterface(object):
manualpull.exposed = True
def pullrecreate(self):
from mylar import weeklypull
myDB = db.DBConnection()
myDB.action("DROP TABLE weekly")
mylar.dbcheck()
@ -1842,13 +1840,16 @@ class WebInterface(object):
# if int(tmpdate) >= int(timenow) and int(tmpdate) == int(pulldate): #int(pulldate) <= int(timenow):
mylar.WANTED_TAB_OFF = False
try:
ab = upc['weeknumber']
bc = upc['year']
ab = int(upc['weeknumber'])
bc = int(upc['year'])
except TypeError:
logger.warn('Weekly Pull hasn\'t finished being generated as of yet (or has yet to initialize). Try to wait a few seconds more to accomodate processing.')
logger.warn('Weekly Pull hasn\'t finished being generated as of yet (or has yet to initialize). Try to wait up to a minute to accommodate processing.')
mylar.WANTED_TAB_OFF = True
return
myDB.action("DROP TABLE weekly")
mylar.dbcheck()
logger.info("Deleted existed pull-list data. Recreating Pull-list...")
forcecheck = 'yes'
return threading.Thread(target=weeklypull.pullit, args=[forcecheck]).start()
if int(upc['weeknumber']) == int(weeknumber) and int(upc['year']) == int(weekyear):
if upc['Status'] == 'Wanted':
@ -3216,6 +3217,10 @@ class WebInterface(object):
mylar.IMPORTBUTTON = True #globally set it to ON after the scan so that it will be picked up.
mylar.IMPORT_STATUS = 'Import completed.'
break
else:
yield ckh[0]['result']
mylar.IMPORTBUTTON = False
break
return
ThreadcomicScan.exposed = True
@ -3254,10 +3259,8 @@ class WebInterface(object):
importResults.exposed = True
def ImportFilelisting(self, comicname, dynamicname, volume):
if type(comicname) != unicode:
comicname = urllib.unquote(comicname).decode('utf-8')
if type(dynamicname) != unicode:
dynamicname = urllib.unquote(dynamicname).decode('utf-8')
comicname = urllib.unquote_plus(helpers.econversion(comicname))
dynamicname = helpers.econversion(urllib.unquote_plus(dynamicname)) #urllib.unquote(dynamicname).decode('utf-8')
myDB = db.DBConnection()
if volume is None or volume == 'None':
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[dynamicname])
@ -3269,9 +3272,9 @@ class WebInterface(object):
filelisting = '<table width="500"><tr><td>'
filelisting += '<center><b>Files that have been scanned in for:</b></center>'
if volume is None or volume == 'None':
filelisting += '<center><b>' + re.sub('\+', ' ', comicname) + '</b></center></td></tr><tr><td>'
filelisting += '<center><b>' + comicname + '</b></center></td></tr><tr><td>'
else:
filelisting += '<center><b>' + re.sub('\+', ' ', comicname) + ' [' + str(volume) + ']</b></center></td></tr><tr><td>'
filelisting += '<center><b>' + comicname + ' [' + str(volume) + ']</b></center></td></tr><tr><td>'
#filelisting += '<div style="height:300px;overflow:scroll;overflow-x:hidden;">'
filelisting += '<div style="display:inline-block;overflow-y:auto:overflow-x:hidden;">'
cnt = 0
@ -3601,7 +3604,7 @@ class WebInterface(object):
if volume is None or volume == 'None':
ctrlVal = {"DynamicName": DynamicName,
"ComicName": ComicName}
"ComicName": ComicName}
else:
ctrlVal = {"DynamicName": DynamicName,
"ComicName": ComicName,
@ -3682,6 +3685,10 @@ class WebInterface(object):
self.addbyid(sr['comicid'], calledby=True, imported=imported, ogcname=ogcname) #imported=yes)
else:
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
mylar.IMPORTLOCK = False
logger.info('[IMPORT] Initial Import complete (I might still be populating the series data).')
@ -4842,22 +4849,42 @@ class WebInterface(object):
return
else:
rp = test.RTorrent()
torrent_info = rp.main(torrent_hash)
torrent_info = rp.main(torrent_hash, check=True)
if torrent_info:
torrent_name = torrent_info['name']
torrent_info['filesize'] = helpers.human_size(torrent_info['total_filesize'])
torrent_info['download'] = helpers.human_size(torrent_info['download_total'])
torrent_info['upload'] = helpers.human_size(torrent_info['upload_total'])
torrent_info['seedtime'] = helpers.humanize_time(amount=int(time.time()) - torrent_info['time_started'])
if torrent_info['completed']:
logger.info("Client: %s", mylar.RTORRENT_HOST)
logger.info("Directory: %s", torrent_info['folder'])
logger.info("Name: %s", torrent_info['name'])
logger.info("Hash: %s", torrent_info['hash'])
logger.info("FileSize: %s", helpers.human_size(torrent_info['total_filesize']))
logger.info("FileSize: %s", torrent_info['filesize'])
logger.info("Completed: %s", torrent_info['completed'])
logger.info("Downloaded: %s", helpers.human_size(torrent_info['download_total']))
logger.info("Uploaded: %s", helpers.human_size(torrent_info['upload_total']))
logger.info("Downloaded: %s", torrent_info['download'])
logger.info("Uploaded: %s", torrent_info['upload'])
logger.info("Ratio: %s", torrent_info['ratio'])
logger.info("Seeding Time: %s", torrent_info['seedtime'])
if torrent_info['label']:
logger.info("Torrent Label: %s", torrent_info['label'])
ti = '<table><tr><td>'
ti += '<center><b>' + torrent_name + '</b></center><br/>'
ti += '<center>' + torrent_info['hash'] + '</center>'
ti += '<tr><td><center>Filesize: ' + torrent_info['filesize'] + '</center></td></tr>'
ti += '<tr><td><center>' + torrent_info['download'] + ' DOWN / ' + torrent_info['upload'] + ' UP</center></td></tr>'
ti += '<tr><td><center>Ratio: ' + str(torrent_info['ratio']) + '</center></td></tr>'
ti += '<tr><td><center>Seedtime: ' + torrent_info['seedtime'] + '</center></td></tr>'
ti += '</table>'
else:
torrent_name = 'Not Found'
ti = 'Torrent not found (' + str(torrent_hash) + ')'
return ti
torrentit.exposed = True
def get_the_hash(self, filepath):
@ -4877,5 +4904,6 @@ class WebInterface(object):
import auth32p
p = auth32p.info32p(test=True)
rtnvalues = p.authenticate()
logger.info('32p return values: ' + str(rtnvalues))
return rtnvalues
test_32p.exposed = True

View File

@ -57,6 +57,7 @@ def pullit(forcecheck=None):
#PULLURL = 'http://www.previewsworld.com/shipping/prevues/newreleases.txt'
PULLURL = 'http://www.previewsworld.com/shipping/newreleases.txt'
newrl = os.path.join(mylar.CACHE_DIR, 'newreleases.txt')
mylar.PULLBYFILE = None
if mylar.ALT_PULL == 1:
#logger.info('[PULL-LIST] The Alt-Pull method is currently broken. Defaulting back to the normal method of grabbing the pull-list.')
@ -75,10 +76,12 @@ def pullit(forcecheck=None):
else:
logger.info('[PULL-LIST] Unable to retrieve weekly pull-list. Dropping down to legacy method of PW-file')
f= urllib.urlretrieve(PULLURL, newrl)
urllib.urlretrieve(PULLURL, newrl)
mylar.PULLBYFILE = True
else:
logger.info('[PULL-LIST] Populating & Loading pull-list data from file')
f = urllib.urlretrieve(PULLURL, newrl)
urllib.urlretrieve(PULLURL, newrl)
mylar.PULLBYFILE = True
#set newrl to a manual file to pull in against that particular file
#newrl = '/mylar/tmp/newreleases.txt'
@ -86,7 +89,7 @@ def pullit(forcecheck=None):
#newtxtfile header info ("SHIPDATE\tPUBLISHER\tISSUE\tCOMIC\tEXTRA\tSTATUS\n")
#STATUS denotes default status to be applied to pulllist in Mylar (default = Skipped)
if mylar.ALT_PULL != 2:
if mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True:
newfl = os.path.join(mylar.CACHE_DIR, 'Clean-newreleases.txt')
newtxtfile = open(newfl, 'wb')
@ -386,7 +389,7 @@ def pullit(forcecheck=None):
dupefound = "no"
#-- remove html tags when alt_pull is enabled
if mylar.ALT_PULL:
if mylar.ALT_PULL == 1:
if '&amp;' in comicnm:
comicnm = re.sub('&amp;', '&', comicnm).strip()
if '&amp;' in pub:
@ -423,7 +426,7 @@ def pullit(forcecheck=None):
newtxtfile.close()
if pulldate == '00000000' and mylar.ALT_PULL != 2:
if all([pulldate == '00000000', mylar.ALT_PULL != 2]) or mylar.PULLBYFILE is True:
pulldate = shipdate
try:
@ -471,7 +474,7 @@ def pullit(forcecheck=None):
logger.info(u"Weekly Pull List successfully loaded.")
if mylar.ALT_PULL != 2:
if mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True:
pullitcheck(forcecheck=forcecheck)
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
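The net effect of the PULLBYFILE flag threaded through the hunks above can be reduced to a single predicate; a hedged sketch (the helper name is illustrative, not part of the commit):

    import mylar

    def use_legacy_pull_processing():
        # True when the pull-list was built from the newreleases.txt file:
        # either ALT_PULL was never 2, or the alt_pull 2 method was offline
        # this run and pullit() fell back to the file, flipping PULLBYFILE on
        return mylar.ALT_PULL != 2 or mylar.PULLBYFILE is True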