FIX: (#1222) Fixed usage with NZBHydra - Mylar will now properly grab the nzb files, with proper logging and handling of Failed Downloading.
IMP: Duplicate Directory Dump option available in the Configuration GUI. Any duplicates discovered during post-processing will be moved into this directory (if enabled), depending on the dupe constraints.
FIX: Better handling of titles containing '&' and '-' (as well as annuals) when adding a series, which should mean more accurate results when trying to add a series.
FIX: (#1142) If files didn't have the pages field metadata within the comicinfo.xml file, Mylar would error out and either fail to display the issue, or fail to scan the issue during an import scan.
FIX: When adding/refreshing a series, if the cover image from CV is unable to be retrieved or is not of an adequate size, fall back to a different quality image from CV.
FIX: When refreshing/adding a series, annuals will only be checked against once (previously the entire annual check ran twice).
FIX: During RSS scans/checks, if a title in the results had an encoded ampersand (&amp;), it would be stored as the HTML entity, which would never match up when doing actual comparison searches.
IMP: Fixed usage of the feedparser module in RSS feeds so that it only parses the retrieved data and doesn't do the actual polling against the URL (the requests module does that now).
IMP: Added proper handling of error code 910 with dognzb (max API hits), so that once it hits the 910 error it will disable dognzb as a provider.
FIX: When attempting to display issue details on a series detail page (the 'i' icon in the issues table), if the metadata in the .cbz cannot be read or doesn't exist, a graphical warning is now displayed instead of a 500 error.
IMP: Added fork/fork version/tag to the ComicTagger user-agent/version.
IMP: Removed the configparser dependency from ComicTagger.
FIX: When performing searches, improved volume label matching regardless of how the volume label is represented.
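
For the feedparser change above, a minimal sketch of the new flow, assuming a provider feed URL (the RSS polling code itself isn't part of this diff, so the function name is illustrative):

import requests
import feedparser

def poll_feed(url):
    # requests does the actual polling, so timeouts, headers and SSL
    # verification stay under our control...
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    # ...and feedparser only parses the data it is handed.
    return feedparser.parse(r.content)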

This commit is contained in:
evilhero 2016-03-04 15:04:19 -05:00
parent b4f6d9a12a
commit 1e0b319d2b
19 changed files with 736 additions and 591 deletions

View File

@ -2,7 +2,8 @@ Mylar is an automated Comic Book (cbr/cbz) downloader program heavily-based on t
Yes, it does work; yes, there are still bugs, and for that reason I still consider it the definition of an 'Alpha Release'.
This application requires a version of the 2.7.x Python branch for the best results (3.x is not supported)
-REQUIREMENTS-
- at least Python 2.7.9 for proper usage (3.x is not supported).
** NEW **
You will need to get your OWN ComicVine API Key for this application to fully work.
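
For reference, a quick interpreter check against that 2.7.9 floor (illustrative only - this is not part of Mylar's startup code):

import sys

if sys.version_info < (2, 7, 9) or sys.version_info[0] >= 3:
    sys.stderr.write('Mylar requires Python 2.7.9+ (3.x is not supported)\n')
    sys.exit(1)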

View File

@ -706,16 +706,28 @@
</select>
</div>
</fieldset>
<fieldset>
<legend>Duplicate Dump Folder</legend>
<div class="row checkbox left clearfix">
<input type="checkbox" id="enable_ddump" onclick="initConfigCheckbox($this));" name="ddump" value="1" ${config['ddump']} /><label>Enable Duplicate Dump Folder</label>
</div>
<div class="config">
<div class="row"">
<label>Full path to move files determined to be duplicates</label>
<input type="text" name="duplicate_dump" value="${config['duplicate_dump']}" size="30">
</div>
</div>
</fieldset>
<fieldset>
<legend>Failed Download Handling</legend>
<div class="row checkbox left clearfix">
<input type="checkbox" id="enable_failed" onclick="initConfigCheckbox($this));" name="failed_download_handling" value="1" ${config['failed_download_handling']} /><label>Enable Failed Download Handling</label>
</div>
<div class="config">
<div class="row checkbox left clearfix">
<div class="checkbox left clearfix">
<input type="checkbox" name="failed_auto" value="1" ${config['failed_auto']} /><label>Enable Automatic-Retry for Failed Downloads</label>
</div>
</div>
</fieldset>
</td>
@ -1401,6 +1413,7 @@
initConfigCheckbox("#replace_spaces");
initConfigCheckbox("#use_minsize");
initConfigCheckbox("#use_maxsize");
initConfigCheckbox("#enable_ddump");
initConfigCheckbox("#enable_failed");
initConfigCheckbox("#enable_meta");
initConfigCheckbox("#zero_level");

Binary file not shown.

New image added (12 KiB).

View File

@ -107,7 +107,7 @@ class ComicVineTalker(QObject):
else:
self.api_key = ComicVineTalker.api_key
self.cv_headers = {'User-Agent': 'ComicTagger.[ninjas.walk.alone.fork] - UserAgent + CV Rate Limiting / 1.01 - KATANA'}
self.cv_headers = {'User-Agent': 'ComicTagger ' + str(ctversion.version) + ' [' + ctversion.fork + ' / ' + ctversion.fork_tag + ']'}
self.log_func = None
def setLogFunc( self , log_func ):
@ -449,8 +449,10 @@ class ComicVineTalker(QObject):
if settings.use_series_start_as_volume:
metadata.volume = volume_results['start_year']
metadata.notes = "Tagged with ComicTagger {0} using info from Comic Vine on {1}. [Issue ID {2}]".format(
metadata.notes = "Tagged with the {1} fork of ComicTagger {0} using info from Comic Vine on {3}. [Issue ID {4}]".format(
ctversion.version,
ctversion.fork,
ctversion.fork_tag,
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
issue_results['id'])
#metadata.notes += issue_results['site_detail_url']

View File

@ -1,3 +1,5 @@
# This file should contain only these comments, and the line below.
# Used by packaging makefiles and app
version="1.1.15-beta"
version="1.20.0"
fork="ninjas.walk.alone"
fork_tag="SHURIKEN"

View File

@ -318,13 +318,9 @@ For more help visit the wiki at: http://code.google.com/p/comictagger/
if o == "--only-set-cv-key":
self.only_set_key = True
if o == "--version":
print "ComicTagger {0}: Copyright (c) 2012-2014 Anthony Beville".format(ctversion.version)
print "ComicTagger {0} [{1} / {2}]".format(ctversion.version, ctversion.fork, ctversion.fork_tag)
print "Modified version of ComicTagger (Copyright (c) 2012-2014 Anthony Beville)"
print "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)"
new_version = VersionChecker().getLatestVersion("", False)
if new_version is not None and new_version != ctversion.version:
print "----------------------------------------"
print "New version available online: {0}".format(new_version)
print "----------------------------------------"
sys.exit(0)
if o in ("-t", "--type"):
if a.lower() == "cr":

lib/comictaggerlib/readme Normal file (+10 lines)
View File

@ -0,0 +1,10 @@
ComicTagger.[ninjas.walk.alone]
Fork: Ninjas Walk Alone
Modified ComicTagger 1.1.15-beta to include some patches that allow for better integration with both ComicVine and Mylar. These fixes are:
- UserAgent is now included to allow for usage with ComicVine
- ComicVine rate limiting is now applied per API request (1 request / 2s)
- Removed the requirement for configparser
- Changed queries to ComicVine to utilize the requests module
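
The "1 request / 2s" throttle mentioned above could look roughly like this - a sketch under that assumption, not the fork's actual implementation:

import time

class RateLimiter(object):
    """Allow at most one call every `interval` seconds."""
    def __init__(self, interval=2.0):
        self.interval = interval
        self.last_call = 0.0

    def wait(self):
        remaining = self.interval - (time.time() - self.last_call)
        if remaining > 0:
            time.sleep(remaining)
        self.last_call = time.time()

cv_limiter = RateLimiter(2.0)
# call cv_limiter.wait() immediately before each ComicVine request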

View File

@ -20,13 +20,20 @@ limitations under the License.
import os
import sys
import configparser
import platform
import codecs
import uuid
import utils
try:
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
if config_path not in sys.path:
sys.path.append(config_path)
from lib.configobj import ConfigObj
except ImportError:
print "Unable to use configobj module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting."
class ComicTaggerSettings:
@staticmethod
@ -137,13 +144,14 @@ class ComicTaggerSettings:
self.folder = ""
self.setDefaultValues()
self.config = configparser.RawConfigParser()
#self.config = configparser.RawConfigParser()
self.folder = ComicTaggerSettings.getSettingsFolder()
if not os.path.exists( self.folder ):
os.makedirs( self.folder )
self.settings_file = os.path.join( self.folder, "settings")
self.settings_file = os.path.join( self.folder, "settings.ini")
self.CFG = ConfigObj(self.settings_file, encoding='utf-8')
# if config file doesn't exist, write one out
if not os.path.exists( self.settings_file ):
@ -182,204 +190,176 @@ class ComicTaggerSettings:
os.unlink( self.settings_file )
self.__init__()
def CheckSection(self, sec):
""" Check if INI section exists, if not create it """
try:
self.CFG[sec]
return True
except:
self.CFG[sec] = {}
return False
################################################################################
# Check_setting_int #
################################################################################
def check_setting_int(self, config, cfg_name, item_name, def_val):
try:
my_val = int(config[cfg_name][item_name])
except:
my_val = def_val
try:
config[cfg_name][item_name] = my_val
except:
config[cfg_name] = {}
config[cfg_name][item_name] = my_val
return my_val
################################################################################
# Check_setting_str #
################################################################################
def check_setting_str(self, config, cfg_name, item_name, def_val, log=True):
try:
my_val = config[cfg_name][item_name]
except:
my_val = def_val
try:
config[cfg_name][item_name] = my_val
except:
config[cfg_name] = {}
config[cfg_name][item_name] = my_val
return my_val
def load(self):
def readline_generator(f):
line = f.readline()
while line:
yield line
line = f.readline()
self.rar_exe_path = self.check_setting_str(self.CFG, 'settings', 'rar_exe_path', '')
self.unrar_exe_path = self.check_setting_str(self.CFG, 'settings', 'unrar_exe_path', '')
self.check_for_new_version = bool(self.check_setting_int(self.CFG, 'settings', 'check_for_new_version', 0))
self.send_usage_stats = bool(self.check_setting_int(self.CFG, 'settings', 'send_usage_stats', 0))
self.install_id = self.check_setting_str(self.CFG, 'auto', 'install_id', '')
self.last_selected_load_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_load_data_style', '')
self.last_selected_save_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_save_data_style', '')
self.last_opened_folder = self.check_setting_str(self.CFG, 'auto', 'last_opened_folder', '')
self.last_main_window_width = self.check_setting_str(self.CFG, 'auto', 'last_main_window_width', '')
self.last_main_window_height = self.check_setting_str(self.CFG, 'auto', 'last_main_window_height', '')
self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width', '')
self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width', '')
self.last_filelist_sorted_column = self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_column', '')
self.last_filelist_sorted_order = self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_order', '')
self.last_main_window_x = self.check_setting_str(self.CFG, 'auto', 'last_main_window_x', '')
self.last_main_window_y = self.check_setting_str(self.CFG, 'auto', 'last_main_window_y','')
self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width','')
self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width','')
#self.config.readfp(codecs.open(self.settings_file, "r", "utf8"))
self.config.read_file(readline_generator(codecs.open(self.settings_file, "r", "utf8")))
self.rar_exe_path = self.config.get( 'settings', 'rar_exe_path' )
self.unrar_exe_path = self.config.get( 'settings', 'unrar_exe_path' )
if self.config.has_option('settings', 'check_for_new_version'):
self.check_for_new_version = self.config.getboolean( 'settings', 'check_for_new_version' )
if self.config.has_option('settings', 'send_usage_stats'):
self.send_usage_stats = self.config.getboolean( 'settings', 'send_usage_stats' )
if self.config.has_option('auto', 'install_id'):
self.install_id = self.config.get( 'auto', 'install_id' )
if self.config.has_option('auto', 'last_selected_load_data_style'):
self.last_selected_load_data_style = self.config.getint( 'auto', 'last_selected_load_data_style' )
if self.config.has_option('auto', 'last_selected_save_data_style'):
self.last_selected_save_data_style = self.config.getint( 'auto', 'last_selected_save_data_style' )
if self.config.has_option('auto', 'last_opened_folder'):
self.last_opened_folder = self.config.get( 'auto', 'last_opened_folder' )
if self.config.has_option('auto', 'last_main_window_width'):
self.last_main_window_width = self.config.getint( 'auto', 'last_main_window_width' )
if self.config.has_option('auto', 'last_main_window_height'):
self.last_main_window_height = self.config.getint( 'auto', 'last_main_window_height' )
if self.config.has_option('auto', 'last_main_window_x'):
self.last_main_window_x = self.config.getint( 'auto', 'last_main_window_x' )
if self.config.has_option('auto', 'last_main_window_y'):
self.last_main_window_y = self.config.getint( 'auto', 'last_main_window_y' )
if self.config.has_option('auto', 'last_form_side_width'):
self.last_form_side_width = self.config.getint( 'auto', 'last_form_side_width' )
if self.config.has_option('auto', 'last_list_side_width'):
self.last_list_side_width = self.config.getint( 'auto', 'last_list_side_width' )
if self.config.has_option('auto', 'last_filelist_sorted_column'):
self.last_filelist_sorted_column = self.config.getint( 'auto', 'last_filelist_sorted_column' )
if self.config.has_option('auto', 'last_filelist_sorted_order'):
self.last_filelist_sorted_order = self.config.getint( 'auto', 'last_filelist_sorted_order' )
self.id_length_delta_thresh = self.check_setting_str(self.CFG, 'identifier', 'id_length_delta_thresh', '')
self.id_publisher_blacklist = self.check_setting_str(self.CFG, 'identifier', 'id_publisher_blacklist', '')
if self.config.has_option('identifier', 'id_length_delta_thresh'):
self.id_length_delta_thresh = self.config.getint( 'identifier', 'id_length_delta_thresh' )
if self.config.has_option('identifier', 'id_publisher_blacklist'):
self.id_publisher_blacklist = self.config.get( 'identifier', 'id_publisher_blacklist' )
self.parse_scan_info = bool(self.check_setting_int(self.CFG, 'filenameparser', 'parse_scan_info', 0))
if self.config.has_option('filenameparser', 'parse_scan_info'):
self.parse_scan_info = self.config.getboolean( 'filenameparser', 'parse_scan_info' )
self.ask_about_cbi_in_rar = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_cbi_in_rar', 0))
self.show_disclaimer = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_disclaimer', 0))
self.dont_notify_about_this_version = self.check_setting_str(self.CFG, 'dialogflags', 'dont_notify_about_this_version', '')
self.ask_about_usage_stats = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_usage_stats', 0))
self.show_no_unrar_warning = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_no_unrar_warning', 0))
self.use_series_start_as_volume = bool(self.check_setting_int(self.CFG, 'comicvine', 'use_series_start_as_volume', 0))
self.clear_form_before_populating_from_cv = bool(self.check_setting_int(self.CFG, 'comicvine', 'clear_form_before_populating_from_cv', 0))
self.remove_html_tables = bool(self.check_setting_int(self.CFG, 'comicvine', 'remove_html_tables', 0))
self.cv_api_key = self.check_setting_str(self.CFG, 'comicvine', 'cv_api_key', '')
if self.config.has_option('dialogflags', 'ask_about_cbi_in_rar'):
self.ask_about_cbi_in_rar = self.config.getboolean( 'dialogflags', 'ask_about_cbi_in_rar' )
if self.config.has_option('dialogflags', 'show_disclaimer'):
self.show_disclaimer = self.config.getboolean( 'dialogflags', 'show_disclaimer' )
if self.config.has_option('dialogflags', 'dont_notify_about_this_version'):
self.dont_notify_about_this_version = self.config.get( 'dialogflags', 'dont_notify_about_this_version' )
if self.config.has_option('dialogflags', 'ask_about_usage_stats'):
self.ask_about_usage_stats = self.config.getboolean( 'dialogflags', 'ask_about_usage_stats' )
if self.config.has_option('dialogflags', 'show_no_unrar_warning'):
self.show_no_unrar_warning = self.config.getboolean( 'dialogflags', 'show_no_unrar_warning' )
if self.config.has_option('comicvine', 'use_series_start_as_volume'):
self.use_series_start_as_volume = self.config.getboolean( 'comicvine', 'use_series_start_as_volume' )
if self.config.has_option('comicvine', 'clear_form_before_populating_from_cv'):
self.clear_form_before_populating_from_cv = self.config.getboolean( 'comicvine', 'clear_form_before_populating_from_cv' )
if self.config.has_option('comicvine', 'remove_html_tables'):
self.remove_html_tables = self.config.getboolean( 'comicvine', 'remove_html_tables' )
if self.config.has_option('comicvine', 'cv_api_key'):
self.cv_api_key = self.config.get( 'comicvine', 'cv_api_key' )
if self.config.has_option('cbl_transform', 'assume_lone_credit_is_primary'):
self.assume_lone_credit_is_primary = self.config.getboolean( 'cbl_transform', 'assume_lone_credit_is_primary' )
if self.config.has_option('cbl_transform', 'copy_characters_to_tags'):
self.copy_characters_to_tags = self.config.getboolean( 'cbl_transform', 'copy_characters_to_tags' )
if self.config.has_option('cbl_transform', 'copy_teams_to_tags'):
self.copy_teams_to_tags = self.config.getboolean( 'cbl_transform', 'copy_teams_to_tags' )
if self.config.has_option('cbl_transform', 'copy_locations_to_tags'):
self.copy_locations_to_tags = self.config.getboolean( 'cbl_transform', 'copy_locations_to_tags' )
if self.config.has_option('cbl_transform', 'copy_notes_to_comments'):
self.copy_notes_to_comments = self.config.getboolean( 'cbl_transform', 'copy_notes_to_comments' )
if self.config.has_option('cbl_transform', 'copy_storyarcs_to_tags'):
self.copy_storyarcs_to_tags = self.config.getboolean( 'cbl_transform', 'copy_storyarcs_to_tags' )
if self.config.has_option('cbl_transform', 'copy_weblink_to_comments'):
self.copy_weblink_to_comments = self.config.getboolean( 'cbl_transform', 'copy_weblink_to_comments' )
if self.config.has_option('cbl_transform', 'apply_cbl_transform_on_cv_import'):
self.apply_cbl_transform_on_cv_import = self.config.getboolean( 'cbl_transform', 'apply_cbl_transform_on_cv_import' )
if self.config.has_option('cbl_transform', 'apply_cbl_transform_on_bulk_operation'):
self.apply_cbl_transform_on_bulk_operation = self.config.getboolean( 'cbl_transform', 'apply_cbl_transform_on_bulk_operation' )
self.assume_lone_credit_is_primary = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'assume_lone_credit_is_primary', 0))
self.copy_characters_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_characters_to_tags', 0))
self.copy_teams_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_teams_to_tags', 0))
self.copy_locations_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_locations_to_tags', 0))
self.copy_notes_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_notes_to_comments', 0))
self.copy_storyarcs_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_storyarcs_to_tags', 0))
self.copy_weblink_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_weblink_to_comments', 0))
self.apply_cbl_transform_on_cv_import = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_cv_import', 0))
self.apply_cbl_transform_on_bulk_operation = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_bulk_operation', 0))
if self.config.has_option('rename', 'rename_template'):
self.rename_template = self.config.get( 'rename', 'rename_template' )
if self.config.has_option('rename', 'rename_issue_number_padding'):
self.rename_issue_number_padding = self.config.getint( 'rename', 'rename_issue_number_padding' )
if self.config.has_option('rename', 'rename_use_smart_string_cleanup'):
self.rename_use_smart_string_cleanup = self.config.getboolean( 'rename', 'rename_use_smart_string_cleanup' )
if self.config.has_option('rename', 'rename_extension_based_on_archive'):
self.rename_extension_based_on_archive = self.config.getboolean( 'rename', 'rename_extension_based_on_archive' )
self.rename_template = self.check_setting_str(self.CFG, 'rename', 'rename_template', '')
self.rename_issue_number_padding = self.check_setting_int(self.CFG, 'rename', 'rename_issue_number_padding', 0)
self.rename_use_smart_string_cleanup = bool(self.check_setting_int(self.CFG, 'rename', 'rename_use_smart_string_cleanup', 0))
self.rename_extension_based_on_archive = bool(self.check_setting_int(self.CFG, 'rename', 'rename_extension_based_on_archive', 0))
if self.config.has_option('autotag', 'save_on_low_confidence'):
self.save_on_low_confidence = self.config.getboolean( 'autotag', 'save_on_low_confidence' )
if self.config.has_option('autotag', 'dont_use_year_when_identifying'):
self.dont_use_year_when_identifying = self.config.getboolean( 'autotag', 'dont_use_year_when_identifying' )
if self.config.has_option('autotag', 'assume_1_if_no_issue_num'):
self.assume_1_if_no_issue_num = self.config.getboolean( 'autotag', 'assume_1_if_no_issue_num' )
if self.config.has_option('autotag', 'ignore_leading_numbers_in_filename'):
self.ignore_leading_numbers_in_filename = self.config.getboolean( 'autotag', 'ignore_leading_numbers_in_filename' )
if self.config.has_option('autotag', 'remove_archive_after_successful_match'):
self.remove_archive_after_successful_match = self.config.getboolean( 'autotag', 'remove_archive_after_successful_match' )
if self.config.has_option('autotag', 'wait_and_retry_on_rate_limit'):
self.wait_and_retry_on_rate_limit = self.config.getboolean( 'autotag', 'wait_and_retry_on_rate_limit' )
self.save_on_low_confidence = bool(self.check_setting_int(self.CFG, 'autotag', 'save_on_low_confidence', 0))
self.dont_use_year_when_identifying = bool(self.check_setting_int(self.CFG, 'autotag', 'dont_use_year_when_identifying', 0))
self.assume_1_if_no_issue_num = bool(self.check_setting_int(self.CFG, 'autotag', 'assume_1_if_no_issue_num', 0))
self.ignore_leading_numbers_in_filename = bool(self.check_setting_int(self.CFG, 'autotag', 'ignore_leading_numbers_in_filename', 0))
self.remove_archive_after_successful_match = bool(self.check_setting_int(self.CFG, 'autotag', 'remove_archive_after_successful_match', 0))
self.wait_and_retry_on_rate_limit = bool(self.check_setting_int(self.CFG, 'autotag', 'wait_and_retry_on_rate_limit', 0))
def save( self ):
new_config = ConfigObj()
new_config.filename = self.settings_file
if not self.config.has_section( 'settings' ):
self.config.add_section( 'settings' )
new_config.encoding = 'UTF8'
new_config['settings'] = {}
new_config['settings']['check_for_new_version'] = self.check_for_new_version
new_config['settings']['rar_exe_path'] = self.rar_exe_path
new_config['settings']['unrar_exe_path'] = self.unrar_exe_path
new_config['settings']['send_usage_stats'] = self.send_usage_stats
self.config.set( 'settings', 'check_for_new_version', self.check_for_new_version )
self.config.set( 'settings', 'rar_exe_path', self.rar_exe_path )
self.config.set( 'settings', 'unrar_exe_path', self.unrar_exe_path )
self.config.set( 'settings', 'send_usage_stats', self.send_usage_stats )
new_config.write()
new_config['auto'] = {}
new_config['auto']['install_id'] = self.install_id
new_config['auto']['last_selected_load_data_style'] = self.last_selected_load_data_style
new_config['auto']['last_selected_save_data_style'] = self.last_selected_save_data_style
new_config['auto']['last_opened_folder'] = self.last_opened_folder
new_config['auto']['last_main_window_width'] = self.last_main_window_width
new_config['auto']['last_main_window_height'] = self.last_main_window_height
new_config['auto']['last_main_window_x'] = self.last_main_window_x
new_config['auto']['last_main_window_y'] = self.last_main_window_y
new_config['auto']['last_form_side_width'] = self.last_form_side_width
new_config['auto']['last_list_side_width'] = self.last_list_side_width
new_config['auto']['last_filelist_sorted_column'] = self.last_filelist_sorted_column
new_config['auto']['last_filelist_sorted_order'] = self.last_filelist_sorted_order
if not self.config.has_section( 'auto' ):
self.config.add_section( 'auto' )
new_config['identifier'] = {}
new_config['identifier']['id_length_delta_thresh'] = self.id_length_delta_thresh
new_config['identifier']['id_publisher_blacklist'] = self.id_publisher_blacklist
self.config.set( 'auto', 'install_id', self.install_id )
self.config.set( 'auto', 'last_selected_load_data_style', self.last_selected_load_data_style )
self.config.set( 'auto', 'last_selected_save_data_style', self.last_selected_save_data_style )
self.config.set( 'auto', 'last_opened_folder', self.last_opened_folder )
self.config.set( 'auto', 'last_main_window_width', self.last_main_window_width )
self.config.set( 'auto', 'last_main_window_height', self.last_main_window_height )
self.config.set( 'auto', 'last_main_window_x', self.last_main_window_x )
self.config.set( 'auto', 'last_main_window_y', self.last_main_window_y )
self.config.set( 'auto', 'last_form_side_width', self.last_form_side_width )
self.config.set( 'auto', 'last_list_side_width', self.last_list_side_width )
self.config.set( 'auto', 'last_filelist_sorted_column', self.last_filelist_sorted_column )
self.config.set( 'auto', 'last_filelist_sorted_order', self.last_filelist_sorted_order )
new_config['dialogflags'] = {}
new_config['dialogflags']['ask_about_cbi_in_rar'] = self.ask_about_cbi_in_rar
new_config['dialogflags']['show_disclaimer'] = self.show_disclaimer
new_config['dialogflags']['dont_notify_about_this_version'] = self.dont_notify_about_this_version
new_config['dialogflags']['ask_about_usage_stats'] = self.ask_about_usage_stats
new_config['dialogflags']['show_no_unrar_warning'] = self.show_no_unrar_warning
if not self.config.has_section( 'identifier' ):
self.config.add_section( 'identifier' )
self.config.set( 'identifier', 'id_length_delta_thresh', self.id_length_delta_thresh )
self.config.set( 'identifier', 'id_publisher_blacklist', self.id_publisher_blacklist )
if not self.config.has_section( 'dialogflags' ):
self.config.add_section( 'dialogflags' )
self.config.set( 'dialogflags', 'ask_about_cbi_in_rar', self.ask_about_cbi_in_rar )
self.config.set( 'dialogflags', 'show_disclaimer', self.show_disclaimer )
self.config.set( 'dialogflags', 'dont_notify_about_this_version', self.dont_notify_about_this_version )
self.config.set( 'dialogflags', 'ask_about_usage_stats', self.ask_about_usage_stats )
self.config.set( 'dialogflags', 'show_no_unrar_warning', self.show_no_unrar_warning )
if not self.config.has_section( 'filenameparser' ):
self.config.add_section( 'filenameparser' )
new_config['filenameparser'] = {}
new_config['filenameparser']['parse_scan_info'] = self.parse_scan_info
self.config.set( 'filenameparser', 'parse_scan_info', self.parse_scan_info )
if not self.config.has_section( 'comicvine' ):
self.config.add_section( 'comicvine' )
self.config.set( 'comicvine', 'use_series_start_as_volume', self.use_series_start_as_volume )
self.config.set( 'comicvine', 'clear_form_before_populating_from_cv', self.clear_form_before_populating_from_cv )
self.config.set( 'comicvine', 'remove_html_tables', self.remove_html_tables )
self.config.set( 'comicvine', 'cv_api_key', self.cv_api_key )
new_config['comicvine'] = {}
new_config['comicvine']['use_series_start_as_volume'] = self.use_series_start_as_volume
new_config['comicvine']['clear_form_before_populating_from_cv'] = self.clear_form_before_populating_from_cv
new_config['comicvine']['remove_html_tables'] = self.remove_html_tables
new_config['comicvine']['cv_api_key'] = self.cv_api_key
if not self.config.has_section( 'cbl_transform' ):
self.config.add_section( 'cbl_transform' )
new_config['cbl_transform'] = {}
new_config['cbl_transform']['assume_lone_credit_is_primary'] = self.assume_lone_credit_is_primary
new_config['cbl_transform']['copy_characters_to_tags'] = self.copy_characters_to_tags
new_config['cbl_transform']['copy_teams_to_tags'] = self.copy_teams_to_tags
new_config['cbl_transform']['copy_locations_to_tags'] = self.copy_locations_to_tags
new_config['cbl_transform']['copy_storyarcs_to_tags'] = self.copy_storyarcs_to_tags
new_config['cbl_transform']['copy_notes_to_comments'] = self.copy_notes_to_comments
new_config['cbl_transform']['copy_weblink_to_comments'] = self.copy_weblink_to_comments
new_config['cbl_transform']['apply_cbl_transform_on_cv_import'] = self.apply_cbl_transform_on_cv_import
new_config['cbl_transform']['apply_cbl_transform_on_bulk_operation'] = self.apply_cbl_transform_on_bulk_operation
self.config.set( 'cbl_transform', 'assume_lone_credit_is_primary', self.assume_lone_credit_is_primary )
self.config.set( 'cbl_transform', 'copy_characters_to_tags', self.copy_characters_to_tags )
self.config.set( 'cbl_transform', 'copy_teams_to_tags', self.copy_teams_to_tags )
self.config.set( 'cbl_transform', 'copy_locations_to_tags', self.copy_locations_to_tags )
self.config.set( 'cbl_transform', 'copy_storyarcs_to_tags', self.copy_storyarcs_to_tags )
self.config.set( 'cbl_transform', 'copy_notes_to_comments', self.copy_notes_to_comments )
self.config.set( 'cbl_transform', 'copy_weblink_to_comments', self.copy_weblink_to_comments )
self.config.set( 'cbl_transform', 'apply_cbl_transform_on_cv_import', self.apply_cbl_transform_on_cv_import )
self.config.set( 'cbl_transform', 'apply_cbl_transform_on_bulk_operation', self.apply_cbl_transform_on_bulk_operation )
if not self.config.has_section( 'rename' ):
self.config.add_section( 'rename' )
self.config.set( 'rename', 'rename_template', self.rename_template )
self.config.set( 'rename', 'rename_issue_number_padding', self.rename_issue_number_padding )
self.config.set( 'rename', 'rename_use_smart_string_cleanup', self.rename_use_smart_string_cleanup )
self.config.set( 'rename', 'rename_extension_based_on_archive', self.rename_extension_based_on_archive )
if not self.config.has_section( 'autotag' ):
self.config.add_section( 'autotag' )
self.config.set( 'autotag', 'save_on_low_confidence', self.save_on_low_confidence )
self.config.set( 'autotag', 'dont_use_year_when_identifying', self.dont_use_year_when_identifying )
self.config.set( 'autotag', 'assume_1_if_no_issue_num', self.assume_1_if_no_issue_num )
self.config.set( 'autotag', 'ignore_leading_numbers_in_filename', self.ignore_leading_numbers_in_filename )
self.config.set( 'autotag', 'remove_archive_after_successful_match', self.remove_archive_after_successful_match )
self.config.set( 'autotag', 'wait_and_retry_on_rate_limit', self.wait_and_retry_on_rate_limit )
with codecs.open( self.settings_file, 'wb', 'utf8') as configfile:
self.config.write(configfile)
new_config['rename'] = {}
new_config['rename']['rename_template'] = self.rename_template
new_config['rename']['rename_issue_number_padding'] = self.rename_issue_number_padding
new_config['rename']['rename_use_smart_string_cleanup'] = self.rename_use_smart_string_cleanup
new_config['rename']['rename_extension_based_on_archive'] = self.rename_extension_based_on_archive
new_config['autotag'] = {}
new_config['autotag']['save_on_low_confidence'] = self.save_on_low_confidence
new_config['autotag']['dont_use_year_when_identifying'] = self.dont_use_year_when_identifying
new_config['autotag']['assume_1_if_no_issue_num'] = self.assume_1_if_no_issue_num
new_config['autotag']['ignore_leading_numbers_in_filename'] = self.ignore_leading_numbers_in_filename
new_config['autotag']['remove_archive_after_successful_match'] = self.remove_archive_after_successful_match
new_config['autotag']['wait_and_retry_on_rate_limit'] = self.wait_and_retry_on_rate_limit
#make sure the basedir is cached, in case we're on windows running a script from frozen binary
ComicTaggerSettings.baseDir()
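
The migration pattern in this hunk, reduced to its essentials. ConfigObj hands every value back as a string, which is why the check_setting_int/check_setting_str helpers above coerce types and backfill defaults:

from lib.configobj import ConfigObj

cfg = ConfigObj('settings.ini', encoding='utf-8')

# writing: plain dict assignment, then a single write() call
cfg['settings'] = {}
cfg['settings']['check_for_new_version'] = 0
cfg.write()

# reading: values come back as strings
check = bool(int(cfg['settings']['check_for_new_version']))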

View File

@ -242,7 +242,15 @@ class FailedProcessor(object):
# Perhaps later improvement might be to break it down by provider so that Mylar will attempt to
# download same issues on different providers (albeit it shouldn't matter, if it's broke it's broke).
logger.info('prov : ' + str(self.prov) + '[' + str(self.id) + ']')
chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone()
# if this is from nzbhydra, we need to rejig the id line so that the searchid is removed since it's always unique to the search.
if 'indexerguid' in self.id:
st = self.id.find('searchid:')
end = self.id.find(',',st)
self.id = '%' + self.id[:st] + '%' + self.id[end+1:len(self.id)-1] + '%'
chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID LIKE ?', [self.id]).fetchone()
else:
chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone()
if chk_fail is None:
logger.info(module + ' Successfully marked this download as Good for downloadable content')
return 'Good'
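
Worked through with a hypothetical id layout (the real NZBHydra format isn't shown in this diff), the string surgery above turns a search-specific id into a SQL LIKE pattern:

# hypothetical NZBHydra result id - layout assumed for illustration
nzbid = 'indexerguid:abc123,searchid:9999,prov:hydra,'
st = nzbid.find('searchid:')
end = nzbid.find(',', st)
pattern = '%' + nzbid[:st] + '%' + nzbid[end+1:len(nzbid)-1] + '%'
# pattern == '%indexerguid:abc123,%prov:hydra%'
# the per-search searchid segment becomes a wildcard, so the same release
# matches in the failed table no matter which Hydra search found it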

View File

@ -163,6 +163,29 @@ class PostProcessor(object):
self._log(u"Unable to run extra_script: " + str(script_cmd))
def duplicate_process(self, dupeinfo):
#path to move 'should' be the entire path to the given file
path_to_move = dupeinfo[0]['to_dupe']
file_to_move = os.path.split(path_to_move)[1]
if dupeinfo[0]['action'] == 'dupe_src':
logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.')
else:
logger.info('[DUPLICATE-CLEANUP] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.')
#check to make sure duplicate_dump directory exists:
checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]')
#this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected.
try:
shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
except (OSError, IOError):
logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return False
logger.info('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return True
def Process(self):
module = self.module
self._log("nzb name: " + self.nzb_name)
@ -930,16 +953,22 @@ class PostProcessor(object):
break
dupthis = helpers.duplicate_filecheck(ml['ComicLocation'], ComicID=comicid, IssueID=issueid)
if dupthis == "write":
if dupthis[0]['action'] == 'dupe_src' or dupthis[0]['action'] == 'dupe_file':
#check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention.
#'dupe_file' - do not write new file as existing file is better quality
#'dupe_src' - write new file, as existing file is a lesser quality (dupe)
if mylar.DUPLICATE_DUMP:
dupchkit = self.duplicate_process(dupthis)
if dupchkit is False:
logger.warn('Unable to move duplicate file - skipping post-processing of this file.')
continue
if dupthis[0]['action'] == "write" or dupthis[0]['action'] == 'dupe_src':
stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']'
self.Process_next(comicid, issueid, issuenumOG, ml, stat)
dupthis = None
else:
pass
logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues.')
return
else:
@ -947,7 +976,22 @@ class PostProcessor(object):
issuenumOG = issuenzb['Issue_Number']
#the self.nzb_folder should contain only the existing filename
dupthis = helpers.duplicate_filecheck(self.nzb_folder, ComicID=comicid, IssueID=issueid)
if dupthis == "write":
if dupthis[0]['action'] == 'dupe_src' or dupthis[0]['action'] == 'dupe_file':
#check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention.
#'dupe_file' - do not write new file as existing file is better quality
#'dupe_src' - write new file, as existing file is a lesser quality (dupe)
if mylar.DUPLICATE_DUMP:
dupchkit = self.duplicate_process(dupthis)
if dupchkit is False:
logger.warn('Unable to move duplicate file - skipping post-processing of this file.')
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)
if dupthis[0]['action'] == "write" or dupthis[0]['action'] == 'dupe_src':
return self.Process_next(comicid, issueid, issuenumOG)
else:
self.valreturn.append({"self.log": self.log,
@ -957,7 +1001,6 @@ class PostProcessor(object):
return self.queue.put(self.valreturn)
def Process_next(self, comicid, issueid, issuenumOG, ml=None, stat=None):
if stat is None: stat = ' [1/1]'
module = self.module

View File

@ -165,6 +165,8 @@ CHECK_FOLDER = None
ENABLE_CHECK_FOLDER = False
INTERFACE = None
DUPECONSTRAINT = None
DDUMP = 0
DUPLICATE_DUMP = None
PREFERRED_QUALITY = 0
CORRECT_METADATA = False
MOVE_FILES = False
@ -415,7 +417,7 @@ def initialize():
global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_RATE, CV_HEADERS, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, UPCOMING_SNATCHED, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, OLDCONFIG_VERSION, OS_DETECT, \
queue, LOCAL_IP, EXT_IP, HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, HTTPS_FORCE_ON, HOST_RETURN, API_ENABLED, API_KEY, DOWNLOAD_APIKEY, LAUNCH_BROWSER, GIT_PATH, SAFESTART, AUTO_UPDATE, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, GIT_USER, GIT_BRANCH, USER_AGENT, DESTINATION_DIR, MULTIPLE_DEST_DIRS, CREATE_FOLDERS, DELETE_REMOVE_DIR, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, DDUMP, DUPLICATE_DUMP, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \
DOWNLOAD_SCAN_INTERVAL, FOLDER_SCAN_LOG_VERBOSE, IMPORTLOCK, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_TO_MYLAR, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, NZBSU_VERIFY, DOGNZB, DOGNZB_APIKEY, DOGNZB_VERIFY, \
NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_VERIFY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
@ -515,6 +517,8 @@ def initialize():
ENABLE_CHECK_FOLDER = bool(check_setting_int(CFG, 'General', 'enable_check_folder', 0))
INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default')
DUPECONSTRAINT = check_setting_str(CFG, 'General', 'dupeconstraint', 'filesize')
DDUMP = bool(check_setting_int(CFG, 'General', 'ddump', 0))
DUPLICATE_DUMP = check_setting_str(CFG, 'General', 'duplicate_dump', '')
AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1))
COMIC_COVER_LOCAL = bool(check_setting_int(CFG, 'General', 'comic_cover_local', 0))
@ -1256,6 +1260,8 @@ def config_write():
new_config['General']['check_folder'] = CHECK_FOLDER
new_config['General']['interface'] = INTERFACE
new_config['General']['dupeconstraint'] = DUPECONSTRAINT
new_config['General']['ddump'] = DDUMP
new_config['General']['duplicate_dump'] = DUPLICATE_DUMP
new_config['General']['autowant_all'] = int(AUTOWANT_ALL)
new_config['General']['autowant_upcoming'] = int(AUTOWANT_UPCOMING)
new_config['General']['preferred_quality'] = int(PREFERRED_QUALITY)
@ -1937,7 +1943,6 @@ def dbcheck():
logger.info('Correcting Null entries that make the main page break on startup.')
c.execute("UPDATE Comics SET LatestDate='Unknown' WHERE LatestDate='None' or LatestDate is NULL")
conn.commit()
c.close()

View File

@ -71,14 +71,6 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd)
#check for dependencies here - configparser
try:
import configparser
except ImportError:
logger.warn(module + ' configparser not found on system. Please install manually in order to write metadata')
logger.warn(module + ' continuing with PostProcessing, but I am not using metadata.')
return "fail"
if not os.path.exists(unrar_cmd):
logger.fdebug(module + ' WARNING: cannot find the unrar command.')
@ -144,10 +136,10 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
ctversion = subprocess.check_output([sys.executable, comictagger_cmd, "--version"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
#logger.warn(module + "[WARNING] "command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
logger.warn(module + '[WARNING] Make sure that you have configparser installed.')
logger.warn(module + '[WARNING] Make sure that you are using the comictagger included with Mylar.')
return "fail"
ctend = ctversion.find(':')
ctend = ctversion.find(']')
ctcheck = re.sub("[^0-9]", "", ctversion[:ctend])
ctcheck = re.sub('\.', '', ctcheck).strip()
if int(ctcheck) >= int('1115'): # (v1.1.15)
@ -159,7 +151,7 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
use_cvapi = "True"
tagoptions.extend(["--cv-api-key", mylar.COMICVINE_API])
else:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.')
logger.fdebug(module + ' ' + ctversion[:ctend+1] + ' being used - personal ComicVine API key not supported in this version. Good luck.')
use_cvapi = "False"
i = 1
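
Worked through with this fork's own banner (the first line of the --version output shown earlier), the digits-only version check resolves like this:

import re

ctversion = 'ComicTagger 1.20.0 [ninjas.walk.alone / SHURIKEN]'
ctend = ctversion.find(']')
ctcheck = re.sub('[^0-9]', '', ctversion[:ctend])  # -> '1200'
print int(ctcheck) >= 1115  # True, so the personal CV API key path is used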
@ -231,7 +223,8 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = p.communicate()
logger.info(out)
logger.info(err)
if initial_ctrun and 'exported successfully' in out:
logger.fdebug(module + '[COMIC-TAGGER] : ' +str(out))
#Archive exported successfully to: X-Men v4 008 (2014) (Digital) (Nahga-Empire).cbz (Original deleted)

View File

@ -1301,13 +1301,13 @@ def IssueDetails(filelocation, IssueID=None):
#print str(data)
issuetag = 'xml'
#looks for the first page and assumes it's the cover. (Alternate covers handled later on)
elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions):
elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close
cover = "found"
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions):
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
altlist = ('00a', '00b', '00c', '00d', '00e')
for alt in altlist:
@ -1433,17 +1433,25 @@ def IssueDetails(filelocation, IssueID=None):
pagecount = 0
logger.fdebug("number of pages I counted: " + str(pagecount))
i = 0
while (i < int(pagecount)):
pageinfo = result.getElementsByTagName('Page')[i].attributes
attrib = pageinfo.getNamedItem('Image')
logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value))
att = pageinfo.getNamedItem('Type')
logger.fdebug('pageinfo: ' + str(pageinfo))
if att.value == 'FrontCover':
logger.fdebug('FrontCover detected. Extracting.')
break
i+=1
else:
try:
pageinfo = result.getElementsByTagName('Page')[0].attributes
pageinfo_test = pageinfo is not None
except:
pageinfo_test = False
if pageinfo_test:
while (i < int(pagecount)):
pageinfo = result.getElementsByTagName('Page')[i].attributes
attrib = pageinfo.getNamedItem('Image')
logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value))
att = pageinfo.getNamedItem('Type')
logger.fdebug('pageinfo: ' + str(pageinfo))
if att.value == 'FrontCover':
logger.fdebug('FrontCover detected. Extracting.')
break
i+=1
elif issuetag == 'comment':
stripline = 'Archive: ' + dstlocation
data = re.sub(stripline, '', data.encode("utf-8")).strip()
if data is None or data == '':
@ -1515,6 +1523,10 @@ def IssueDetails(filelocation, IssueID=None):
except:
pagecount = "None"
else:
logger.warn('Unable to locate any metadata within cbz file. Tag this file and try again if necessary.')
return
issuedetails.append({"title": issue_title,
"series": series_title,
"volume": series_volume,
@ -1609,17 +1621,21 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
logger.info('[DUPECHECK] Unable to find corresponding Issue within the DB. Do you still have the series on your watchlist?')
return
series = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [dupchk['ComicID']]).fetchone()
#if it's a retry and the file was already snatched, the status is Snatched and won't hit the dupecheck.
#rtnval will be one of 3:
#'write' - write new file
#'dupe_file' - do not write new file as existing file is better quality
#'dupe_src' - write new file, as existing file is a lesser quality (dupe)
rtnval = []
if dupchk['Status'] == 'Downloaded' or dupchk['Status'] == 'Archived':
try:
dupsize = dupchk['ComicSize']
except:
logger.info('[DUPECHECK] Duplication detection returned no hits as this is a new Snatch. This is not a duplicate.')
rtnval = "write"
rtnval.append({'action': "write"})
logger.info('[DUPECHECK] Existing Status already set to ' + dupchk['Status'])
cid = []
if dupsize is None:
@ -1635,11 +1651,13 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
else:
#not sure if this one is correct - should never actually get to this point.
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])})
else:
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])})
else:
logger.info('[DUPECHECK] Existing file :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.')
logger.info('[DUPECHECK] Existing file within db :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.')
#keywords to force keep / delete
#this will be eventually user-controlled via the GUI once the options are enabled.
@ -1648,7 +1666,8 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
logger.info('[DUPECHECK] Existing filesize is 0 as I cannot locate the original entry.')
if dupchk['Status'] == 'Archived':
logger.info('[DUPECHECK] Assuming issue is Archived.')
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': filename})
return rtnval
else:
logger.info('[DUPECHECK] Assuming 0-byte file - this one is gonna get hammered.')
@ -1660,33 +1679,39 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
if dupchk['Location'].endswith('.cbz'):
#keep dupechk['Location']
logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in file : ' + dupchk['Location'])
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': filename})
else:
#keep filename
logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in file : ' + filename)
rtnval = "dupe_src"
rtnval.append({'action': "dupe_src",
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])})
elif 'cbz' in mylar.DUPECONSTRAINT:
if dupchk['Location'].endswith('.cbr'):
#keep dupchk['Location']
logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': filename})
else:
#keep filename
logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
rtnval = "dupe_src"
rtnval.append({'action': "dupe_src",
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])})
if mylar.DUPECONSTRAINT == 'filesize':
if filesz <= int(dupsize) and int(dupsize) != 0:
logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
rtnval = "dupe_file"
rtnval.append({'action': "dupe_file",
'to_dupe': filename})
else:
logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
rtnval = "dupe_src"
rtnval.append({'action': "dupe_src",
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])})
else:
logger.info('[DUPECHECK] Duplication detection returned no hits. This is not a duplicate of anything that I have scanned in as of yet.')
rtnval = "write"
rtnval.append({'action': "write"})
return rtnval
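
A sketch of how callers consume the new list-of-dicts return, mirroring the PostProcessor hunks earlier (handle_dupe and the dump-folder argument are illustrative):

import os
import shutil

def handle_dupe(rtnval, duplicate_dump):
    # returns True if the new file should still be post-processed
    action = rtnval[0]['action']
    if action == 'write':
        return True  # no duplicate found
    # both dupe cases ship the losing file off to the dump folder
    loser = rtnval[0]['to_dupe']
    shutil.move(loser, os.path.join(duplicate_dump, os.path.basename(loser)))
    # 'dupe_src': the existing file was the dupe - keep processing the new one
    # 'dupe_file': the new file was the dupe - stop here
    return action == 'dupe_src'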
def create_https_certificates(ssl_cert, ssl_key):

View File

@ -28,6 +28,9 @@ import shutil
import imghdr
import sqlite3
import cherrypy
import lib.requests as requests
import gzip
from StringIO import StringIO
import mylar
from mylar import logger, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb
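
requests (plus gzip/StringIO for compressed payloads) is imported above for talking to ComicVine directly; the cover-quality fallback described in the commit message presumably follows this shape (a sketch - the function and the size threshold are assumptions, not this file's actual logic):

def fetch_cover(urls, min_bytes=10000):
    # try each CV image quality in order, falling back when the
    # response is missing or implausibly small
    for url in urls:
        try:
            r = requests.get(url, timeout=30)
        except Exception:
            continue
        if r.status_code == 200 and len(r.content) >= min_bytes:
            return r.content
    return None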
@ -169,152 +172,152 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
#since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly.
weeklyissue_check = []
#let's do the Annual check here.
if mylar.ANNUALS_ON:
#we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get
#wiped out.
annualids = [] #to be used to make sure an ID isn't double-loaded
if annload is None:
pass
else:
for manchk in annload:
if manchk['ReleaseComicID'] is not None or manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add.
#print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid)
manualAnnual(manchk['ReleaseComicID'], comic['ComicName'], SeriesYear, comicid)
annualids.append(manchk['ReleaseComicID'])
annualcomicname = re.sub('[\,\:]', '', comic['ComicName'])
#----- CBDB (outdated)
# annuals = comicbookdb.cbdb(annualcomicname, SeriesYear)
# print ("Number of Annuals returned: " + str(annuals['totalissues']))
# nb = 0
# while (nb <= int(annuals['totalissues'])):
# try:
# annualval = annuals['annualslist'][nb]
# except IndexError:
# break
#----
#this issueid doesn't exist at this point since we got the data from cbdb...let's try and figure out
#the issueID for CV based on what we know so we can use that ID (and thereby the metadata too)
#other inherit issue - results below will return the ID for the Series of Annuals, not the series itself.
#sr['comicid'] not the same as comicid for series.
annComicName = annualcomicname + ' annual'
mode = 'series'
#if annuals['totalissues'] is None:
# annissues = 0
#else:
# annissues = annuals['totalissues']
#print "annissues :" + str(annissues)
# annuals happen once / year. determine how many.
annualyear = SeriesYear # no matter what, the year won't be less than this.
#if annualval['AnnualYear'] is None:
# sresults = mb.findComic(annComicName, mode, issue=annissues)
#else:
#sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear'])
#print "annualyear: " + str(annualval['AnnualYear'])
annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear))
sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
type='comic'
if len(sresults) == 1:
logger.fdebug('[IMPORTER-ANNUAL] - 1 result')
if len(sresults) > 0:
logger.fdebug('[IMPORTER-ANNUAL] - there are ' + str(len(sresults)) + ' results.')
num_res = 0
while (num_res < len(sresults)):
sr = sresults[num_res]
logger.fdebug("description:" + sr['description'])
if any(x in sr['description'].lower() for x in annual_types_ignore):
logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
else:
if comicid in sr['description']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
issueid = sr['comicid']
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
if issueid in annualids:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid, 'issue')
if len(issued) is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
else:
n = 0
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL (MAIN)] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])
issnum = str(firstval['Issue_Number'])
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
int_issnum = helpers.issuedigits(issnum)
newCtrl = {"IssueID": issid}
newVals = {"Issue_Number": issnum,
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"IssueName": issname,
"ComicID": comicid,
"ComicName": comic['ComicName'],
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name'],
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
if issuechk is not None and issuetype == 'annual':
logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": stdate})
n+=1
num_res+=1
elif len(sresults) == 0 or len(sresults) is None:
logger.fdebug('[IMPORTER-ANNUAL] - No results, removing the year from the agenda and re-querying.')
sresults, explicit = mb.findComic(annComicName, mode, issue=None)#, explicit=True)
if len(sresults) == 1:
sr = sresults[0]
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
else:
resultset = 0
else:
logger.fdebug('[IMPORTER-ANNUAL] - Returning results to screen - more than one possibility')
for sr in sresults:
if annualyear < sr['comicyear']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear']))
if int(sr['issues']) > (2013 - int(sr['comicyear'])):
logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong')
#newCtrl = {"IssueID": issueid}
#newVals = {"Issue_Number": annualval['AnnualIssue'],
# "IssueDate": annualval['AnnualDate'],
# "IssueName": annualval['AnnualTitle'],
# "ComicID": comicid,
# "Status": "Skipped"}
#myDB.upsert("annuals", newVals, newCtrl)
#nb+=1
# #let's do the Annual check here.
# if mylar.ANNUALS_ON:
# #we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get
# #wiped out.
# annualids = [] #to be used to make sure an ID isn't double-loaded
#
# if annload is None:
# pass
# else:
# for manchk in annload:
# if manchk['ReleaseComicID'] is not None or manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add.
# #print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid)
# manualAnnual(manchk['ReleaseComicID'], comic['ComicName'], SeriesYear, comicid)
# annualids.append(manchk['ReleaseComicID'])
#
# annualcomicname = re.sub('[\,\:]', '', comic['ComicName'])
#
##----- CBDB (outdated)
## annuals = comicbookdb.cbdb(annualcomicname, SeriesYear)
## print ("Number of Annuals returned: " + str(annuals['totalissues']))
## nb = 0
## while (nb <= int(annuals['totalissues'])):
## try:
## annualval = annuals['annualslist'][nb]
## except IndexError:
## break
##----
# #this issueid doesn't exist at this point since we got the data from cbdb...let's try and figure out
# #the issueID for CV based on what we know so we can use that ID (and thereby the metadata too)
#
# #other inherit issue - results below will return the ID for the Series of Annuals, not the series itself.
# #sr['comicid'] not the same as comicid for series.
# annComicName = annualcomicname + ' annual'
# mode = 'series'
# #if annuals['totalissues'] is None:
# # annissues = 0
# #else:
# # annissues = annuals['totalissues']
# #print "annissues :" + str(annissues)
#
# # annuals happen once / year. determine how many.
# annualyear = SeriesYear # no matter what, the year won't be less than this.
# #if annualval['AnnualYear'] is None:
# # sresults = mb.findComic(annComicName, mode, issue=annissues)
# #else:
# #sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear'])
# #print "annualyear: " + str(annualval['AnnualYear'])
# annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
#
# logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear))
# sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
# type='comic'
#
# if len(sresults) == 1:
# logger.fdebug('[IMPORTER-ANNUAL] - 1 result')
# if len(sresults) > 0:
# logger.fdebug('[IMPORTER-ANNUAL] - there are ' + str(len(sresults)) + ' results.')
# num_res = 0
# while (num_res < len(sresults)):
# sr = sresults[num_res]
# #logger.fdebug("description:" + sr['description'])
# if any(x in sr['description'].lower() for x in annual_types_ignore):
# logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
# else:
# if comicid in sr['description']:
# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
# issueid = sr['comicid']
# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
# if issueid in annualids:
# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
# num_res+=1 # need to manually increment since not a for-next loop
# continue
# issued = cv.getComic(issueid, 'issue')
# if len(issued) is None or len(issued) == 0:
# logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
# pass
# else:
# n = 0
# if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
# sr_issues = 1
# else:
# sr_issues = sr['issues']
# logger.fdebug('[IMPORTER-ANNUAL (MAIN)] - There are ' + str(sr_issues) + ' annuals in this series.')
# while (n < int(sr_issues)):
# try:
# firstval = issued['issuechoice'][n]
# except IndexError:
# break
# try:
# cleanname = helpers.cleanName(firstval['Issue_Name'])
# except:
# cleanname = 'None'
# issid = str(firstval['Issue_ID'])
# issnum = str(firstval['Issue_Number'])
# issname = cleanname
# issdate = str(firstval['Issue_Date'])
# stdate = str(firstval['Store_Date'])
# int_issnum = helpers.issuedigits(issnum)
# newCtrl = {"IssueID": issid}
# newVals = {"Issue_Number": issnum,
# "Int_IssueNumber": int_issnum,
# "IssueDate": issdate,
# "ReleaseDate": stdate,
# "IssueName": issname,
# "ComicID": comicid,
# "ComicName": comic['ComicName'],
# "ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
# "ReleaseComicName": sr['name'],
# "Status": "Skipped"}
# myDB.upsert("annuals", newVals, newCtrl)
#
# if issuechk is not None and issuetype == 'annual':
# logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
# if issuechk == int_issnum:
# weeklyissue_check.append({"Int_IssueNumber": int_issnum,
# "Issue_Number": issnum,
# "IssueDate": issdate,
# "ReleaseDate": stdate})
#
# n+=1
# num_res+=1
#
# elif len(sresults) == 0 or len(sresults) is None:
# logger.fdebug('[IMPORTER-ANNUAL] - No results, removing the year from the agenda and re-querying.')
# sresults, explicit = mb.findComic(annComicName, mode, issue=None)#, explicit=True)
# if len(sresults) == 1:
# sr = sresults[0]
# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
# else:
# resultset = 0
# else:
# logger.fdebug('[IMPORTER-ANNUAL] - Returning results to screen - more than one possibility')
# for sr in sresults:
# if annualyear < sr['comicyear']:
# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear']))
# if int(sr['issues']) > (2013 - int(sr['comicyear'])):
# logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong')
#
# #newCtrl = {"IssueID": issueid}
# #newVals = {"Issue_Number": annualval['AnnualIssue'],
# # "IssueDate": annualval['AnnualDate'],
# # "IssueName": annualval['AnnualTitle'],
# # "ComicID": comicid,
# # "Status": "Skipped"}
# #myDB.upsert("annuals", newVals, newCtrl)
# #nb+=1
#parseit.annualCheck(gcomicid=gcdinfo['GCDComicID'], comicid=comicid, comicname=comic['ComicName'], comicyear=SeriesYear)
#comic book location on machine
@ -410,9 +413,6 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
coverfile = os.path.join(mylar.CACHE_DIR, str(comicid) + ".jpg")
#if cover has '+' in url it's malformed, we need to replace '+' with '%20' to retrieve properly.
#thisci = urllib.quote_plus(str(comic['ComicImage']))
#urllib.urlretrieve(str(thisci), str(coverfile))
#new CV API restriction - one api request / second. (probably unnecessary here, but it doesn't hurt)
if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
@ -420,45 +420,63 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
else:
time.sleep(mylar.CVAPI_RATE)
logger.info('Attempting to retrieve the comic image for series')
try:
cimage = re.sub('[\+]', '%20', comic['ComicImage'])
request = urllib2.Request(cimage)#, headers={'Content-Type': 'application/x-www-form-urlencoded'})
#request.add_header('User-Agent', str(mylar.USER_AGENT))
response = urllib2.urlopen(request)
com_image = response.read()
with open(coverfile, 'wb') as the_file:
the_file.write(com_image)
try:
logger.info('Image header check: ' + imghdr.what(coverfile))
except:
logger.info('image is corrupted.')
raise Exception
logger.info('Successfully retrieved cover for ' + comic['ComicName'])
r = requests.get(comic['ComicImage'], params=None, stream=True, headers=mylar.CV_HEADERS)
except Exception, e:
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImage']))
logger.info('Attempting to use alternate image size to get cover.')
#new CV API restriction - one api request / second.
if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
time.sleep(2)
logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'] + ' [Status Code returned: ' + str(r.status_code) + ']')
logger.fdebug('comic image retrieval status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'] + ' [Status Code returned: ' + str(r.status_code) + ']')
coversize = 0
else:
if r.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(r.content)
f = gzip.GzipFile(fileobj=buf)
with open(coverfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
statinfo = os.stat(coverfile)
coversize = statinfo.st_size
if int(coversize) < 35000 or str(r.status_code) != '200':
if str(r.status_code) != '200':
logger.info('Attempting to grab an alternate cover due to problems retrieving the main cover image.')
else:
time.sleep(mylar.CVAPI_RATE)
logger.info('Image size invalid [' + str(coversize) + ' bytes] - trying to get alternate cover image.')
logger.fdebug('invalid image link is here: ' + comic['ComicImage'])
os.remove(coverfile)
logger.info('Attempting to retrieve alternate comic image for the series.')
try:
cimage = re.sub('[\+]', '%20', comic['ComicImageALT'])
request = urllib2.Request(cimage)
response = urllib2.urlopen(request)
com_image = response.read()
with open(coverfile, 'wb') as the_file:
the_file.write(com_image)
r = requests.get(comic['ComicImageALT'], params=None, stream=True, headers=mylar.CV_HEADERS)
logger.info('Successfully retrieved cover for ' + comic['ComicName'])
except Exception, e:
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImageALT']))
logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'] + ' [Status Code returned: ' + str(r.status_code) + ']')
logger.fdebug('comic image retrieval status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'] + ' [Status Code returned: ' + str(r.status_code) + ']')
else:
if r.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(r.content)
f = gzip.GzipFile(fileobj=buf)
with open(coverfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
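
The retrieval flow above reduces to a simple pattern: stream the cover with requests, treat a non-200 status or an undersized file as a failure, and retry once against the alternate-size image. A minimal standalone sketch of that pattern follows; the URLs, destination path, and header handling are placeholders mirroring the checks above, not Mylar's actual configuration.

import os
import requests

MIN_COVER_BYTES = 35000  # assumption: files smaller than this are treated as placeholder images


def fetch_cover(url, dest, headers=None):
    # stream the image to disk; report failure on HTTP errors or tiny files
    try:
        r = requests.get(url, stream=True, headers=headers)
    except requests.RequestException as e:
        print('[%s] Error fetching data using : %s' % (e, url))
        return False
    if r.status_code != 200:
        print('Unable to download image [Status Code returned: %s]' % r.status_code)
        return False
    with open(dest, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
    if os.stat(dest).st_size < MIN_COVER_BYTES:
        os.remove(dest)
        return False
    return True


# hypothetical URLs - retry with the alternate image size if the main cover fails
main_url = 'http://static.comicvine.com/uploads/scale_large/cover.jpg'
alt_url = 'http://static.comicvine.com/uploads/scale_medium/cover.jpg'
if not fetch_cover(main_url, '/tmp/12345.jpg'):
    fetch_cover(alt_url, '/tmp/12345.jpg')
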
@ -1486,7 +1504,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
type='comic'
annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected edition', 'print edition', 'tpb', 'available in print', 'collects'}
if len(sresults) == 1:
logger.fdebug('[IMPORTER-ANNUAL] - 1 result')
@ -1495,79 +1513,83 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
num_res = 0
while (num_res < len(sresults)):
sr = sresults[num_res]
logger.fdebug("description:" + sr['description'])
if any(x in sr['description'].lower() for x in annual_types_ignore):
logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
else:
if comicid in sr['description']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
issueid = sr['comicid']
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
if issueid in annualids:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
#logger.fdebug("description:" + sr['description'])
for x in annual_types_ignore:
if x in sr['description'].lower():
test_id_position = sr['description'].find(comicid)
if test_id_position >= sr['description'].lower().find(x) or test_id_position == -1:
logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
continue
issued = cv.getComic(issueid, 'issue')
if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
if comicid in sr['description']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
issueid = sr['comicid']
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
if issueid in annualids:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid, 'issue')
if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
else:
n = 0
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
n = 0
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])
issnum = str(firstval['Issue_Number'])
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
int_issnum = helpers.issuedigits(issnum)
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])
issnum = str(firstval['Issue_Number'])
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
int_issnum = helpers.issuedigits(issnum)
newVals = {"Issue_Number": issnum,
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"IssueName": issname,
"ComicID": comicid,
"ComicName": ComicName,
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name']}
newVals = {"Issue_Number": issnum,
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"IssueName": issname,
"ComicID": comicid,
"ComicName": ComicName,
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name']}
iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
if iss_exists is None:
datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVals['Status'] = "Wanted"
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:
newVals['Status'] = "Wanted"
else:
newVals['Status'] = "Skipped"
iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
if iss_exists is None:
datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVals['Status'] = "Wanted"
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:
newVals['Status'] = "Wanted"
else:
newVals['Status'] = iss_exists['Status']
newVals['Status'] = "Skipped"
else:
newVals['Status'] = iss_exists['Status']
newCtrl = {"IssueID": issid}
myDB.upsert("annuals", newVals, newCtrl)
newCtrl = {"IssueID": issid}
myDB.upsert("annuals", newVals, newCtrl)
if issuechk is not None and issuetype == 'annual':
#logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": stdate})
if issuechk is not None and issuetype == 'annual':
#logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": stdate})
n+=1
n+=1
num_res+=1
elif sresults is None or len(sresults) == 0:
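
Every comparison in the annual paths above goes through helpers.issuedigits first, so that '2', '002', and '2.1' can be matched as integers. A rough approximation of that normalization follows; the x1000 scaling and the non-numeric fallback value are illustrative assumptions, not Mylar's exact algorithm.

import re


def issuedigits(issnum):
    # map an issue-number string to a sortable int (approximation)
    m = re.match(r'^(\d+)(?:\.(\d+))?', str(issnum).strip())
    if m is None:
        return 9999999  # assumption: sentinel for non-numeric issues ('Annual', 'Special')
    whole = int(m.group(1)) * 1000  # leave room for decimal issues
    frac = int(m.group(2)) if m.group(2) else 0
    return whole + frac


print(issuedigits('2'))    # 2000
print(issuedigits('002'))  # 2000 - leading zeros compare equal
print(issuedigits('2.1'))  # 2001
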

View File

@ -47,9 +47,6 @@ if platform.python_version() == '2.7.6':
def pullsearch(comicapi, comicquery, offset, explicit, type):
u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
u_comicquery = u_comicquery.replace(" ", "%20")
if '-' in u_comicquery:
#cause titles like A-Force will return 16,000+ results otherwise
u_comicquery = '%22' + u_comicquery + '%22'
if explicit == 'all' or explicit == 'loose':
PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,first_issue,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&page=' + str(offset)
@ -78,17 +75,7 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):
except Exception, e:
logger.warn('Error fetching data from ComicVine: %s' % (e))
return
# try:
# file = urllib2.urlopen(PULLURL)
# except urllib2.HTTPError, err:
# logger.error('err : ' + str(err))
# logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
# return
# #convert to string:
# data = file.read()
# #close file because we dont need it anymore:
# file.close()
# #parse the xml you downloaded
dom = parseString(r.content) #(data)
return dom
@ -100,8 +87,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
comiclist = []
arcinfolist = []
chars = set('!?*')
if any((c in chars) for c in name):
chars = set('!?*&-')
if any((c in chars) for c in name) or 'annual' in name:
name = '"' +name +'"'
#print ("limityear: " + str(limityear))
@ -116,10 +103,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
explicit = 'all'
#OR
if ' and ' in comicquery.lower() or ' & ' in comicquery:
if ' and ' in comicquery.lower():
logger.fdebug('Enforcing exact naming match due to operator in title (and)')
explicit = 'all'
elif explicit == 'loose':
if explicit == 'loose':
logger.fdebug('Changing to loose mode - this will match ANY of the search words')
comicquery = name.replace(" ", " OR ")
elif explicit == 'explicit':
@ -127,7 +115,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
comicquery=name.replace(" ", " AND ")
else:
logger.fdebug('Default search mode - this will match on ALL search words')
comicquery = name.replace(" ", " AND ")
#comicquery = name.replace(" ", " AND ")
explicit = 'all'
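
The quoting changes above exist because ComicVine tokenizes on characters like '-', so an unquoted search for a title such as A-Force returns 16,000+ loose matches; wrapping the query in double quotes forces a phrase match. A small sketch of that preparation, using a generic search URL rather than Mylar's actual pullsearch():

import urllib

SPECIAL_CHARS = set('!?*&-')


def prepare_query(name):
    # quote the title when it contains operator-like characters (or 'annual')
    # so the API performs a phrase match instead of a loose token match
    if any(c in SPECIAL_CHARS for c in name) or 'annual' in name.lower():
        name = '"' + name + '"'
    return urllib.quote(name.encode('utf-8').strip())


url = 'http://www.comicvine.com/api/search?resources=volume&query=' + prepare_query('A-Force')
print(url)  # ...query=%22A-Force%22
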

View File

@ -181,6 +181,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
#logger.fdebug('publisher: ' + re.sub("'",'', pub).strip()) #publisher sometimes is given within quotes for some reason, strip 'em.
vol_find = feedme.entries[i].title.find('vol.')
series = feedme.entries[i].title[st_end +1:vol_find].strip()
series = re.sub('&amp;', '&', series).strip()
#logger.fdebug('series title: ' + series)
iss_st = feedme.entries[i].title.find(' - ', vol_find)
vol = re.sub('\.', '', feedme.entries[i].title[vol_find:iss_st]).strip()
@ -266,9 +267,19 @@ def nzbs(provider=None, forcerss=False):
feedthis = []
def _parse_feed(site, url):
def _parse_feed(site, url, verify):
logger.fdebug('[RSS] Fetching items from ' + site)
feedme = feedparser.parse(url, agent=str(mylar.USER_AGENT))
payload = None
headers = {'User-Agent': str(mylar.USER_AGENT)}
try:
r = requests.get(url, params=payload, verify=verify, headers=headers)
except Exception, e:
logger.warn('Error fetching RSS Feed Data from %s: %s' % (site, e))
return
feedme = feedparser.parse(r.content)
feedthis.append({"site": site,
"feed": feedme})
@ -276,8 +287,8 @@ def nzbs(provider=None, forcerss=False):
if mylar.NEWZNAB == 1:
for newznab_host in mylar.EXTRA_NEWZNABS:
logger.fdebug('[RSS] newznab name: ' + str(newznab_host[0]) + ' - enabled: ' + str(newznab_host[4]))
if str(newznab_host[4]) == '1':
logger.fdebug('[RSS] newznab name: ' + str(newznab_host[0]) + ' - enabled: ' + str(newznab_host[5]))
if str(newznab_host[5]) == '1':
newznab_hosts.append(newznab_host)
providercount = len(newznab_hosts) + int(mylar.EXPERIMENTAL == 1) + int(mylar.NZBSU == 1) + int(mylar.DOGNZB == 1)
@ -285,24 +296,24 @@ def nzbs(provider=None, forcerss=False):
if mylar.EXPERIMENTAL == 1:
max_entries = "250" if forcerss else "50"
_parse_feed('experimental', 'http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=' + max_entries + '&more=1')
_parse_feed('experimental', 'http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=' + max_entries + '&more=1', False)
if mylar.NZBSU == 1:
num_items = "&num=100" if forcerss else "" # default is 25
_parse_feed('nzb.su', 'http://api.nzb.su/rss?t=7030&dl=1&i=' + (mylar.NZBSU_UID or '1') + '&r=' + mylar.NZBSU_APIKEY + num_items)
_parse_feed('nzb.su', 'http://api.nzb.su/rss?t=7030&dl=1&i=' + (mylar.NZBSU_UID or '1') + '&r=' + mylar.NZBSU_APIKEY + num_items, bool(mylar.NZBSU_VERIFY))
if mylar.DOGNZB == 1:
num_items = "&num=100" if forcerss else "" # default is 25
_parse_feed('dognzb', 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030' + num_items)
_parse_feed('dognzb', 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030' + num_items, bool(mylar.DOGNZB_VERIFY))
for newznab_host in newznab_hosts:
site = newznab_host[0].rstrip()
(newznabuid, _, newznabcat) = (newznab_host[3] or '').partition('#')
(newznabuid, _, newznabcat) = (newznab_host[4] or '').partition('#')
newznabuid = newznabuid or '1'
newznabcat = newznabcat or '7030'
# 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliability
_parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[2].rstrip())
_parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2]))
feeddata = []
@ -478,19 +489,23 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
torinfo = {}
for tor in tresults:
torsplit = tor['Title'].split('/')
#&amp; has occasionally been brought into the title field incorrectly - patched now, but to include those entries already in the
#cache db that have the incorrect entry, we'll adjust.
torTITLE = re.sub('&amp;', '&', tor['Title']).strip()
torsplit = torTITLE.split('/')
if mylar.PREFERRED_QUALITY == 1:
if 'cbr' in tor['Title']:
if 'cbr' in torTITLE:
logger.fdebug('Quality restriction enforced [ cbr only ]. Accepting result.')
else:
logger.fdebug('Quality restriction enforced [ cbr only ]. Rejecting result.')
elif mylar.PREFERRED_QUALITY == 2:
if 'cbz' in tor['Title']:
if 'cbz' in torTITLE:
logger.fdebug('Quality restriction enforced [ cbz only ]. Accepting result.')
else:
logger.fdebug('Quality restriction enforced [ cbz only ]. Rejecting result.')
logger.fdebug('tor-Title: ' + tor['Title'])
logger.fdebug('tor-Title: ' + torTITLE)
logger.fdebug('there are ' + str(len(torsplit)) + ' sections in this title')
i=0
if nzbprov is not None:
@ -542,7 +557,7 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
logger.fdebug(str(len(formatrem_seriesname)) + ' - formatrem_seriesname :' + formatrem_seriesname.lower())
if formatrem_seriesname.lower() in formatrem_torsplit.lower() or any(x.lower() in formatrem_torsplit.lower() for x in AS_Alt):
logger.fdebug('matched to : ' + tor['Title'])
logger.fdebug('matched to : ' + torTITLE)
logger.fdebug('matched on series title: ' + seriesname)
titleend = formatrem_torsplit[len(formatrem_seriesname):]
titleend = re.sub('\-', '', titleend) #remove the '-' which is unnecessary
@ -556,15 +571,15 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
extra = ''
#the title on 32P has a mish-mash of crap... ignore everything after cbz/cbr to clean it up
ctitle = tor['Title'].find('cbr')
ctitle = torTITLE.find('cbr')
if ctitle == 0:
ctitle = tor['Title'].find('cbz')
ctitle = torTITLE.find('cbz')
if ctitle == 0:
ctitle = tor['Title'].find('none')
ctitle = torTITLE.find('none')
if ctitle == 0:
logger.fdebug('cannot determine title properly - ignoring for now.')
continue
cttitle = tor['Title'][:ctitle]
cttitle = torTITLE[:ctitle]
if tor['Site'] == '32P':
st_pub = rebuiltline.find('(')

View File

@ -223,6 +223,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
newznab_host = None
searchprov = prov_order[prov_count].lower()
if searchprov == 'dognzb' and mylar.DOGNZB == 0:
#since dognzb could hit its 50-hit daily api limit in the middle of a search run, check on each pass whether
#it has been auto-disabled (it gets disabled on maxing out the API hits) and skip it if so
prov_count+=1
continue
if searchmode == 'rss':
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
@ -451,6 +456,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
while (findloop < findcount):
#logger.fdebug('findloop: ' + str(findloop) + ' / findcount: ' + str(findcount))
comsrc = comsearch
if nzbprov == 'dognzb' and not mylar.DOGNZB:
foundc = "no"
done = True
break
while (cmloopit >= 1):
#if issue_except is None: issue_exc = ''
#else: issue_exc = issue_except
@ -568,14 +577,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
headers = {'User-Agent': str(mylar.USER_AGENT)}
payload = None
if findurl.startswith('https'):
if findurl.startswith('https:') and verify == False:
try:
from lib.requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
else:
elif findurl.startswith('http:') and verify == True:
verify = False
#logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + findurl)
@ -591,10 +600,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
data = False
logger.info(r.content)
logger.info('status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to download torrent from ' + nzbprov + ' [Status Code returned: ' + str(r.status_code) + ']')
logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
data = r.content
@ -602,11 +612,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
bb = feedparser.parse(data)
else:
bb = "no results"
#logger.info('Search results:' + str(bb))
try:
if bb['feed']['error']:
logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description']))
bb = "no results"
if bb['feed']['error']['code'] == '910':
logger.warn('DAILY API limit reached. Disabling provider usage until 12:01am')
mylar.DOGNZB = 0
foundc = False
done = True
break
except:
#logger.info('no errors on data retrieval...proceeding')
pass
@ -829,18 +845,28 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
ComVersChk = 0
ctchk = cleantitle.split()
ctchk_indexes = []
volfound = False
vol_label = None
vol_nono = []
new_cleantitle = []
fndcomicversion = None
for ct in ctchk:
if any([ct.lower().startswith('v') and ct[1:].isdigit(), ct.lower()[:3] == 'vol', volfound == True]):
if volfound == True:
logger.fdebug('Split Volume label detected - i.e. Vol 4. Attempting to adjust.')
logger.fdebug('Split Volume label detected [' + ct + '] - i.e. Vol 4. Attempting to adjust.')
if ct.isdigit():
vol_label = vol_label + ' ' + str(ct)
vol_nono.append(ctchk.index(ct))
#recreate the cleantitle, with the volume label completely removed (but stored for comparison later)
ct = 'v' + str(ct)
ctchk_indexes.extend(range(0, len(ctchk)))
logger.fdebug('ctchk_indexes: ' + str(ctchk_indexes))
for i in ctchk_indexes:
if i not in vol_nono:
new_cleantitle.append(ctchk[i])
cleantitle = ' '.join(new_cleantitle)
logger.fdebug('Reformed cleantitle (with NO volume label): ' + cleantitle)
volfound = False
cleantitle = re.sub(vol_label, ct, cleantitle).strip()
tmpsplit = ct
if tmpsplit.lower().startswith('vol'):
logger.fdebug('volume detected - stripping and re-analyzing for volume label.')
@ -850,7 +876,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#if vol label set as 'Vol 4' it will obliterate the Vol, but pass over the '4' - set
#volfound to True so that it can loop back around.
if not tmpsplit.isdigit():
vol_label = ct #store the wording of how the Vol is defined so we can skip it later on.
#vol_label = ct #store the wording of how the Vol is defined so we can skip it later on.
vol_nono.append(ctchk.index(ct))
volfound = True
continue
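
The index bookkeeping above (ctchk_indexes and vol_nono) rebuilds cleantitle with every volume-label token removed, while keeping a normalized label for the later version comparison. A compact sketch of the same idea, assuming whitespace-separated tokens; the handling of the 'v4', 'vol4', 'vol 4', and 'volume 4' forms is illustrative, not Mylar's exact parser:

def strip_volume_label(title):
    # return (title with the volume label removed, normalized label or None)
    keep, vol, pending = [], None, False
    for tok in title.split():
        low = tok.lower().rstrip('.')
        if pending:
            pending = False
            if tok.isdigit():  # split form: 'Vol' followed by '4'
                vol = 'v' + tok
                continue
        if low in ('vol', 'volume'):
            pending = True
            continue
        if low.startswith('vol') and low[3:].isdigit():  # fused form: 'vol4'
            vol = 'v' + low[3:]
            continue
        if low.startswith('v') and low[1:].isdigit():  # compact form: 'v2'
            vol = 'v' + low[1:]
            continue
        keep.append(tok)
    return ' '.join(keep), vol


print(strip_volume_label('A-Force Vol 4 052'))  # ('A-Force 052', 'v4')
print(strip_volume_label('Avengers v2 001'))    # ('Avengers 001', 'v2')
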
@ -1251,26 +1278,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# instead of the Series they belong to (V2012 vs V2013)
if annualize == "true" and int(ComicYear) == int(F_ComicVersion):
logger.fdebug("We matched on versions for annuals " + str(fndcomicversion))
scount+=1
cvers = "true"
#scount+=1
#cvers = "true"
elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion):
logger.fdebug("We matched on versions..." + str(fndcomicversion))
scount+=1
cvers = "true"
#scount+=1
#cvers = "true"
else:
logger.fdebug("Versions wrong. Ignoring possible match.")
scount = 0
cvers = "false"
#scount = 0
#cvers = "false"
if cvers == "true":
#if cvers == "true":
#since we matched on versions, let's remove it entirely to improve matching.
logger.fdebug('Removing versioning [' + fndcomicversion + '] from nzb filename to improve matching algorithms.')
cissb4vers = re.sub(fndcomicversion, "", comic_iss_b4).strip()
logger.fdebug('New b4split : ' + str(cissb4vers))
splitit = cissb4vers.split(None)
splitst -=1
#logger.fdebug('Removing versioning [' + fndcomicversion + '] from nzb filename to improve matching algorithms.')
#cissb4vers = re.sub(fndcomicversion, "", comic_iss_b4).strip()
#logger.fdebug('New b4split : ' + str(cissb4vers))
#splitit = cissb4vers.split(None)
#splitst -=1
#do an initial check
initialchk = 'ok'
@ -1800,8 +1827,9 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
else:
host_newznab_fix = host_newznab
if 'warp?x=' in link:
logger.fdebug('NZBMegaSearch url detected. Adjusting...')
#account for nzbmegasearch & nzbhydra
if 'warp?x=' in link or 'indexerguid' in link:
logger.fdebug('NZBMegaSearch / NZBHydra url detected. Adjusting...')
nzbmega = True
else:
apikey = newznab[3].rstrip()
@ -2395,7 +2423,7 @@ def generate_id(nzbprov, link):
url_parts = urlparse.urlparse(link)
path_parts = url_parts[2].rpartition('/')
nzbid = path_parts[0].rsplit('/', 1)[1]
elif nzbprov == 'newznab':
elif nzbprov == 'newznab':
#if in format of http://newznab/getnzb/<id>.nzb&i=1&r=apikey
tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url.
if 'warp' in urlparse.urlparse(link)[2] and 'x=' in tmpid:
@ -2406,6 +2434,8 @@ def generate_id(nzbprov, link):
# for the geek in all of us...
st = tmpid.find('&id')
end = tmpid.find('&', st +1)
if end == -1:
end = len(tmpid)
nzbid = re.sub('&id=', '', tmpid[st:end]).strip()
elif nzbprov == 'Torznab':
if mylar.TORZNAB_HOST.endswith('/'):
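
Two of the search changes above act as a circuit breaker for dognzb: when a response carries error code 910 (daily API limit reached), mylar.DOGNZB is flipped off, and both the provider loop and NZB_SEARCH re-check that flag on every pass so the provider is skipped for the rest of the run. The control flow in miniature; the provider table and the canned response are stand-ins for illustration:

PROVIDERS = {'dognzb': True, 'nzb.su': True}  # enabled flags (stand-ins)


def run_search(provider, query):
    # a real implementation would query the provider's API; this fakes a
    # dognzb response that has maxed out its daily hits
    if provider == 'dognzb':
        return {'error': {'code': '910', 'description': 'api limit reached'}}
    return {'results': []}


for provider in list(PROVIDERS):
    if not PROVIDERS[provider]:
        continue  # re-checked on every pass, so mid-run disables take effect
    response = run_search(provider, 'Invincible 127')
    error = response.get('error')
    if error is not None and error.get('code') == '910':
        print('%s: daily API limit reached - disabling provider' % provider)
        PROVIDERS[provider] = False
        continue
    # process response['results'] here
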

View File

@ -3425,6 +3425,8 @@ class WebInterface(object):
"maxsize": mylar.MAXSIZE,
"interface_list": interface_list,
"dupeconstraint": mylar.DUPECONSTRAINT,
"ddump": helpers.checked(mylar.DDUMP),
"duplicate_dump": mylar.DUPLICATE_DUMP,
"autowant_all": helpers.checked(mylar.AUTOWANT_ALL),
"autowant_upcoming": helpers.checked(mylar.AUTOWANT_UPCOMING),
"comic_cover_local": helpers.checked(mylar.COMIC_COVER_LOCAL),
@ -3689,7 +3691,7 @@ class WebInterface(object):
prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None,
pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, file_opts=None, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, chowner=None, chgroup=None,
tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, **kwargs):
tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, ddump=0, duplicate_dump=None, **kwargs):
mylar.COMICVINE_API = comicvine_api
mylar.HTTP_HOST = http_host
mylar.HTTP_PORT = http_port
@ -3815,6 +3817,8 @@ class WebInterface(object):
mylar.COMIC_COVER_LOCAL = comic_cover_local
mylar.INTERFACE = interface
mylar.DUPECONSTRAINT = dupeconstraint
mylar.DDUMP = ddump
mylar.DUPLICATE_DUMP = duplicate_dump
mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts
mylar.EXTRA_SCRIPTS = extra_scripts
mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts
@ -4082,43 +4086,51 @@ class WebInterface(object):
filelocation = filelocation.encode('ASCII')
filelocation = urllib.unquote_plus(filelocation).decode('utf8')
issuedetails = helpers.IssueDetails(filelocation)
#print str(issuedetails)
issueinfo = '<table width="500"><tr><td>'
issueinfo += '<img style="float: left; padding-right: 10px" src=' + issuedetails[0]['IssueImage'] + ' height="400" width="263">'
issueinfo += '<h1><center><b>' + issuedetails[0]['series'] + '</br>[#' + issuedetails[0]['issue_number'] + ']</b></center></h1>'
issueinfo += '<center>"' + issuedetails[0]['title'] + '"</center></br>'
issueinfo += '</br><p class="alignleft">' + str(issuedetails[0]['pagecount']) + ' pages</p>'
if issuedetails[0]['day'] is None:
issueinfo += '<p class="alignright">(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + ')</p></br>'
if issuedetails:
#print str(issuedetails)
issueinfo = '<table width="500"><tr><td>'
issueinfo += '<img style="float: left; padding-right: 10px" src=' + issuedetails[0]['IssueImage'] + ' height="400" width="263">'
issueinfo += '<h1><center><b>' + issuedetails[0]['series'] + '</br>[#' + issuedetails[0]['issue_number'] + ']</b></center></h1>'
issueinfo += '<center>"' + issuedetails[0]['title'] + '"</center></br>'
issueinfo += '</br><p class="alignleft">' + str(issuedetails[0]['pagecount']) + ' pages</p>'
if issuedetails[0]['day'] is None:
issueinfo += '<p class="alignright">(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + ')</p></br>'
else:
issueinfo += '<p class="alignright">(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + '-' + str(issuedetails[0]['day']) + ')</p></br>'
if not issuedetails[0]['writer'] == 'None':
issueinfo += 'Writer: ' + issuedetails[0]['writer'] + '</br>'
if not issuedetails[0]['penciller'] == 'None':
issueinfo += 'Penciller: ' + issuedetails[0]['penciller'] + '</br>'
if not issuedetails[0]['inker'] == 'None':
issueinfo += 'Inker: ' + issuedetails[0]['inker'] + '</br>'
if not issuedetails[0]['colorist'] == 'None':
issueinfo += 'Colorist: ' + issuedetails[0]['colorist'] + '</br>'
if not issuedetails[0]['letterer'] == 'None':
issueinfo += 'Letterer: ' + issuedetails[0]['letterer'] + '</br>'
if not issuedetails[0]['editor'] == 'None':
issueinfo += 'Editor: ' + issuedetails[0]['editor'] + '</br>'
issueinfo += '</td></tr>'
#issueinfo += '<img src="interfaces/default/images/rename.png" height="25" width="25"></td></tr>'
if len(issuedetails[0]['summary']) > 1000:
issuesumm = issuedetails[0]['summary'][:1000] + '...'
else:
issuesumm = issuedetails[0]['summary']
issueinfo += '<tr><td>Summary: ' + issuesumm + '</br></td></tr>'
issueinfo += '<tr><td><center>' + os.path.split(filelocation)[1] + '</center>'
issueinfo += '</td></tr></table>'
else:
issueinfo += '<p class="alignright">(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + '-' + str(issuedetails[0]['day']) + ')</p></br>'
if not issuedetails[0]['writer'] == 'None':
issueinfo += 'Writer: ' + issuedetails[0]['writer'] + '</br>'
if not issuedetails[0]['penciller'] == 'None':
issueinfo += 'Penciller: ' + issuedetails[0]['penciller'] + '</br>'
if not issuedetails[0]['inker'] == 'None':
issueinfo += 'Inker: ' + issuedetails[0]['inker'] + '</br>'
if not issuedetails[0]['colorist'] == 'None':
issueinfo += 'Colorist: ' + issuedetails[0]['colorist'] + '</br>'
if not issuedetails[0]['letterer'] == 'None':
issueinfo += 'Letterer: ' + issuedetails[0]['letterer'] + '</br>'
if not issuedetails[0]['editor'] == 'None':
issueinfo += 'Editor: ' + issuedetails[0]['editor'] + '</br>'
issueinfo += '</td></tr>'
#issueinfo += '<img src="interfaces/default/images/rename.png" height="25" width="25"></td></tr>'
if len(issuedetails[0]['summary']) > 1000:
issuesumm = issuedetails[0]['summary'][:1000] + '...'
else:
issuesumm = issuedetails[0]['summary']
issueinfo += '<tr><td>Summary: ' + issuesumm + '</br></td></tr>'
issueinfo += '<tr><td><center>' + os.path.split(filelocation)[1] + '</center>'
issueinfo += '</td></tr></table>'
ErrorPNG = 'interfaces/default/images/symbol_exclamation.png'
issueinfo = '<table width="300"><tr><td>'
issueinfo += '<img style="float: left; padding-right: 10px" src=' + ErrorPNG + ' height="128" width="128">'
issueinfo += '<h1><center><b>ERROR</b></center></h1></br>'
issueinfo += '<center>Unable to retrieve metadata from within cbz file</center></br>'
issueinfo += '<center>Maybe you should try and tag the file again?</center></br>'
issueinfo += '<tr><td><center>' + os.path.split(filelocation)[1] + '</center>'
issueinfo += '</td></tr></table>'
return issueinfo
#import json
#json_dump = json.dumps(issuedetails)
#json_dump = json_dump.replace("\\","\\\\")
#print 'json_dump:' + str(json_dump)
#return json_dump
IssueInfo.exposed = True
def manual_metatag(self, dirName, issueid, filename, comicid, comversion):
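
The IssueInfo change above wraps the whole detail table in an 'if issuedetails:' guard so that a cbz with missing or unreadable metadata renders a warning panel instead of raising a 500. The same defensive idea in miniature, reading ComicInfo.xml directly with zipfile; the helper name and its return shape are illustrative, not Mylar's actual IssueDetails:

import zipfile
import xml.etree.ElementTree as ET


def read_cbz_metadata(path):
    # return a dict of ComicInfo.xml fields, or None when anything is missing
    # or unreadable - callers show an error panel on None instead of crashing
    try:
        with zipfile.ZipFile(path) as cbz:
            raw = cbz.read('ComicInfo.xml')  # KeyError if the file isn't tagged
        root = ET.fromstring(raw)
    except (IOError, KeyError, zipfile.BadZipfile, ET.ParseError):
        return None
    return {'series': root.findtext('Series'),
            'number': root.findtext('Number'),
            # a missing <PageCount> falls back to '0' rather than erroring out
            'pagecount': root.findtext('PageCount', default='0')}


details = read_cbz_metadata('issue.cbz')
if details is None:
    print('Unable to retrieve metadata from within cbz file')
else:
    print('%s #%s (%s pages)' % (details['series'], details['number'], details['pagecount']))
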

View File

@ -754,8 +754,8 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
break
else:
#logger.fdebug('issuedate:' + str(datevalues[0]['issuedate']))
#logger.fdebug('status:' + str(datevalues[0]['status']))
logger.fdebug('issuedate:' + str(datevalues[0]['issuedate']))
logger.fdebug('status:' + str(datevalues[0]['status']))
datestatus = datevalues[0]['status']
validcheck = checkthis(datevalues[0]['issuedate'], datestatus, usedate)
if validcheck == True: