Merge branch 'development'

Louis Vézina 2020-04-11 22:19:35 -04:00
commit 5dcd835308
127 changed files with 7699 additions and 1794 deletions

View File

@ -3,12 +3,9 @@ Bazarr is a companion application to Sonarr and Radarr. It manages and downloads
Be aware that Bazarr doesn't scan the disk to detect series and movies: it only takes care of the series and movies that are indexed in Sonarr and Radarr.
## Support on Beerpay
## Support on PayPal
At the request of some users, here is a way to show your appreciation for the effort that goes into developing Bazarr:
[![Beerpay](https://beerpay.io/morpheus65535/bazarr/badge.svg?style=beer-square)](https://beerpay.io/morpheus65535/bazarr)
You can also make a wish but keep in mind that we do not commit to make it happen:
[![Beerpay](https://beerpay.io/morpheus65535/bazarr/make-wish.svg?style=flat-square)](https://beerpay.io/morpheus65535/bazarr?focus=wish)
[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=XHHRWXT9YB7WE&source=url)
# Status
[![GitHub issues](https://img.shields.io/github/issues/morpheus65535/bazarr.svg?style=flat-square)](https://github.com/morpheus65535/bazarr/issues)
@ -69,6 +66,7 @@ If you need something that is not already part of Bazarr, feel free to create a
* Supersubtitles
* Titlovi
* TVSubtitles
* Wizdom
* XSubs
* Zimuku

View File

@ -14,7 +14,7 @@ from libs.six import PY3
def check_python_version():
python_version = platform.python_version_tuple()
minimum_py2_tuple = (2, 7, 13)
minimum_py3_tuple = (3, 6, 0)
minimum_py3_tuple = (3, 7, 0)
minimum_py2_str = ".".join(str(i) for i in minimum_py2_tuple)
minimum_py3_str = ".".join(str(i) for i in minimum_py3_tuple)

View File

@ -60,7 +60,7 @@ def check_and_apply_update():
if releases is None:
notifications.write(msg='Could not get releases from GitHub.',
queue='check_update', type='warning')
logging.warn('BAZARR Could not get releases from GitHub.')
logging.warning('BAZARR Could not get releases from GitHub.')
return
else:
release = releases[0]

View File

@ -3,11 +3,9 @@ import enzyme
from enzyme.exceptions import MalformedMKVError
import logging
import os
import subprocess
import locale
from knowit import api
from utils import get_binary
from pyprobe.pyprobe import VideoFileParser
class NotMKVAndNoFFprobe(Exception):
pass
@ -23,11 +21,18 @@ class EmbeddedSubsReader:
subtitles_list = []
if self.ffprobe:
parser = VideoFileParser(ffprobe=self.ffprobe, includeMissing=True, rawMode=False)
data = parser.parseFfprobe(file)
api.initialize({'provider': 'ffmpeg', 'ffmpeg': self.ffprobe})
data = api.know(file)
for detected_language in data['subtitles']:
subtitles_list.append([detected_language['language'], detected_language['forced'], detected_language["codec"]])
if 'subtitle' in data:
for detected_language in data['subtitle']:
if 'language' in detected_language:
language = detected_language['language'].alpha3
forced = detected_language['forced'] if 'forced' in detected_language else None
codec = detected_language['format'] if 'format' in detected_language else None
subtitles_list.append([language, forced, codec])
else:
continue
else:
if os.path.splitext(file)[1] == '.mkv':
with open(file, 'rb') as f:
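This hunk swaps the pyprobe-based probe for knowit. A small standalone sketch of the same pattern, assuming knowit exposes api.initialize()/api.know() with the signatures used above; the ffprobe path and video path below are placeholders:

```python
# Sketch of listing embedded subtitle tracks with knowit, mirroring the calls above.
from knowit import api

def list_embedded_subtitles(video_path, ffprobe_path='/usr/bin/ffprobe'):
    api.initialize({'provider': 'ffmpeg', 'ffmpeg': ffprobe_path})
    data = api.know(video_path)

    subtitles = []
    for track in data.get('subtitle', []):
        if 'language' not in track:
            continue  # knowit could not tag this track with a language
        subtitles.append([
            track['language'].alpha3,   # babelfish Language object -> 3-letter code
            track.get('forced'),        # absent key -> None
            track.get('format'),        # codec/format name, e.g. 'SubRip'
        ])
    return subtitles

if __name__ == '__main__':
    print(list_embedded_subtitles('/path/to/video.mkv'))  # placeholder path
```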

View File

@ -3,6 +3,7 @@
from __future__ import absolute_import
import os
import pycountry
import ast
from get_args import args
from subzero.language import Language
@ -76,5 +77,48 @@ def get_language_set():
return language_set
def clean_desired_languages():
from list_subtitles import list_missing_subtitles, list_missing_subtitles_movies
enabled_languages = []
enabled_languages_temp = database.execute("SELECT code2 FROM table_settings_languages WHERE enabled=1")
for language in enabled_languages_temp:
enabled_languages.append(language['code2'])
series_languages = database.execute("SELECT sonarrSeriesId, languages FROM table_shows")
movies_languages = database.execute("SELECT radarrId, languages FROM table_movies")
for item in series_languages:
if item['languages'] != 'None':
try:
languages_list = ast.literal_eval(item['languages'])
except:
pass
else:
cleaned_languages_list = []
for language in languages_list:
if language in enabled_languages:
cleaned_languages_list.append(language)
if cleaned_languages_list != languages_list:
database.execute("UPDATE table_shows SET languages=? WHERE sonarrSeriesId=?",
(str(cleaned_languages_list), item['sonarrSeriesId']))
list_missing_subtitles(no=item['sonarrSeriesId'])
for item in movies_languages:
if item['languages'] != 'None':
try:
languages_list = ast.literal_eval(item['languages'])
except:
pass
else:
cleaned_languages_list = []
for language in languages_list:
if language in enabled_languages:
cleaned_languages_list.append(language)
if cleaned_languages_list != languages_list:
database.execute("UPDATE table_movies SET languages=? WHERE radarrId=?",
(str(cleaned_languages_list), item['radarrId']))
list_missing_subtitles_movies(no=item['radarrId'])
if __name__ == '__main__':
load_language_in_db()
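The new clean_desired_languages() walks every series and movie row and drops languages that are no longer enabled. A small pure-Python sketch of just the filtering step, with illustrative names and no database involved:

```python
# Pure-Python sketch of the filtering step used above (illustrative names, no DB).
import ast

def clean_language_list(stored_value, enabled_languages):
    """stored_value is the string kept in the DB column, e.g. "['en', 'fr']"."""
    if stored_value == 'None':
        return None, False
    try:
        languages = ast.literal_eval(stored_value)
    except (ValueError, SyntaxError):
        return None, False
    cleaned = [code for code in languages if code in enabled_languages]
    return cleaned, cleaned != languages

cleaned, changed = clean_language_list("['en', 'de']", enabled_languages={'en', 'fr'})
print(cleaned, changed)  # ['en'] True
```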

View File

@ -25,6 +25,8 @@ def update_all_movies():
def update_movies():
logging.debug('BAZARR Starting movie sync from Radarr.')
apikey_radarr = settings.radarr.apikey
radarr_version = get_radarr_version()
movie_default_enabled = settings.general.getboolean('movie_default_enabled')
movie_default_language = settings.general.movie_default_language
movie_default_hi = settings.general.movie_default_hi
@ -130,6 +132,8 @@ def update_movies():
videoCodec = None
audioCodec = None
audio_language = profile_id_to_language(movie['qualityProfileId'], audio_profiles)
# Add movies in radarr to current movies list
current_movies_radarr.append(six.text_type(movie['tmdbId']))
@ -140,7 +144,7 @@ def update_movies():
'tmdbId': six.text_type(movie["tmdbId"]),
'poster': poster,
'fanart': fanart,
'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'year': six.text_type(movie['year']),
@ -165,7 +169,7 @@ def update_movies():
'overview': overview,
'poster': poster,
'fanart': fanart,
'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
@ -189,7 +193,7 @@ def update_movies():
'overview': overview,
'poster': poster,
'fanart': fanart,
'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
@ -227,8 +231,8 @@ def update_movies():
for updated_movie in movies_to_update_list:
query = dict_converter.convert(updated_movie)
database.execute('''UPDATE table_movies SET ''' + query.keys_update + ''' WHERE radarrId = ?''',
query.values + (updated_movie['radarrId'],))
database.execute('''UPDATE table_movies SET ''' + query.keys_update + ''' WHERE tmdbId = ?''',
query.values + (updated_movie['tmdbId'],))
altered_movies.append([updated_movie['tmdbId'],
updated_movie['path'],
updated_movie['radarrId'],
@ -275,8 +279,11 @@ def get_profile_list():
radarr_version = get_radarr_version()
profiles_list = []
# Get profiles data from radarr
if radarr_version.startswith('0'):
url_radarr_api_movies = url_radarr() + "/api/profile?apikey=" + apikey_radarr
else:
url_radarr_api_movies = url_radarr() + "/api/v3/qualityprofile?apikey=" + apikey_radarr
url_radarr_api_movies = url_radarr() + "/api/profile?apikey=" + apikey_radarr
try:
profiles_json = requests.get(url_radarr_api_movies, timeout=60, verify=False)
except requests.exceptions.ConnectionError as errc:
@ -290,7 +297,7 @@ def get_profile_list():
if radarr_version.startswith('0'):
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language'].capitalize()])
elif radarr_version.startswith('2'):
else:
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language']['name'].capitalize()])
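get_profile_list() now picks the API path by Radarr version: the legacy /api/profile endpoint for 0.x and /api/v3/qualityprofile otherwise, with matching JSON shapes. A hedged standalone sketch of that branching; the API paths come from the hunk, while base_url and apikey are placeholders:

```python
import requests

def get_quality_profiles(base_url, apikey, radarr_version):
    if radarr_version.startswith('0'):
        url = base_url + "/api/profile?apikey=" + apikey
    else:
        url = base_url + "/api/v3/qualityprofile?apikey=" + apikey

    profiles_json = requests.get(url, timeout=60, verify=False)

    profiles = []
    for profile in profiles_json.json():
        if radarr_version.startswith('0'):
            # Radarr 0.x exposes the language as a plain string
            profiles.append([profile['id'], profile['language'].capitalize()])
        else:
            # Radarr 3.x nests it under language['name']
            profiles.append([profile['id'], profile['language']['name'].capitalize()])
    return profiles
```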

View File

@ -34,7 +34,7 @@ from notifier import send_notifications, send_notifications_movie
from get_providers import get_providers, get_providers_auth, provider_throttle, provider_pool
from get_args import args
from queueconfig import notifications
from pyprobe.pyprobe import VideoFileParser
from knowit import api
from database import database, dict_mapper
from analytics import track_event
@ -42,6 +42,7 @@ import six
from six.moves import range
from functools import reduce
from locale import getpreferredencoding
import chardet
def get_video(path, title, sceneName, use_scenename, providers=None, media_type="movie"):
@ -346,10 +347,20 @@ def manual_search(path, language, hi, forced, providers, providers_auth, sceneNa
not_matched = scores - matches
s.score = score
releases = ['n/a']
releases = []
if hasattr(s, 'release_info'):
if s.release_info is not None:
releases = s.release_info.split(',')
for s_item in s.release_info.split(','):
if s_item.strip():
releases.append(s_item)
if len(releases) == 0:
releases = ['n/a']
if s.uploader and s.uploader.strip():
s_uploader = s.uploader.strip()
else:
s_uploader = 'n/a'
subtitles_list.append(
dict(score=round((score / max_score * 100), 2),
@ -359,7 +370,7 @@ def manual_search(path, language, hi, forced, providers, providers_auth, sceneNa
provider=s.provider_name,
subtitle=codecs.encode(pickle.dumps(s.make_picklable()), "base64").decode(),
url=s.page_link, matches=list(matches), dont_matches=list(not_matched),
release_info=releases))
release_info=releases, uploader=s_uploader))
final_subtitles = sorted(subtitles_list, key=lambda x: (x['orig_score'], x['score_without_hash']),
reverse=True)
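manual_search() now builds the release list by splitting release_info on commas, dropping empty fragments, and falling back to 'n/a', and it normalizes the uploader the same way. The same logic as two tiny helpers with illustrative names:

```python
def normalize_releases(release_info):
    releases = [item for item in (release_info or '').split(',') if item.strip()]
    return releases if releases else ['n/a']

def normalize_uploader(uploader):
    return uploader.strip() if uploader and uploader.strip() else 'n/a'

print(normalize_releases('Show.S01E01.720p.WEB, ,Show.S01E01.1080p.WEB'))
print(normalize_uploader('   '))  # 'n/a'
```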
@ -470,6 +481,10 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
'win') and settings.general.getboolean('chmod_enabled') else None
dest_directory = get_target_folder(path)
fake_video_path = None
if dest_directory:
fake_video_path = os.path.join(dest_directory, os.path.split(path)[1])
_, ext = os.path.splitext(subtitle.filename)
language = alpha3_from_alpha2(language)
@ -482,7 +497,7 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
if forced:
lang_obj = Language.rebuild(lang_obj, forced=True)
subtitle_path = get_subtitle_path(video_path=force_unicode(path),
subtitle_path = get_subtitle_path(video_path=force_unicode(fake_video_path if fake_video_path else path),
language=None if single else lang_obj,
extension=ext,
forced_tag=forced)
@ -492,7 +507,34 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
if os.path.exists(subtitle_path):
os.remove(subtitle_path)
subtitle.save(subtitle_path)
if settings.general.utf8_encode:
try:
os.remove(subtitle_path + ".tmp")
except:
pass
subtitle.save(subtitle_path + ".tmp")
with open(subtitle_path + ".tmp", 'rb') as fr:
text = fr.read()
try:
guess = chardet.detect(text)
text = text.decode(guess["encoding"])
text = text.encode('utf-8')
except UnicodeError:
logging.exception("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
subtitle_path)
else:
with open(subtitle_path, 'wb') as fw:
fw.write(text)
finally:
try:
os.remove(subtitle_path + ".tmp")
except:
pass
else:
subtitle.save(subtitle_path)
if chmod:
os.chmod(subtitle_path, chmod)
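When utf8_encode is enabled, the uploaded subtitle is detected with chardet and rewritten as UTF-8 via a temporary file. A compact, hedged sketch of the detect-and-re-encode step, without the .tmp bookkeeping above:

```python
import chardet

def reencode_subtitle_to_utf8(path):
    with open(path, 'rb') as fr:
        raw = fr.read()
    guess = chardet.detect(raw)
    try:
        text = raw.decode(guess['encoding'])
    except (UnicodeError, LookupError, TypeError):
        # Not text based (or chardet returned no encoding); leave the file untouched.
        return False
    with open(path, 'wb') as fw:
        fw.write(text.encode('utf-8'))
    return True
```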
@ -868,7 +910,7 @@ def refine_from_db(path, video):
"WHERE table_episodes.path = ?", (path_replace_reverse(path),), only_one=True)
if data:
video.series, year, country = series_re.match(data['seriesTitle']).groups()
video.series = data['seriesTitle']
video.season = int(data['season'])
video.episode = int(data['episode'])
video.title = data['episodeTitle']
@ -915,37 +957,30 @@ def refine_from_ffprobe(path, video):
else:
logging.debug('BAZARR FFprobe used is %s', exe)
parser = VideoFileParser(ffprobe=exe, includeMissing=True, rawMode=False)
data = parser.parseFfprobe(path)
api.initialize({'provider': 'ffmpeg', 'ffmpeg': exe})
data = api.know(path)
logging.debug('FFprobe found: %s', data)
if 'videos' not in data:
if 'video' not in data:
logging.debug('BAZARR FFprobe was unable to find video tracks in the file!')
else:
if 'resolution' in data['videos'][0]:
if 'resolution' in data['video'][0]:
if not video.resolution:
if data['videos'][0]['resolution'][0] >= 3200:
video.resolution = "2160p"
elif data['videos'][0]['resolution'][0] >= 1800:
video.resolution = "1080p"
elif data['videos'][0]['resolution'][0] >= 1200:
video.resolution = "720p"
elif data['videos'][0]['resolution'][0] >= 0:
video.resolution = "480p"
if 'codec' in data['videos'][0]:
video.resolution = data['video'][0]['resolution']
if 'codec' in data['video'][0]:
if not video.video_codec:
video.video_codec = data['videos'][0]['codec']
if 'framerate' in data['videos'][0]:
video.video_codec = data['video'][0]['codec']
if 'frame_rate' in data['video'][0]:
if not video.fps:
video.fps = data['videos'][0]['framerate']
video.fps = data['video'][0]['frame_rate']
if 'audios' not in data:
if 'audio' not in data:
logging.debug('BAZARR FFprobe was unable to find audio tracks in the file!')
else:
if 'codec' in data['audios'][0]:
if 'codec' in data['audio'][0]:
if not video.audio_codec:
video.audio_codec = data['audios'][0]['codec'].upper()
video.audio_codec = data['audio'][0]['codec']
def upgrade_subtitles():
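refine_from_ffprobe() now reads knowit's 'video'/'audio' track lists and only fills attributes that are still empty. The same guarded mapping as a standalone sketch; the class and function names are illustrative, and `data` stands in for a knowit result dict:

```python
class VideoInfo(object):
    resolution = video_codec = fps = audio_codec = None

def refine_from_mediainfo(video, data):
    if data.get('video'):
        track = data['video'][0]
        if 'resolution' in track and not video.resolution:
            video.resolution = track['resolution']
        if 'codec' in track and not video.video_codec:
            video.video_codec = track['codec']
        if 'frame_rate' in track and not video.fps:
            video.fps = track['frame_rate']
    if data.get('audio'):
        track = data['audio'][0]
        if 'codec' in track and not video.audio_codec:
            video.audio_codec = track['codec']
    return video

v = refine_from_mediainfo(VideoInfo(), {'video': [{'resolution': '1080p', 'codec': 'H.264'}],
                                        'audio': [{'codec': 'AC-3'}]})
print(v.resolution, v.video_codec, v.audio_codec)  # 1080p H.264 AC-3
```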

View File

@ -13,9 +13,7 @@ import operator
from subliminal import core
from subliminal_patch import search_external_subtitles
from subzero.language import Language
from bs4 import UnicodeDammit
import six
from binaryornot.check import is_binary
from get_args import args
from database import database
@ -27,6 +25,7 @@ from helper import path_replace, path_replace_movie, path_replace_reverse, \
from queueconfig import notifications
from embedded_subs_reader import embedded_subs_reader
import six
import chardet
gc.enable()
@ -367,25 +366,31 @@ def guess_external_subtitles(dest_folder, subtitles):
subtitle_path = os.path.join(dest_folder, subtitle)
if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS:
logging.debug("BAZARR falling back to file content analysis to detect language.")
if is_binary(subtitle_path):
logging.debug("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
detected_language = None
# to improve performance, skip detection of files larger than 5 MB
if os.path.getsize(subtitle_path) > 5*1024*1024:
logging.debug("BAZARR subtitles file is too large to be text based. Skipping this file: " +
subtitle_path)
continue
detected_language = None
if six.PY3:
with open(subtitle_path, 'r', errors='ignore') as f:
text = f.read()
else:
with open(subtitle_path, 'r') as f:
text = f.read()
with open(subtitle_path, 'rb') as f:
text = f.read()
try:
encoding = UnicodeDammit(text)
if six.PY2:
text = text.decode(encoding.original_encoding)
# to improve performance, use only the first 32K to detect encoding
guess = chardet.detect(text[:32768])
logging.debug('BAZARR detected encoding %r', guess)
if guess["confidence"] < 0.6:
raise UnicodeError
if guess["confidence"] < 0.8 or guess["encoding"] == "ascii":
guess["encoding"] = "utf-8"
text = text.decode(guess["encoding"])
detected_language = guess_language(text)
except Exception as e:
except UnicodeError:
logging.exception("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
subtitle_path)
except:
logging.exception('BAZARR Error trying to detect language for this subtitles file: ' +
subtitle_path + ' You should try to delete this subtitles file manually and ask '
'Bazarr to download it again.')
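The language-guessing fallback now reads the file as bytes, detects the encoding with chardet on the first 32 KB, forces utf-8 when confidence is low or the guess is plain ascii, and only then runs the language guesser. A hedged sketch of that flow; the guess_language import path is an assumption (the guess_language() call itself matches the code above):

```python
import os
import chardet
from guess_language import guess_language

MAX_SIZE = 5 * 1024 * 1024   # skip files larger than 5 MB
SAMPLE = 32 * 1024           # only the first 32 KB is needed to detect the encoding

def detect_subtitle_language(path):
    if os.path.getsize(path) > MAX_SIZE:
        return None
    with open(path, 'rb') as f:
        raw = f.read()
    guess = chardet.detect(raw[:SAMPLE])
    if guess['confidence'] < 0.8 or guess['encoding'] == 'ascii':
        guess['encoding'] = 'utf-8'
    try:
        text = raw.decode(guess['encoding'])
    except UnicodeError:
        return None  # not text based
    return guess_language(text)
```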

View File

@ -95,6 +95,7 @@ def configure_logging(debug=False):
logging.getLogger("subliminal_patch").setLevel(logging.CRITICAL)
logging.getLogger("subzero").setLevel(logging.ERROR)
logging.getLogger("knowit").setLevel(logging.CRITICAL)
logging.getLogger("enzyme").setLevel(logging.CRITICAL)
logging.getLogger("guessit").setLevel(logging.WARNING)
logging.getLogger("rebulk").setLevel(logging.WARNING)

View File

@ -1,6 +1,6 @@
# coding=utf-8
bazarr_version = '0.8.4.2'
bazarr_version = '0.8.4.3'
import os
os.environ["SZ_USER_AGENT"] = "Bazarr/1"
@ -47,7 +47,7 @@ from beaker.middleware import SessionMiddleware
from cork import Cork
from bottle import route, template, static_file, request, redirect, response, HTTPError, app, hook, abort
from datetime import timedelta, datetime
from get_languages import load_language_in_db, language_from_alpha3, language_from_alpha2, alpha2_from_alpha3
from get_languages import load_language_in_db, language_from_alpha3, language_from_alpha2, alpha2_from_alpha3, clean_desired_languages
from get_providers import get_providers, get_providers_auth, list_throttled_providers
from get_series import *
@ -57,7 +57,7 @@ from get_movies import *
from list_subtitles import store_subtitles, store_subtitles_movie, series_scan_subtitles, movies_scan_subtitles, \
list_missing_subtitles, list_missing_subtitles_movies
from get_subtitle import download_subtitle, series_download_subtitles, movies_download_subtitles, \
manual_search, manual_download_subtitle, manual_upload_subtitle
manual_search, manual_download_subtitle, manual_upload_subtitle, wanted_search_missing_subtitles
from utils import history_log, history_log_movie, get_sonarr_version, get_radarr_version
from helper import path_replace_reverse, path_replace_reverse_movie
from scheduler import Scheduler
@ -1625,6 +1625,8 @@ def save_settings():
database.execute("UPDATE table_settings_notifier SET enabled=?, url=? WHERE name=?",
(enabled,notifier_url,notifier['name']))
clean_desired_languages()
scheduler.update_configurable_tasks()
logging.info('BAZARR Settings saved successfully.')
@ -2005,7 +2007,7 @@ def perform_manual_upload_subtitle_movie():
forced=forced,
title=title,
scene_name=sceneName,
media_type='series',
media_type='movie',
subtitle=upload)
if result is not None:
@ -2223,6 +2225,8 @@ def api_help():
# Mute DeprecationWarning
warnings.simplefilter("ignore", DeprecationWarning)
# Mute Insecure HTTPS requests made to Sonarr and Radarr
warnings.filterwarnings('ignore', message='Unverified HTTPS request')
if six.PY3:
warnings.simplefilter("ignore", BrokenPipeError)
server = CherryPyWSGIServer((str(settings.general.ip), (int(args.port) if args.port else int(settings.general.port))), app)

View File

@ -323,6 +323,10 @@ class Apprise(object):
# bad attachments
return False
# Allow Asset default value
body_format = self.asset.body_format \
if body_format is None else body_format
# Iterate over our loaded plugins
for server in self.find(tag):
if status is None:

View File

@ -86,23 +86,32 @@ class AppriseAsset(object):
'apprise-{TYPE}-{XY}{EXTENSION}',
))
def __init__(self, theme='default', image_path_mask=None,
image_url_mask=None, default_extension=None):
# This value can also be set on calls to Apprise.notify(). This allows
# you to let Apprise upfront the type of data being passed in. This
# must be of type NotifyFormat. Possible values could be:
# - NotifyFormat.TEXT
# - NotifyFormat.MARKDOWN
# - NotifyFormat.HTML
# - None
#
# If no format is specified (hence None), then no special pre-formatting
# actions will take place during a notification. This has been and always
# will be the default.
body_format = None
def __init__(self, **kwargs):
"""
Asset Initialization
"""
if theme:
self.theme = theme
# Assign default arguments if specified
for key, value in kwargs.items():
if not hasattr(AppriseAsset, key):
raise AttributeError(
'AppriseAsset init(): '
'An invalid key {} was specified.'.format(key))
if image_path_mask is not None:
self.image_path_mask = image_path_mask
if image_url_mask is not None:
self.image_url_mask = image_url_mask
if default_extension is not None:
self.default_extension = default_extension
setattr(self, key, value)
def color(self, notify_type, color_type=None):
"""

View File

@ -102,7 +102,7 @@ class AppriseAttachment(object):
# Initialize our default cache value
cache = cache if cache is not None else self.cache
if isinstance(asset, AppriseAsset):
if asset is None:
# prepare default asset
asset = self.asset

View File

@ -115,7 +115,7 @@ class AppriseConfig(object):
# Initialize our default cache value
cache = cache if cache is not None else self.cache
if isinstance(asset, AppriseAsset):
if asset is None:
# prepare default asset
asset = self.asset
@ -165,6 +165,39 @@ class AppriseConfig(object):
# Return our status
return return_status
def add_config(self, content, asset=None, tag=None, format=None):
"""
Adds one configuration file in its raw format. Content gets loaded as
a memory-based object and only exists for the life of this
AppriseConfig object it was loaded into.
If you know the format ('yaml' or 'text') you can specify
it for slightly less overhead during this call. Otherwise the
configuration is auto-detected.
"""
if asset is None:
# prepare default asset
asset = self.asset
if not isinstance(content, six.string_types):
logger.warning(
"An invalid configuration (type={}) was specified.".format(
type(content)))
return False
logger.debug("Loading raw configuration: {}".format(content))
# Create ourselves a ConfigMemory Object to store our configuration
instance = config.ConfigMemory(
content=content, format=format, asset=asset, tag=tag)
# Add our initialized plugin to our server listings
self.configs.append(instance)
# Return our status
return True
def servers(self, tag=MATCH_ALL_TAG, *args, **kwargs):
"""
Returns all of our servers dynamically build based on parsed
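The new add_config() lets a raw configuration string be loaded straight from memory (backed by the ConfigMemory class added later in this commit). A hedged usage sketch; the json:// URL below is only a placeholder target:

```python
from apprise import Apprise, AppriseConfig

config = AppriseConfig()
config.add_config("""
# TEXT format: optional tag(s), an equal sign, then an Apprise URL
devops=json://localhost:8080/notify
""")

apobj = Apprise()
apobj.add(config)
# apobj.notify(title='build finished', body='all tests passed', tag='devops')
```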

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
@ -24,10 +24,10 @@
# THE SOFTWARE.
__title__ = 'apprise'
__version__ = '0.8.2'
__version__ = '0.8.5'
__author__ = 'Chris Caron'
__license__ = 'MIT'
__copywrite__ = 'Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>'
__copywrite__ = 'Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>'
__email__ = 'lead2gold@gmail.com'
__status__ = 'Production'

View File

@ -28,6 +28,7 @@ import time
import mimetypes
from ..URLBase import URLBase
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
class AttachBase(URLBase):
@ -61,7 +62,35 @@ class AttachBase(URLBase):
# 5 MB = 5242880 bytes
max_file_size = 5242880
def __init__(self, name=None, mimetype=None, cache=True, **kwargs):
# Here is where we define all of the arguments we accept on the url
# such as: schema://whatever/?overflow=upstream&format=text
# These act the same way as tokens except they are optional and/or
# have default values set if mandatory. This rule must be followed
template_args = {
'cache': {
'name': _('Cache Age'),
'type': 'int',
# We default to (600) which means we cache for 10 minutes
'default': 600,
},
'mime': {
'name': _('Forced Mime Type'),
'type': 'string',
},
'name': {
'name': _('Forced File Name'),
'type': 'string',
},
'verify': {
'name': _('Verify SSL'),
# SSL Certificate Authority Verification
'type': 'bool',
# Provide a default
'default': True,
},
}
def __init__(self, name=None, mimetype=None, cache=None, **kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
@ -109,19 +138,27 @@ class AttachBase(URLBase):
# Absolute path to attachment
self.download_path = None
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
# Set our cache flag; it can be True, False, None, or a (positive)
# integer... nothing else
if cache is not None:
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
except (TypeError, ValueError):
err = 'An invalid cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
# Some simple error checking
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
else:
self.cache = None
# Validate mimetype if specified
if self._mimetype:
@ -211,12 +248,16 @@ class AttachBase(URLBase):
Simply returns true if the object has downloaded and stored the
attachment AND the attachment has not expired.
"""
cache = self.template_args['cache']['default'] \
if self.cache is None else self.cache
if self.download_path and os.path.isfile(self.download_path) \
and self.cache:
and cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
if self.cache is True:
if cache is True:
# return our fixed content as is; we will always cache it
return True
@ -224,7 +265,7 @@ class AttachBase(URLBase):
# content again.
try:
age_in_sec = time.time() - os.stat(self.download_path).st_mtime
if age_in_sec <= self.cache:
if age_in_sec <= cache:
return True
except (OSError, IOError):
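With cache now allowed to be True, False, None, or a positive integer, exists() falls back to the template default (600 seconds) when nothing was specified and otherwise compares the file's age against the cache window. The same check as a standalone sketch with illustrative names:

```python
import os
import time

DEFAULT_CACHE_SEC = 600  # the template default applied when cache was never set

def download_is_fresh(download_path, cache):
    cache = DEFAULT_CACHE_SEC if cache is None else cache
    if not (download_path and os.path.isfile(download_path) and cache):
        return False
    if cache is True:
        return True  # cache indefinitely
    age_in_sec = time.time() - os.stat(download_path).st_mtime
    return age_in_sec <= cache
```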

View File

@ -78,6 +78,11 @@ class AttachHTTP(AttachBase):
# Where our content is written to upon a call to download.
self._temp_file = None
# Our Query String Dictionary; we use this to track arguments
# specified that aren't otherwise part of this class
self.qsd = {k: v for k, v in kwargs.get('qsd', {}).items()
if k not in self.template_args}
return
def download(self, **kwargs):
@ -122,6 +127,7 @@ class AttachHTTP(AttachBase):
url,
headers=headers,
auth=auth,
params=self.qsd,
verify=self.verify_certificate,
timeout=self.connection_timeout_sec,
stream=True) as r:
@ -252,18 +258,21 @@ class AttachHTTP(AttachBase):
Returns the URL built dynamically based on specified arguments.
"""
# Prepare our cache value
if isinstance(self.cache, bool) or not self.cache:
cache = 'yes' if self.cache else 'no'
else:
cache = int(self.cache)
# Define any arguments set
args = {
'verify': 'yes' if self.verify_certificate else 'no',
'cache': cache,
}
# Prepare our cache value
if self.cache is not None:
if isinstance(self.cache, bool) or not self.cache:
cache = 'yes' if self.cache else 'no'
else:
cache = int(self.cache)
# Set our cache value
args['cache'] = cache
if self._mimetype:
# A format was enforced
args['mime'] = self._mimetype
@ -275,6 +284,9 @@ class AttachHTTP(AttachBase):
# Append our headers into our args
args.update({'+{}'.format(k): v for k, v in self.headers.items()})
# Apply any remaining entries to our URL
args.update(self.qsd)
# Determine Authentication
auth = ''
if self.user and self.password:
@ -290,7 +302,7 @@ class AttachHTTP(AttachBase):
default_port = 443 if self.secure else 80
return '{schema}://{auth}{hostname}{port}{fullpath}/?{args}'.format(
return '{schema}://{auth}{hostname}{port}{fullpath}?{args}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
auth=auth,
hostname=self.quote(self.host, safe=''),

View File

@ -118,7 +118,9 @@ def print_version_msg():
help='Perform a trial run but only prints the notification '
'services to-be triggered to stdout. Notifications are never '
'sent using this mode.')
@click.option('--verbose', '-v', count=True)
@click.option('--verbose', '-v', count=True,
help='Makes the operation more talkative. Use multiple v to '
'increase the verbosity, e.g. -vvvv')
@click.option('--version', '-V', is_flag=True,
help='Display the apprise version and exit.')
@click.argument('urls', nargs=-1,

View File

@ -92,7 +92,8 @@ class ConfigBase(URLBase):
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs:
if 'format' in kwargs \
and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
@ -249,6 +250,109 @@ class ConfigBase(URLBase):
return results
@staticmethod
def detect_config_format(content, **kwargs):
"""
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
"""
# Detect Format Logic:
# - A pound/hashtag (#) is always a comment character so we skip over
# lines matched here.
# - Detection begins on the first non-comment and non-blank line
# matched.
# - If we find a string followed by a colon, we know we're dealing
# with a YAML file.
# - If we find a string that starts with a URL, or our tag
# definitions (accepting commas) followed by an equal sign we know
# we're dealing with a TEXT format.
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error('Invalid apprise config specified')
return None
# By default set our return value to None since we don't know
# what the format is yet
config_format = None
# iterate over each line of the file to attempt to detect it
# stop the moment the type has been determined
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Undetectable apprise configuration found '
'based on line {}.'.format(line))
# Take an early exit
return None
# Attempt to detect configuration
if result.group('yaml'):
config_format = ConfigFormat.YAML
ConfigBase.logger.debug(
'Detected YAML configuration '
'based on line {}.'.format(line))
break
elif result.group('text'):
config_format = ConfigFormat.TEXT
ConfigBase.logger.debug(
'Detected TEXT configuration '
'based on line {}.'.format(line))
break
# If we reach here, we have a comment entry
# Adjust default format to TEXT
config_format = ConfigFormat.TEXT
return config_format
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
"""
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
"""
if config_format is None:
# Detect the format
config_format = ConfigBase.detect_config_format(content)
if not config_format:
# We couldn't detect configuration
ConfigBase.logger.error('Could not detect configuration')
return list()
if config_format not in CONFIG_FORMATS:
# Invalid configuration type specified
ConfigBase.logger.error(
'An invalid configuration format ({}) was specified'.format(
config_format))
return list()
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Execute our config parse function which always returns a list
return fn(content=content, asset=asset)
@staticmethod
def config_parse_text(content, asset=None):
"""
@ -270,9 +374,6 @@ class ConfigBase(URLBase):
<URL>
"""
# For logging, track the line number
line = 0
response = list()
# Define what a valid line should look like
@ -290,10 +391,7 @@ class ConfigBase(URLBase):
ConfigBase.logger.error('Invalid apprise text data specified')
return list()
for entry in content:
# Increment our line count
line += 1
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax

View File

@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .ConfigBase import ConfigBase
from ..AppriseLocale import gettext_lazy as _
class ConfigMemory(ConfigBase):
"""
For information that was loaded from memory and does not
persist anywhere.
"""
# The default descriptive name associated with the service
service_name = _('Memory')
# The default protocol
protocol = 'memory'
def __init__(self, content, **kwargs):
"""
Initialize Memory Object
Memory objects just store the raw configuration in memory. There is
no external reference point. It's always considered cached.
"""
super(ConfigMemory, self).__init__(**kwargs)
# Store our raw config into memory
self.content = content
if self.config_format is None:
# Detect our format if possible
self.config_format = \
ConfigMemory.detect_config_format(self.content)
return
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
return 'memory://'
def read(self, **kwargs):
"""
Simply return content stored into memory
"""
return self.content
@staticmethod
def parse_url(url):
"""
Memory objects have no parseable URL
"""
# These URLs can not be parsed
return None
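A hedged usage sketch of the new ConfigMemory class, assuming the apprise.config.ConfigMemory import path of the bundled copy: the raw content lives only in memory, read() hands it back verbatim, and the URL is always memory://.

```python
from apprise.config.ConfigMemory import ConfigMemory

cfg = ConfigMemory(content="devops=json://localhost:8080/notify\n")
print(cfg.url())            # memory://
print(cfg.config_format)    # 'text' (auto-detected via detect_config_format)
print(cfg.read())           # the raw content handed in above
```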

View File

@ -1,21 +1,21 @@
# Translations template for apprise.
# Copyright (C) 2019 Chris Caron
# Copyright (C) 2020 Chris Caron
# This file is distributed under the same license as the apprise project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2019.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: apprise 0.8.2\n"
"Project-Id-Version: apprise 0.8.5\n"
"Report-Msgid-Bugs-To: lead2gold@gmail.com\n"
"POT-Creation-Date: 2019-11-25 18:50-0500\n"
"POT-Creation-Date: 2020-03-30 16:00-0400\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.7.0\n"
"Generated-By: Babel 2.8.0\n"
msgid "API Key"
msgstr ""
@ -71,6 +71,9 @@ msgstr ""
msgid "Bot Token"
msgstr ""
msgid "Cache Age"
msgstr ""
msgid "Cache Results"
msgstr ""
@ -95,6 +98,9 @@ msgstr ""
msgid "Device ID"
msgstr ""
msgid "Device Name"
msgstr ""
msgid "Display Footer"
msgstr ""
@ -128,6 +134,12 @@ msgstr ""
msgid "Footer Logo"
msgstr ""
msgid "Forced File Name"
msgstr ""
msgid "Forced Mime Type"
msgstr ""
msgid "From Email"
msgstr ""
@ -164,6 +176,9 @@ msgstr ""
msgid "Log to STDERR"
msgstr ""
msgid "Memory"
msgstr ""
msgid "Message Hook"
msgstr ""
@ -203,6 +218,9 @@ msgstr ""
msgid "Priority"
msgstr ""
msgid "Private Key"
msgstr ""
msgid "Project ID"
msgstr ""
@ -365,6 +383,9 @@ msgstr ""
msgid "Version"
msgstr ""
msgid "Vibration"
msgstr ""
msgid "Web Based"
msgstr ""

View File

@ -86,7 +86,7 @@ class NotifyD7Networks(NotifyBase):
# The services URL
service_url = 'https://d7networks.com/'
# All pushover requests are secure
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
@ -94,7 +94,7 @@ class NotifyD7Networks(NotifyBase):
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_twilio'
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'

View File

@ -51,6 +51,7 @@ from ..common import NotifyType
from ..utils import parse_bool
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
from ..attachment.AttachBase import AttachBase
class NotifyDiscord(NotifyBase):
@ -312,6 +313,19 @@ class NotifyDiscord(NotifyBase):
# Always call throttle before any remote server i/o is made
self.throttle()
# Perform some simple error checking
if isinstance(attach, AttachBase):
if not attach:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attach.url(privacy=True)))
return False
self.logger.debug(
'Posting Discord attachment {}'.format(
attach.url(privacy=True)))
# Our attachment path (if specified)
files = None
try:

View File

@ -269,6 +269,14 @@ class NotifyEmail(NotifyBase):
# Define object templates
templates = (
'{schema}://{host}',
'{schema}://{host}:{port}',
'{schema}://{host}/{targets}',
'{schema}://{host}:{port}/{targets}',
'{schema}://{user}@{host}',
'{schema}://{user}@{host}:{port}',
'{schema}://{user}@{host}/{targets}',
'{schema}://{user}@{host}:{port}/{targets}',
'{schema}://{user}:{password}@{host}',
'{schema}://{user}:{password}@{host}:{port}',
'{schema}://{user}:{password}@{host}/{targets}',
@ -280,13 +288,11 @@ class NotifyEmail(NotifyBase):
'user': {
'name': _('User Name'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
'required': True,
},
'host': {
'name': _('Domain'),
@ -388,7 +394,7 @@ class NotifyEmail(NotifyBase):
self.from_name = from_name
self.from_addr = from_addr
if not self.from_addr:
if self.user and not self.from_addr:
# detect our email address
self.from_addr = '{}@{}'.format(
re.split(r'[\s@]+', self.user)[0],
@ -446,6 +452,10 @@ class NotifyEmail(NotifyBase):
# Apply any defaults based on certain known configurations
self.NotifyEmailDefaults()
# if there is still no smtp_host then we fall back to the hostname
if not self.smtp_host:
self.smtp_host = self.host
return
def NotifyEmailDefaults(self):
@ -454,10 +464,11 @@ class NotifyEmail(NotifyBase):
it was provided.
"""
if self.smtp_host:
if self.smtp_host or not self.user:
# SMTP Server was explicitly specified, therefore it is assumed
# the caller knows what he's doing and is intentionally
# over-riding any smarts to be applied
# over-riding any smarts to be applied. We also cannot apply
# any default if no user was specified.
return
# detect our email address using our user/host combo
@ -573,21 +584,22 @@ class NotifyEmail(NotifyBase):
# First attach our body to our content as the first element
base.attach(content)
attach_error = False
# Now store our attachments
for attachment in attach:
if not attachment:
# We could not load the attachment; take an early
# exit since this isn't what the end user wanted
self.logger.warning(
'The specified attachment could not be referenced:'
' {}.'.format(attachment.url(privacy=True)))
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attachment.url(privacy=True)))
# Mark our failure
attach_error = True
break
return False
self.logger.debug(
'Preparing Email attachment {}'.format(
attachment.url(privacy=True)))
with open(attachment.path, "rb") as abody:
app = MIMEApplication(
@ -600,11 +612,6 @@ class NotifyEmail(NotifyBase):
base.attach(app)
if attach_error:
# Mark our error and quit early
has_error = True
break
# bind the socket variable to the current namespace
socket = None
@ -687,7 +694,7 @@ class NotifyEmail(NotifyBase):
args['bcc'] = ','.join(self.bcc)
# pull email suffix from username (if present)
user = self.user.split('@')[0]
user = None if not self.user else self.user.split('@')[0]
# Determine Authentication
auth = ''
@ -697,7 +704,7 @@ class NotifyEmail(NotifyBase):
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
)
else:
elif user:
# user url
auth = '{user}@'.format(
user=NotifyEmail.quote(user, safe=''),

View File

@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Sources
# - https://dreambox.de/en/
# - https://dream.reichholf.net/wiki/Hauptseite
# - https://dream.reichholf.net/wiki/Enigma2:WebInterface#Message
# - https://github.com/E2OpenPlugins/e2openplugin-OpenWebif
# - https://github.com/E2OpenPlugins/e2openplugin-OpenWebif/wiki/\
# OpenWebif-API-documentation#message
import six
import requests
from json import loads
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..AppriseLocale import gettext_lazy as _
class Enigma2MessageType(object):
# Defines the Enigma2 notification types Apprise can map to
INFO = 1
WARNING = 2
ERROR = 3
# If a mapping fails, the default of Enigma2MessageType.INFO is used
MESSAGE_MAPPING = {
NotifyType.INFO: Enigma2MessageType.INFO,
NotifyType.SUCCESS: Enigma2MessageType.INFO,
NotifyType.WARNING: Enigma2MessageType.WARNING,
NotifyType.FAILURE: Enigma2MessageType.ERROR,
}
class NotifyEnigma2(NotifyBase):
"""
A wrapper for Enigma2 Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Enigma2'
# The services URL
service_url = 'https://dreambox.de/'
# The default protocol
protocol = 'enigma2'
# The default secure protocol
secure_protocol = 'enigma2s'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_enigma2'
# Enigma2 does not support a title
title_maxlen = 0
# The maximum allowable characters allowed in the body per message
body_maxlen = 1000
# Throttle a wee-bit to avoid thrashing
request_rate_per_sec = 0.5
# Define object templates
templates = (
'{schema}://{host}',
'{schema}://{host}:{port}',
'{schema}://{user}@{host}',
'{schema}://{user}@{host}:{port}',
'{schema}://{user}:{password}@{host}',
'{schema}://{user}:{password}@{host}:{port}',
'{schema}://{host}/{fullpath}',
'{schema}://{host}:{port}/{fullpath}',
'{schema}://{user}@{host}/{fullpath}',
'{schema}://{user}@{host}:{port}/{fullpath}',
'{schema}://{user}:{password}@{host}/{fullpath}',
'{schema}://{user}:{password}@{host}:{port}/{fullpath}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'host': {
'name': _('Hostname'),
'type': 'string',
'required': True,
},
'port': {
'name': _('Port'),
'type': 'int',
'min': 1,
'max': 65535,
},
'user': {
'name': _('Username'),
'type': 'string',
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
},
'fullpath': {
'name': _('Path'),
'type': 'string',
},
})
template_args = dict(NotifyBase.template_args, **{
'timeout': {
'name': _('Server Timeout'),
'type': 'int',
# The number of seconds to display the message for
'default': 13,
# -1 means infinite
'min': -1,
},
})
# Define any kwargs we're using
template_kwargs = {
'headers': {
'name': _('HTTP Header'),
'prefix': '+',
},
}
def __init__(self, timeout=None, headers=None, **kwargs):
"""
Initialize Enigma2 Object
headers can be a dictionary of key/value pairs that you want to
additionally include as part of the server headers to post with
"""
super(NotifyEnigma2, self).__init__(**kwargs)
try:
self.timeout = int(timeout)
if self.timeout < self.template_args['timeout']['min']:
# Bulletproof; can't go lower than the min value
self.timeout = self.template_args['timeout']['min']
except (ValueError, TypeError):
# Use default timeout
self.timeout = self.template_args['timeout']['default']
self.fullpath = kwargs.get('fullpath')
if not isinstance(self.fullpath, six.string_types):
self.fullpath = '/'
self.headers = {}
if headers:
# Store our extra headers
self.headers.update(headers)
return
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'timeout': str(self.timeout),
}
# Append our headers into our args
args.update({'+{}'.format(k): v for k, v in self.headers.items()})
# Determine Authentication
auth = ''
if self.user and self.password:
auth = '{user}:{password}@'.format(
user=NotifyEnigma2.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
)
elif self.user:
auth = '{user}@'.format(
user=NotifyEnigma2.quote(self.user, safe=''),
)
default_port = 443 if self.secure else 80
return '{schema}://{auth}{hostname}{port}{fullpath}?{args}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
auth=auth,
hostname=NotifyEnigma2.quote(self.host, safe=''),
port='' if self.port is None or self.port == default_port
else ':{}'.format(self.port),
fullpath=NotifyEnigma2.quote(self.fullpath, safe='/'),
args=NotifyEnigma2.urlencode(args),
)
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Enigma2 Notification
"""
# prepare Enigma2 Object
headers = {
'User-Agent': self.app_id,
}
params = {
'text': body,
'type': MESSAGE_MAPPING.get(
notify_type, Enigma2MessageType.INFO),
'timeout': self.timeout,
}
# Apply any/all header over-rides defined
headers.update(self.headers)
auth = None
if self.user:
auth = (self.user, self.password)
# Set our schema
schema = 'https' if self.secure else 'http'
url = '%s://%s' % (schema, self.host)
if isinstance(self.port, int):
url += ':%d' % self.port
# Prepare our message URL
url += self.fullpath.rstrip('/') + '/api/message'
self.logger.debug('Enigma2 POST URL: %s (cert_verify=%r)' % (
url, self.verify_certificate,
))
self.logger.debug('Enigma2 Parameters: %s' % str(params))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.get(
url,
params=params,
headers=headers,
auth=auth,
verify=self.verify_certificate,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyEnigma2.http_response_code_lookup(r.status_code)
self.logger.warning(
'Failed to send Enigma2 notification: '
'{}{}error={}.'.format(
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug('Response Details:\r\n{}'.format(r.content))
# Return; we're done
return False
# We were able to post our message; now lets evaluate the response
try:
# Acquire our result
result = loads(r.content).get('result', False)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
result = False
if not result:
self.logger.warning(
'Failed to send Enigma2 notification: '
'There was no server acknowledgement.')
self.logger.debug('Response Details:\r\n{}'.format(r.content))
# Return; we're done
return False
self.logger.info('Sent Enigma2 notification.')
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Enigma2 '
'notification to %s.' % self.host)
self.logger.debug('Socket Exception: %s' % str(e))
# Return; we're done
return False
return True
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Add our headers (which the user can potentially over-ride if they wish)
# to our returned result set
results['headers'] = results['qsd-']
results['headers'].update(results['qsd+'])
# Tidy our header entries by unquoting them
results['headers'] = {
NotifyEnigma2.unquote(x): NotifyEnigma2.unquote(y)
for x, y in results['headers'].items()}
# Save timeout value (if specified)
if 'timeout' in results['qsd'] and len(results['qsd']['timeout']):
results['timeout'] = results['qsd']['timeout']
return results
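A hedged usage sketch for the new Enigma2 plugin, following the URL templates above (host, optional user:password, optional ?timeout=); the hostname and credentials are placeholders:

```python
import apprise

apobj = apprise.Apprise()
apobj.add('enigma2://root:password@receiver.local/?timeout=13')
apobj.notify(body='Recording finished')  # Enigma2 ignores titles (title_maxlen = 0)
```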

View File

@ -71,7 +71,7 @@ class NotifyGitter(NotifyBase):
# The services URL
service_url = 'https://gitter.im/'
# All pushover requests are secure
# All notification requests are secure
secure_protocol = 'gitter'
# A URL that takes you to the setup/help of the specific protocol
@ -102,7 +102,7 @@ class NotifyGitter(NotifyBase):
# Define object templates
templates = (
'{schema}://{token}:{targets}/',
'{schema}://{token}/{targets}/',
)
# Define our template tokens

View File

@ -130,6 +130,11 @@ class NotifyJoin(NotifyBase):
'regex': (r'^[a-z0-9]{32}$', 'i'),
'map_to': 'targets',
},
'device_name': {
'name': _('Device Name'),
'type': 'string',
'map_to': 'targets',
},
'group': {
'name': _('Group'),
'type': 'choice:string',
@ -210,18 +215,7 @@ class NotifyJoin(NotifyBase):
'group.{}'.format(group_re.group('name').lower()))
continue
elif IS_DEVICE_RE.match(target):
self.targets.append(target)
continue
self.logger.warning(
'Ignoring invalid Join device/group "{}"'.format(target)
)
if not self.targets:
msg = 'No Join targets to notify.'
self.logger.warning(msg)
raise TypeError(msg)
self.targets.append(target)
return
@ -247,12 +241,18 @@ class NotifyJoin(NotifyBase):
url_args = {
'apikey': self.apikey,
'deviceId': target,
'priority': str(self.priority),
'title': title,
'text': body,
}
if IS_GROUP_RE.match(target) or IS_DEVICE_RE.match(target):
url_args['deviceId'] = target
else:
# Support Device Names
url_args['deviceNames'] = target
# prepare our image for display if configured to do so
image_url = None if not self.include_image \
else self.image_url(notify_type)

View File

@ -0,0 +1,377 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a Kavenegar account from their website
# at https://kavenegar.com/
#
# After you've established your account you can get your API Key from your
# account profile: https://panel.kavenegar.com/client/setting/account
#
# This provider does not accept +1 (for example) as a country code. You need
# to specify 001 instead.
#
import re
import requests
from json import loads
from .NotifyBase import NotifyBase
from ..common import NotifyType
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
# Based on https://kavenegar.com/rest.html
KAVENEGAR_HTTP_ERROR_MAP = {
200: 'The request was approved',
400: 'Parameters are incomplete',
401: 'Account has been disabled',
402: 'The operation failed',
403: 'The API Key is invalid',
404: 'The method is unknown',
405: 'The GET/POST request is wrong',
406: 'Invalid mandatory parameters sent',
407: 'You cannot access the information you want',
409: 'The server is unable to respond',
411: 'The recipient is invalid',
412: 'The sender is invalid',
413: 'Message empty or message length exceeded',
414: 'The number of recipients is more than 200',
415: 'The start index is larger than the total',
416: 'The source IP of the service does not match the settings',
417: 'The submission date is incorrect, '
'either expired or not in the correct format',
418: 'Your account credit is insufficient',
422: 'Data cannot be processed due to invalid characters',
501: 'SMS can only be sent to the account holder number',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
class NotifyKavenegar(NotifyBase):
"""
A wrapper for Kavenegar Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Kavenegar'
# The services URL
service_url = 'https://kavenegar.com/'
# All notification requests are secure
secure_protocol = 'kavenegar'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_kavenegar'
# Kavenegar single notification URL
notify_url = 'http://api.kavenegar.com/v1/{apikey}/sms/send.json'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{apikey}/{targets}',
'{schema}://{source}@{apikey}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'apikey': {
'name': _('API Key'),
'type': 'string',
'required': True,
'private': True,
'regex': (r'^[a-z0-9]+$', 'i'),
},
'source': {
'name': _('Source Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
'required': True,
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, apikey, source=None, targets=None, **kwargs):
"""
Initialize Kavenegar Object
"""
super(NotifyKavenegar, self).__init__(**kwargs)
# API Key (associated with project)
self.apikey = validate_regex(
apikey, *self.template_tokens['apikey']['regex'])
if not self.apikey:
msg = 'An invalid Kavenegar API Key ' \
'({}) was specified.'.format(apikey)
self.logger.warning(msg)
raise TypeError(msg)
self.source = None
if source is not None:
result = IS_PHONE_NO.match(source)
if not result:
msg = 'The Kavenegar source specified ({}) is invalid.'\
.format(source)
self.logger.warning(msg)
raise TypeError(msg)
# Further check our phone # for its digit count
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
msg = 'The Kavenegar source # specified ({}) is invalid.'\
.format(source)
self.logger.warning(msg)
raise TypeError(msg)
# Store our source
self.source = result
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
# Further check our phone # for its digit count
# if it's less than 10, then we can assume it's
# a poorly specified phone no and spit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
if len(self.targets) == 0:
msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Sends SMS Message
"""
# error tracking (used for function return)
has_error = False
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
}
# Our URL
url = self.notify_url.format(apikey=self.apikey)
# Create a copy of the targets list
targets = list(self.targets)
while len(targets):
# Get our target(s) to notify
target = targets.pop(0)
# Prepare our payload
payload = {
'receptor': target,
'message': body,
}
if self.source:
# Only set source if specified
payload['sender'] = self.source
# Some Debug Logging
self.logger.debug(
'Kavenegar POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('Kavenegar Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
params=payload,
headers=headers,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(
r.status_code, KAVENEGAR_HTTP_ERROR_MAP)
try:
# Update our status response if we can
json_response = loads(r.content)
status_str = json_response.get('message', status_str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
# We will just use the status we already have.
pass
self.logger.warning(
'Failed to send Kavenegar SMS notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
# If we reach here; the message was sent
self.logger.info(
'Sent Kavenegar SMS notification to {}.'.format(target))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Kavenegar:%s ' % (
', '.join(self.targets)) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
}
return '{schema}://{source}{apikey}/{targets}?{args}'.format(
schema=self.secure_protocol,
source='' if not self.source else '{}@'.format(self.source),
apikey=self.pprint(self.apikey, privacy, safe=''),
targets='/'.join(
[NotifyKavenegar.quote(x, safe='') for x in self.targets]),
args=NotifyKavenegar.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Store the source if specified
if results.get('user', None):
results['source'] = results['user']
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'] = NotifyKavenegar.split_path(results['fullpath'])
# The hostname is our authentication key
results['apikey'] = NotifyKavenegar.unquote(results['host'])
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyKavenegar.parse_list(results['qsd']['to'])
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyKavenegar.unquote(results['qsd']['from'])
return results
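
For illustration, a minimal usage sketch of the kavenegar:// templates defined above, assuming the standard Apprise client API; the API key and phone number are placeholders, not real credentials.

import apprise

# Register a Kavenegar SMS target using '{schema}://{apikey}/{targets}'
apobj = apprise.Apprise()
apobj.add('kavenegar://abc123apikey/15551234567')

# title_maxlen is 0, so any title is folded into the 160 character SMS body
apobj.notify(title='Bazarr', body='Subtitles downloaded for S01E01.')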

View File

@ -98,7 +98,7 @@ class NotifyMSG91(NotifyBase):
notify_url = 'https://world.msg91.com/api/sendhttp.php'
# The maximum length of the body
body_maxlen = 140
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.

View File

@ -101,7 +101,7 @@ class NotifyMailgun(NotifyBase):
# The services URL
service_url = 'https://www.mailgun.com/'
# All pushover requests are secure
# All notification requests are secure
secure_protocol = 'mailgun'
# Mailgun advertises they allow 300 requests per minute.

View File

@ -41,6 +41,7 @@ from ..common import NotifyImageSize
from ..common import NotifyFormat
from ..utils import parse_bool
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Define default path
@ -74,12 +75,16 @@ class MatrixWebhookMode(object):
# Support the slack webhook plugin
SLACK = "slack"
# Support the t2bot webhook plugin
T2BOT = "t2bot"
# webhook modes are placed into this list for validation purposes
MATRIX_WEBHOOK_MODES = (
MatrixWebhookMode.DISABLED,
MatrixWebhookMode.MATRIX,
MatrixWebhookMode.SLACK,
MatrixWebhookMode.T2BOT,
)
@ -122,6 +127,11 @@ class NotifyMatrix(NotifyBase):
# Define object templates
templates = (
# Targets are ignored when using t2bot mode; only a token is required
'{schema}://{token}',
'{schema}://{user}@{token}',
# All other non-t2bot setups require targets
'{schema}://{user}:{password}@{host}/{targets}',
'{schema}://{user}:{password}@{host}:{port}/{targets}',
'{schema}://{token}:{password}@{host}/{targets}',
@ -199,8 +209,7 @@ class NotifyMatrix(NotifyBase):
},
})
def __init__(self, targets=None, mode=None, include_image=False,
**kwargs):
def __init__(self, targets=None, mode=None, include_image=False, **kwargs):
"""
Initialize Matrix Object
"""
@ -233,6 +242,16 @@ class NotifyMatrix(NotifyBase):
self.logger.warning(msg)
raise TypeError(msg)
if self.mode == MatrixWebhookMode.T2BOT:
# t2bot configuration requires that a webhook id is specified
self.access_token = validate_regex(
self.host, r'^[a-z0-9]{64}$', 'i')
if not self.access_token:
msg = 'An invalid T2Bot/Matrix Webhook ID ' \
'({}) was specified.'.format(self.host)
self.logger.warning(msg)
raise TypeError(msg)
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Matrix Notification
@ -257,20 +276,30 @@ class NotifyMatrix(NotifyBase):
'Content-Type': 'application/json',
}
# Acquire our access token from our URL
access_token = self.password if self.password else self.user
if self.mode != MatrixWebhookMode.T2BOT:
# Acquire our access token from our URL
access_token = self.password if self.password else self.user
default_port = 443 if self.secure else 80
default_port = 443 if self.secure else 80
# Prepare our URL
url = '{schema}://{hostname}:{port}/{webhook_path}/{token}'.format(
schema='https' if self.secure else 'http',
hostname=self.host,
port='' if self.port is None
or self.port == default_port else self.port,
webhook_path=MATRIX_V1_WEBHOOK_PATH,
token=access_token,
)
# Prepare our URL
url = '{schema}://{hostname}:{port}/{webhook_path}/{token}'.format(
schema='https' if self.secure else 'http',
hostname=self.host,
port='' if self.port is None
or self.port == default_port else self.port,
webhook_path=MATRIX_V1_WEBHOOK_PATH,
token=access_token,
)
else:
#
# t2bot Setup
#
# Prepare our URL
url = 'https://webhooks.t2bot.io/api/v1/matrix/hook/' \
'{token}'.format(token=self.access_token)
# Retrieve our payload
payload = getattr(self, '_{}_webhook_payload'.format(self.mode))(
@ -381,7 +410,7 @@ class NotifyMatrix(NotifyBase):
payload = {
'displayName':
self.user if self.user else self.matrix_default_user,
self.user if self.user else self.app_id,
'format': 'html',
}
@ -399,6 +428,27 @@ class NotifyMatrix(NotifyBase):
return payload
def _t2bot_webhook_payload(self, body, title='',
notify_type=NotifyType.INFO, **kwargs):
"""
Format the payload for a T2Bot Matrix based messages
"""
# Retrieve our payload
payload = self._matrix_webhook_payload(
body=body, title=title, notify_type=notify_type, **kwargs)
# Acquire our image url if we're configured to do so
image_url = None if not self.include_image else \
self.image_url(notify_type)
if image_url:
# t2bot can take an avatarUrl Entry
payload['avatarUrl'] = image_url
return payload
def _send_server_notification(self, body, title='',
notify_type=NotifyType.INFO, **kwargs):
"""
@ -867,6 +917,9 @@ class NotifyMatrix(NotifyBase):
))
self.logger.debug('Matrix Payload: %s' % str(payload))
# Initialize our response object
r = None
try:
r = fn(
url,
@ -948,7 +1001,8 @@ class NotifyMatrix(NotifyBase):
"""
Ensure we relinquish our token
"""
self._logout()
if self.mode != MatrixWebhookMode.T2BOT:
self._logout()
def url(self, privacy=False, *args, **kwargs):
"""
@ -997,12 +1051,14 @@ class NotifyMatrix(NotifyBase):
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
if not results.get('host'):
return None
# Get our rooms
results['targets'] = NotifyMatrix.split_path(results['fullpath'])
@ -1040,4 +1096,37 @@ class NotifyMatrix(NotifyBase):
results['mode'] = results['qsd'].get(
'mode', results['qsd'].get('webhook'))
# t2bot detection... look for just a hostname, and/or just a user/host
# if we match this; we can go ahead and set the mode (but only if
# it was otherwise not set)
if results['mode'] is None \
and not results['password'] \
and not results['targets']:
# Default mode to t2bot
results['mode'] = MatrixWebhookMode.T2BOT
return results
@staticmethod
def parse_native_url(url):
"""
Support https://webhooks.t2bot.io/api/v1/matrix/hook/WEBHOOK_TOKEN/
"""
result = re.match(
r'^https?://webhooks\.t2bot\.io/api/v1/matrix/hook/'
r'(?P<webhook_token>[A-Z0-9_-]+)/?'
r'(?P<args>\?.+)?$', url, re.I)
if result:
mode = 'mode={}'.format(MatrixWebhookMode.T2BOT)
return NotifyMatrix.parse_url(
'{schema}://{webhook_token}/{args}'.format(
schema=NotifyMatrix.secure_protocol,
webhook_token=result.group('webhook_token'),
args='?{}'.format(mode) if not result.group('args')
else '{}&{}'.format(result.group('args'), mode)))
return None
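
For illustration, a short sketch of how the new t2bot handling can be reached, assuming the standard Apprise client API and its native URL support; the 64 character webhook token below is a placeholder.

import apprise

apobj = apprise.Apprise()

# Placeholder 64 character t2bot webhook token (matches ^[a-z0-9]{64}$)
token = '0123456789abcdef' * 4

# Explicit schema form; mode=t2bot is also auto-detected when no password
# or targets are supplied
apobj.add('matrixs://{}/?mode=t2bot'.format(token))

# Native webhook URL handled by NotifyMatrix.parse_native_url()
apobj.add('https://webhooks.t2bot.io/api/v1/matrix/hook/{}'.format(token))

apobj.notify(title='Bazarr', body='Notification test')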

View File

@ -63,7 +63,7 @@ class NotifyMessageBird(NotifyBase):
notify_url = 'https://rest.messagebird.com/messages'
# The maximum length of the body
body_maxlen = 140
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.

View File

@ -64,21 +64,12 @@ class NotifyNexmo(NotifyBase):
notify_url = 'https://rest.nexmo.com/sms/json'
# The maximum length of the body
body_maxlen = 140
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Default Time To Live
# By default Nexmo attempt delivery for 72 hours, however the maximum
# effective value depends on the operator and is typically 24 - 48 hours.
# We recommend this value should be kept at its default or at least 30
# minutes.
default_ttl = 900000
ttl_max = 604800000
ttl_min = 20000
# Define object templates
templates = (
'{schema}://{apikey}:{secret}@{from_phone}',
@ -135,6 +126,12 @@ class NotifyNexmo(NotifyBase):
'secret': {
'alias_of': 'secret',
},
# Default Time To Live
# By default Nexmo attempts delivery for 72 hours, however the maximum
# effective value depends on the operator and is typically 24 - 48
# hours. We recommend this value should be kept at its default or at
# least 30 minutes.
'ttl': {
'name': _('ttl'),
'type': 'int',
@ -170,7 +167,7 @@ class NotifyNexmo(NotifyBase):
raise TypeError(msg)
# Set our Time to Live Flag
self.ttl = self.default_ttl
self.ttl = self.template_args['ttl']['default']
try:
self.ttl = int(ttl)
@ -178,7 +175,8 @@ class NotifyNexmo(NotifyBase):
# Do nothing
pass
if self.ttl < self.ttl_min or self.ttl > self.ttl_max:
if self.ttl < self.template_args['ttl']['min'] or \
self.ttl > self.template_args['ttl']['max']:
msg = 'The Nexmo TTL specified ({}) is out of range.'\
.format(self.ttl)
self.logger.warning(msg)

View File

@ -0,0 +1,294 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..AppriseLocale import gettext_lazy as _
class NotifyNextcloud(NotifyBase):
"""
A wrapper for Nextcloud Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Nextcloud'
# The services URL
service_url = 'https://nextcloud.com/'
# Insecure protocol (for those self hosted requests)
protocol = 'ncloud'
# The default secure protocol
secure_protocol = 'nclouds'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_nextcloud'
# Nextcloud URL
notify_url = '{schema}://{host}/ocs/v2.php/apps/admin_notifications/' \
'api/v1/notifications/{target}'
# The maximum length of the title
title_maxlen = 255
# Defines the maximum allowable characters per message.
body_maxlen = 4000
# Define object templates
templates = (
'{schema}://{user}:{password}@{host}/{targets}',
'{schema}://{user}:{password}@{host}:{port}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'host': {
'name': _('Hostname'),
'type': 'string',
'required': True,
},
'port': {
'name': _('Port'),
'type': 'int',
'min': 1,
'max': 65535,
},
'user': {
'name': _('Username'),
'type': 'string',
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
},
'target_user': {
'name': _('Target User'),
'type': 'string',
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
'required': True,
},
})
# Define any kwargs we're using
template_kwargs = {
'headers': {
'name': _('HTTP Header'),
'prefix': '+',
},
}
def __init__(self, targets=None, headers=None, **kwargs):
"""
Initialize Nextcloud Object
"""
super(NotifyNextcloud, self).__init__(**kwargs)
self.targets = parse_list(targets)
if len(self.targets) == 0:
msg = 'At least one Nextcloud target user must be specified.'
self.logger.warning(msg)
raise TypeError(msg)
self.headers = {}
if headers:
# Store our extra headers
self.headers.update(headers)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Nextcloud Notification
"""
# Prepare our Header
headers = {
'User-Agent': self.app_id,
'OCS-APIREQUEST': 'true',
}
# Apply any/all header over-rides defined
headers.update(self.headers)
# error tracking (used for function return)
has_error = False
# Create a copy of the targets list
targets = list(self.targets)
while len(targets):
target = targets.pop(0)
# Prepare our Payload
payload = {
'shortMessage': title if title else self.app_desc,
}
if body:
# Only store the longMessage if a body was defined; nextcloud
# doesn't take kindly to empty longMessage entries.
payload['longMessage'] = body
auth = None
if self.user:
auth = (self.user, self.password)
notify_url = self.notify_url.format(
schema='https' if self.secure else 'http',
host=self.host if not isinstance(self.port, int)
else '{}:{}'.format(self.host, self.port),
target=target,
)
self.logger.debug('Nextcloud POST URL: %s (cert_verify=%r)' % (
notify_url, self.verify_certificate,
))
self.logger.debug('Nextcloud Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
notify_url,
data=payload,
headers=headers,
auth=auth,
verify=self.verify_certificate,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyNextcloud.http_response_code_lookup(
r.status_code)
self.logger.warning(
'Failed to send Nextcloud notification:'
'{}{}error={}.'.format(
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# track our failure
has_error = True
continue
else:
self.logger.info('Sent Nextcloud notification.')
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Nextcloud '
'notification.',
)
self.logger.debug('Socket Exception: %s' % str(e))
# track our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
}
# Append our headers into our args
args.update({'+{}'.format(k): v for k, v in self.headers.items()})
# Determine Authentication
auth = ''
if self.user and self.password:
auth = '{user}:{password}@'.format(
user=NotifyNextcloud.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
)
elif self.user:
auth = '{user}@'.format(
user=NotifyNextcloud.quote(self.user, safe=''),
)
default_port = 443 if self.secure else 80
return '{schema}://{auth}{hostname}{port}/{targets}?{args}' \
.format(
schema=self.secure_protocol
if self.secure else self.protocol,
auth=auth,
hostname=NotifyNextcloud.quote(self.host, safe=''),
port='' if self.port is None or self.port == default_port
else ':{}'.format(self.port),
targets='/'.join([NotifyNextcloud.quote(x)
for x in self.targets]),
args=NotifyNextcloud.urlencode(args),
)
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Fetch our targets
results['targets'] = \
NotifyNextcloud.split_path(results['fullpath'])
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyNextcloud.parse_list(results['qsd']['to'])
# Add any headers the user may wish to over-ride to our
# returned result set
results['headers'] = results['qsd-']
results['headers'].update(results['qsd+'])
return results
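
For illustration, a minimal sketch of the ncloud:// and nclouds:// URL layouts this new plugin registers, assuming the standard Apprise client API; host, credentials and target users are placeholders.

import apprise

apobj = apprise.Apprise()

# '{schema}://{user}:{password}@{host}/{targets}'; nclouds:// is the
# https variant, ncloud:// the plain http one
apobj.add('nclouds://admin:secret@cloud.example.com/alice/bob')

# Extra headers can be passed with the '+' prefix parsed above
apobj.add('ncloud://admin:secret@192.168.1.10:8080/alice?+X-Custom=1')

apobj.notify(title='Bazarr', body='A longer message for the longMessage field.')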

View File

@ -147,6 +147,19 @@ class NotifyPushBullet(NotifyBase):
# We need to upload our payload first so that we can source it
# in remaining messages
for attachment in attach:
# Perform some simple error checking
if not attachment:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attachment.url(privacy=True)))
return False
self.logger.debug(
'Preparing PushBullet attachment {}'.format(
attachment.url(privacy=True)))
# prepare payload
payload = {
'file_name': attachment.name,
@ -253,7 +266,7 @@ class NotifyPushBullet(NotifyBase):
continue
self.logger.info(
'Sent PushBullet attachment (%s) to "%s".' % (
'Sent PushBullet attachment ({}) to "{}".'.format(
attach_payload['file_name'], recipient))
return not has_error

View File

@ -0,0 +1,832 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# We use io because it allows us to test the open() call
import io
import base64
import requests
from json import loads
from .NotifyBase import NotifyBase
from ..common import NotifyType
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
class PushSaferSound(object):
"""
Defines all of the supported PushSafer sounds
"""
# Silent
SILENT = 0
# Ahem (IM)
AHEM = 1
# Applause (Mail)
APPLAUSE = 2
# Arrow (Reminder)
ARROW = 3
# Baby (SMS)
BABY = 4
# Bell (Alarm)
BELL = 5
# Bicycle (Alarm2)
BICYCLE = 6
# Boing (Alarm3)
BOING = 7
# Buzzer (Alarm4)
BUZZER = 8
# Camera (Alarm5)
CAMERA = 9
# Car Horn (Alarm6)
CAR_HORN = 10
# Cash Register (Alarm7)
CASH_REGISTER = 11
# Chime (Alarm8)
CHIME = 12
# Creaky Door (Alarm9)
CREAKY_DOOR = 13
# Cuckoo Clock (Alarm10)
CUCKOO_CLOCK = 14
# Disconnect (Call)
DISCONNECT = 15
# Dog (Call2)
DOG = 16
# Doorbell (Call3)
DOORBELL = 17
# Fanfare (Call4)
FANFARE = 18
# Gun Shot (Call5)
GUN_SHOT = 19
# Honk (Call6)
HONK = 20
# Jaw Harp (Call7)
JAW_HARP = 21
# Morse (Call8)
MORSE = 22
# Electricity (Call9)
ELECTRICITY = 23
# Radio Tuner (Call10)
RADIO_TURNER = 24
# Sirens
SIRENS = 25
# Military Trumpets
MILITARY_TRUMPETS = 26
# Ufo
UFO = 27
# Whah Whah Whah
LONG_WHAH = 28
# Man Saying Goodbye
GOODBYE = 29
# Man Saying Hello
HELLO = 30
# Man Saying No
NO = 31
# Man Saying Ok
OKAY = 32
# Man Saying Ooohhhweee
OOOHHHWEEE = 33
# Man Saying Warning
WARNING = 34
# Man Saying Welcome
WELCOME = 35
# Man Saying Yeah
YEAH = 36
# Man Saying Yes
YES = 37
# Beep short
BEEP1 = 38
# Weeeee short
WEEE = 39
# Cut in and out short
CUTINOUT = 40
# Finger flicking glas short
FLICK_GLASS = 41
# Wa Wa Waaaa short
SHORT_WHAH = 42
# Laser short
LASER = 43
# Wind Chime short
WIND_CHIME = 44
# Echo short
ECHO = 45
# Zipper short
ZIPPER = 46
# HiHat short
HIHAT = 47
# Beep 2 short
BEEP2 = 48
# Beep 3 short
BEEP3 = 49
# Beep 4 short
BEEP4 = 50
# The Alarm is armed
ALARM_ARMED = 51
# The Alarm is disarmed
ALARM_DISARMED = 52
# The Backup is ready
BACKUP_READY = 53
# The Door is closed
DOOR_CLOSED = 54
# The Door is opened
DOOR_OPENED = 55
# The Window is closed
WINDOW_CLOSED = 56
# The Window is open
WINDOW_OPEN = 57
# The Light is on
LIGHT_ON = 58
# The Light is off
LIGHT_OFF = 59
# The Doorbell rings
DOORBELL_RANG = 60
PUSHSAFER_SOUND_MAP = {
# Device Default,
'silent': PushSaferSound.SILENT,
'ahem': PushSaferSound.AHEM,
'applause': PushSaferSound.APPLAUSE,
'arrow': PushSaferSound.ARROW,
'baby': PushSaferSound.BABY,
'bell': PushSaferSound.BELL,
'bicycle': PushSaferSound.BICYCLE,
'bike': PushSaferSound.BICYCLE,
'boing': PushSaferSound.BOING,
'buzzer': PushSaferSound.BUZZER,
'camera': PushSaferSound.CAMERA,
'carhorn': PushSaferSound.CAR_HORN,
'horn': PushSaferSound.CAR_HORN,
'cashregister': PushSaferSound.CASH_REGISTER,
'chime': PushSaferSound.CHIME,
'creakydoor': PushSaferSound.CREAKY_DOOR,
'cuckooclock': PushSaferSound.CUCKOO_CLOCK,
'cuckoo': PushSaferSound.CUCKOO_CLOCK,
'disconnect': PushSaferSound.DISCONNECT,
'dog': PushSaferSound.DOG,
'doorbell': PushSaferSound.DOORBELL,
'fanfare': PushSaferSound.FANFARE,
'gunshot': PushSaferSound.GUN_SHOT,
'honk': PushSaferSound.HONK,
'jawharp': PushSaferSound.JAW_HARP,
'morse': PushSaferSound.MORSE,
'electric': PushSaferSound.ELECTRICITY,
'radiotuner': PushSaferSound.RADIO_TURNER,
'sirens': PushSaferSound.SIRENS,
'militarytrumpets': PushSaferSound.MILITARY_TRUMPETS,
'military': PushSaferSound.MILITARY_TRUMPETS,
'trumpets': PushSaferSound.MILITARY_TRUMPETS,
'ufo': PushSaferSound.UFO,
'whahwhah': PushSaferSound.LONG_WHAH,
'whah': PushSaferSound.SHORT_WHAH,
'goodye': PushSaferSound.GOODBYE,
'hello': PushSaferSound.HELLO,
'no': PushSaferSound.NO,
'okay': PushSaferSound.OKAY,
'ok': PushSaferSound.OKAY,
'ooohhhweee': PushSaferSound.OOOHHHWEEE,
'warn': PushSaferSound.WARNING,
'warning': PushSaferSound.WARNING,
'welcome': PushSaferSound.WELCOME,
'yeah': PushSaferSound.YEAH,
'yes': PushSaferSound.YES,
'beep': PushSaferSound.BEEP1,
'beep1': PushSaferSound.BEEP1,
'weee': PushSaferSound.WEEE,
'wee': PushSaferSound.WEEE,
'cutinout': PushSaferSound.CUTINOUT,
'flickglass': PushSaferSound.FLICK_GLASS,
'laser': PushSaferSound.LASER,
'windchime': PushSaferSound.WIND_CHIME,
'echo': PushSaferSound.ECHO,
'zipper': PushSaferSound.ZIPPER,
'hihat': PushSaferSound.HIHAT,
'beep2': PushSaferSound.BEEP2,
'beep3': PushSaferSound.BEEP3,
'beep4': PushSaferSound.BEEP4,
'alarmarmed': PushSaferSound.ALARM_ARMED,
'armed': PushSaferSound.ALARM_ARMED,
'alarmdisarmed': PushSaferSound.ALARM_DISARMED,
'disarmed': PushSaferSound.ALARM_DISARMED,
'backupready': PushSaferSound.BACKUP_READY,
'dooropen': PushSaferSound.DOOR_OPENED,
'dopen': PushSaferSound.DOOR_OPENED,
'doorclosed': PushSaferSound.DOOR_CLOSED,
'dclosed': PushSaferSound.DOOR_CLOSED,
'windowopen': PushSaferSound.WINDOW_OPEN,
'wopen': PushSaferSound.WINDOW_OPEN,
'windowclosed': PushSaferSound.WINDOW_CLOSED,
'wclosed': PushSaferSound.WINDOW_CLOSED,
'lighton': PushSaferSound.LIGHT_ON,
'lon': PushSaferSound.LIGHT_ON,
'lightoff': PushSaferSound.LIGHT_OFF,
'loff': PushSaferSound.LIGHT_OFF,
'doorbellrang': PushSaferSound.DOORBELL_RANG,
}
# Priorities
class PushSaferPriority(object):
LOW = -2
MODERATE = -1
NORMAL = 0
HIGH = 1
EMERGENCY = 2
PUSHSAFER_PRIORITIES = (
PushSaferPriority.LOW,
PushSaferPriority.MODERATE,
PushSaferPriority.NORMAL,
PushSaferPriority.HIGH,
PushSaferPriority.EMERGENCY,
)
PUSHSAFER_PRIORITY_MAP = {
# short for 'low'
'low': PushSaferPriority.LOW,
# short for 'medium'
'medium': PushSaferPriority.MODERATE,
# short for 'normal'
'normal': PushSaferPriority.NORMAL,
# short for 'high'
'high': PushSaferPriority.HIGH,
# short for 'emergency'
'emergency': PushSaferPriority.EMERGENCY,
}
# Identify the priority you want to designate as the fall back
DEFAULT_PRIORITY = "normal"
# Vibrations
class PushSaferVibration(object):
"""
Defines the acceptable vibration settings for notification
"""
# x1
LOW = 1
# x2
NORMAL = 2
# x3
HIGH = 3
# Identify all of the vibrations in one place
PUSHSAFER_VIBRATIONS = (
PushSaferVibration.LOW,
PushSaferVibration.NORMAL,
PushSaferVibration.HIGH,
)
# At this time, only this many pictures can be attached to a single
# notification. When more are supported, just add their argument below
PICTURE_PARAMETER = (
'p',
'p2',
'p3',
)
# Flag used as a placeholder to sending to all devices
PUSHSAFER_SEND_TO_ALL = 'a'
class NotifyPushSafer(NotifyBase):
"""
A wrapper for PushSafer Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Pushsafer'
# The services URL
service_url = 'https://www.pushsafer.com/'
# The default insecure protocol
protocol = 'psafer'
# The default secure protocol
secure_protocol = 'psafers'
# Number of requests to allow per second
request_rate_per_sec = 1.2
# The icon ID of 25 looks like a megaphone
default_pushsafer_icon = 25
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_pushsafer'
# Defines the hostname to post content to; since this service supports
# both insecure and secure methods, we set the {schema} just before we
# post the message upstream.
notify_url = '{schema}://www.pushsafer.com/api'
# Define object templates
templates = (
'{schema}://{privatekey}',
'{schema}://{privatekey}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'privatekey': {
'name': _('Private Key'),
'type': 'string',
'private': True,
'required': True,
},
'target_device': {
'name': _('Target Device'),
'type': 'string',
'map_to': 'targets',
},
'target_email': {
'name': _('Target Email'),
'type': 'string',
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'values': PUSHSAFER_PRIORITIES,
},
'sound': {
'name': _('Sound'),
'type': 'choice:string',
'values': PUSHSAFER_SOUND_MAP,
},
'vibration': {
'name': _('Vibration'),
'type': 'choice:int',
'values': PUSHSAFER_VIBRATIONS,
},
'to': {
'alias_of': 'targets',
},
})
def __init__(self, privatekey, targets=None, priority=None, sound=None,
vibration=None, **kwargs):
"""
Initialize PushSafer Object
"""
super(NotifyPushSafer, self).__init__(**kwargs)
#
# Priority
#
try:
# Acquire our priority if we can:
# - We accept both the integer form as well as a string
# representation
self.priority = int(priority)
except TypeError:
# NoneType means use Default; this is an okay exception
self.priority = None
except ValueError:
# Input is a string; attempt to get the lookup from our
# priority mapping
priority = priority.lower().strip()
# This little bit of black magic allows us to match against
# low, lo, l (for low);
# normal, norma, norm, nor, no, n (for normal)
# ... etc
match = next((key for key in PUSHSAFER_PRIORITY_MAP.keys()
if key.startswith(priority)), None) \
if priority else None
# Now test to see if we got a match
if not match:
msg = 'An invalid PushSafer priority ' \
'({}) was specified.'.format(priority)
self.logger.warning(msg)
raise TypeError(msg)
# store our successfully looked up priority
self.priority = PUSHSAFER_PRIORITY_MAP[match]
if self.priority is not None and \
self.priority not in PUSHSAFER_PRIORITY_MAP.values():
msg = 'An invalid PushSafer priority ' \
'({}) was specified.'.format(priority)
self.logger.warning(msg)
raise TypeError(msg)
#
# Sound
#
try:
# Acquire our sound if we can:
# - We accept both the integer form as well as a string
# representation
self.sound = int(sound)
except TypeError:
# NoneType means use Default; this is an okay exception
self.sound = None
except ValueError:
# Input is a string; attempt to get the lookup from our
# sound mapping
sound = sound.lower().strip()
# This little bit of black magic allows us to match
# against multiple versions of the same string
# ... etc
match = next((key for key in PUSHSAFER_SOUND_MAP.keys()
if key.startswith(sound)), None) \
if sound else None
# Now test to see if we got a match
if not match:
msg = 'An invalid PushSafer sound ' \
'({}) was specified.'.format(sound)
self.logger.warning(msg)
raise TypeError(msg)
# store our successfully looked up sound
self.sound = PUSHSAFER_SOUND_MAP[match]
if self.sound is not None and \
self.sound not in PUSHSAFER_SOUND_MAP.values():
msg = 'An invalid PushSafer sound ' \
'({}) was specified.'.format(sound)
self.logger.warning(msg)
raise TypeError(msg)
#
# Vibration
#
try:
# Use defined integer as is if defined, no further error checking
# is performed
self.vibration = int(vibration)
except TypeError:
# NoneType means use Default; this is an okay exception
self.vibration = None
except ValueError:
msg = 'An invalid PushSafer vibration ' \
'({}) was specified.'.format(vibration)
self.logger.warning(msg)
raise TypeError(msg)
if self.vibration and self.vibration not in PUSHSAFER_VIBRATIONS:
msg = 'An invalid PushSafer vibration ' \
'({}) was specified.'.format(vibration)
self.logger.warning(msg)
raise TypeError(msg)
#
# Private Key (associated with project)
#
self.privatekey = validate_regex(privatekey)
if not self.privatekey:
msg = 'An invalid PushSafer Private Key ' \
'({}) was specified.'.format(privatekey)
self.logger.warning(msg)
raise TypeError(msg)
self.targets = parse_list(targets)
if len(self.targets) == 0:
self.targets = (PUSHSAFER_SEND_TO_ALL, )
return
def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
**kwargs):
"""
Perform PushSafer Notification
"""
# error tracking (used for function return)
has_error = False
# Initialize our list of attachments
attachments = []
if attach:
# We need to upload our payload first so that we can source it
# in remaining messages
for attachment in attach:
# prepare payload
if not attachment:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attachment.url(privacy=True)))
return False
if not attachment.mimetype.startswith('image/'):
# Attachment not supported; continue peacefully
self.logger.debug(
'Ignoring unsupported PushSafer attachment {}.'.format(
attachment.url(privacy=True)))
continue
self.logger.debug(
'Posting PushSafer attachment {}'.format(
attachment.url(privacy=True)))
try:
with io.open(attachment.path, 'rb') as f:
# Output must be in a DataURL format (that's what
# PushSafer calls it):
attachment = (
attachment.name,
'data:{};base64,{}'.format(
attachment.mimetype,
base64.b64encode(f.read())))
except (OSError, IOError) as e:
self.logger.warning(
'An I/O error occurred while reading {}.'.format(
attachment.name if attachment else 'attachment'))
self.logger.debug('I/O Exception: %s' % str(e))
return False
# Save our pre-prepared payload for attachment posting
attachments.append(attachment)
# Create a copy of the targets list
targets = list(self.targets)
while len(targets):
recipient = targets.pop(0)
# prepare payload
payload = {
't': title,
'm': body,
# Our default icon to use
'i': self.default_pushsafer_icon,
# Notification Color
'c': self.color(notify_type),
# Target Recipient
'd': recipient,
}
if self.sound is not None:
# Only apply sound setting if it was specified
payload['s'] = str(self.sound)
if self.vibration is not None:
# Only apply vibration setting
payload['v'] = str(self.vibration)
if not attachments:
okay, response = self._send(payload)
if not okay:
has_error = True
continue
self.logger.info(
'Sent PushSafer notification to "%s".' % (recipient))
else:
# Create a copy of our payload object
_payload = payload.copy()
for idx in range(
0, len(attachments), len(PICTURE_PARAMETER)):
# Send our attachments to our same user (already prepared
# as our payload object)
for c, attachment in enumerate(
attachments[idx:idx + len(PICTURE_PARAMETER)]):
# Get our attachment information
filename, dataurl = attachment
_payload.update({PICTURE_PARAMETER[c]: dataurl})
self.logger.debug(
'Added attachment (%s) to "%s".' % (
filename, recipient))
okay, response = self._send(_payload)
if not okay:
has_error = True
continue
self.logger.info(
'Sent PushSafer attachment (%s) to "%s".' % (
filename, recipient))
# Sending more than the maximum number of attachments shouldn't
# cause the full text to repeat on subsequent iterations
_payload = payload.copy()
_payload['t'] = ''
_payload['m'] = '...'
return not has_error
def _send(self, payload, **kwargs):
"""
Wrapper to the requests (post) object
"""
headers = {
'User-Agent': self.app_id,
}
# Prepare the notification URL to post to
notify_url = self.notify_url.format(
schema='https' if self.secure else 'http'
)
# Store the payload key
payload['k'] = self.privatekey
self.logger.debug('PushSafer POST URL: %s (cert_verify=%r)' % (
notify_url, self.verify_certificate,
))
self.logger.debug('PushSafer Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
# Default response type
response = None
# Initialize our Pushsafer expected responses
_code = None
_str = 'Unknown'
try:
# Open our attachment path if required:
r = requests.post(
notify_url,
data=payload,
headers=headers,
verify=self.verify_certificate,
)
try:
response = loads(r.content)
_code = response.get('status')
_str = response.get('success', _str) \
if _code == 1 else response.get('error', _str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# Fall back to the existing unparsed value
response = r.content
if r.status_code not in (
requests.codes.ok, requests.codes.no_content):
# We had a problem
status_str = \
NotifyPushSafer.http_response_code_lookup(
r.status_code)
self.logger.warning(
'Failed to deliver payload to PushSafer:'
'{}{}error={}.'.format(
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
return False, response
elif _code != 1:
# It's a bit backwards, but:
# 1 is returned if we succeed
# 0 is returned if we fail
self.logger.warning(
'Failed to deliver payload to PushSafer;'
' error={}.'.format(_str))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
return False, response
# otherwise we were successful
return True, response
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred communicating with PushSafer.')
self.logger.debug('Socket Exception: %s' % str(e))
return False, response
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
}
if self.priority is not None:
# Store our priority; but only if it was specified
args['priority'] = \
next((key for key, value in PUSHSAFER_PRIORITY_MAP.items()
if value == self.priority),
DEFAULT_PRIORITY) # pragma: no cover
if self.sound is not None:
# Store our sound; but only if it was specified
args['sound'] = \
next((key for key, value in PUSHSAFER_SOUND_MAP.items()
if value == self.sound), '') # pragma: no cover
if self.vibration is not None:
# Store our vibration; but only if it was specified
args['vibration'] = str(self.vibration)
targets = '/'.join([NotifyPushSafer.quote(x) for x in self.targets])
if targets == PUSHSAFER_SEND_TO_ALL:
# keyword is reserved for internal usage only; it's safe to remove
# it from the recipients list
targets = ''
return '{schema}://{privatekey}/{targets}?{args}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
privatekey=self.pprint(self.privatekey, privacy, safe=''),
targets=targets,
args=NotifyPushSafer.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Fetch our targets
results['targets'] = \
NotifyPushSafer.split_path(results['fullpath'])
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyPushSafer.parse_list(results['qsd']['to'])
# Setup the token; we store it in Private Key for global
# plugin consistency with naming conventions
results['privatekey'] = NotifyPushSafer.unquote(results['host'])
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
results['priority'] = \
NotifyPushSafer.unquote(results['qsd']['priority'])
if 'sound' in results['qsd'] and len(results['qsd']['sound']):
results['sound'] = \
NotifyPushSafer.unquote(results['qsd']['sound'])
if 'vibration' in results['qsd'] and len(results['qsd']['vibration']):
results['vibration'] = \
NotifyPushSafer.unquote(results['qsd']['vibration'])
return results
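
For illustration, a minimal sketch of the psafer:// and psafers:// URLs this new plugin accepts, assuming the standard Apprise client API; the private key and device id are placeholders.

import apprise

apobj = apprise.Apprise()

# '{schema}://{privatekey}/{targets}' with the priority, sound and
# vibration arguments parsed above (placeholder key and device id)
apobj.add('psafers://AbCdEfGhIjKlMnOp/52?priority=high&sound=okay&vibration=2')

# With no targets, the plugin falls back to PUSHSAFER_SEND_TO_ALL ('a')
apobj.add('psafers://AbCdEfGhIjKlMnOp')

apobj.notify(title='Bazarr', body='Test notification')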

View File

@ -68,7 +68,7 @@ class NotifyPushed(NotifyBase):
title_maxlen = 0
# The maximum allowable characters allowed in the body per message
body_maxlen = 140
body_maxlen = 160
# Define object templates
templates = (

View File

@ -32,6 +32,7 @@ from ..common import NotifyType
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
from ..attachment.AttachBase import AttachBase
# Flag used as a placeholder to sending to all devices
PUSHOVER_SEND_TO_ALL = 'ALL_DEVICES'
@ -140,6 +141,14 @@ class NotifyPushover(NotifyBase):
# Default Pushover sound
default_pushover_sound = PushoverSound.PUSHOVER
# 2.5MB is the maximum supported image filesize as per documentation
# here: https://pushover.net/api#attachments (Dec 26th, 2019)
attach_max_size_bytes = 2621440
# The regular expression of the current attachment supported mime types
# At this time it is only images
attach_supported_mime_type = r'^image/.*'
# Define object templates
templates = (
'{schema}://{user_key}@{token}',
@ -281,17 +290,12 @@ class NotifyPushover(NotifyBase):
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
**kwargs):
"""
Perform Pushover Notification
"""
headers = {
'User-Agent': self.app_id,
'Content-Type': 'application/x-www-form-urlencoded'
}
auth = (self.token, '')
# error tracking (used for function return)
has_error = False
@ -314,7 +318,7 @@ class NotifyPushover(NotifyBase):
'token': self.token,
'user': self.user_key,
'priority': str(self.priority),
'title': title,
'title': title if title else self.app_desc,
'message': body,
'device': device,
'sound': self.sound,
@ -323,60 +327,162 @@ class NotifyPushover(NotifyBase):
if self.priority == PushoverPriority.EMERGENCY:
payload.update({'retry': self.retry, 'expire': self.expire})
self.logger.debug('Pushover POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate,
))
self.logger.debug('Pushover Payload: %s' % str(payload))
if attach:
# Create a copy of our payload
_payload = payload.copy()
# Always call throttle before any remote server i/o is made
self.throttle()
# Send with attachments
for attachment in attach:
# Simple send
if not self._send(_payload, attachment):
# Mark our failure
has_error = True
# clean exit from our attachment loop
break
try:
r = requests.post(
self.notify_url,
data=payload,
headers=headers,
auth=auth,
verify=self.verify_certificate,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyPushover.http_response_code_lookup(
r.status_code, PUSHOVER_HTTP_ERROR_MAP)
self.logger.warning(
'Failed to send Pushover notification to {}: '
'{}{}error={}.'.format(
device,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# To handle multiple attachments, clean up our message
_payload['title'] = '...'
_payload['message'] = attachment.name
# No need to alarm for each consecutive attachment uploaded
# afterwards
_payload['sound'] = PushoverSound.NONE
else:
# Simple send
if not self._send(payload):
# Mark our failure
has_error = True
continue
else:
self.logger.info(
'Sent Pushover notification to %s.' % device)
except requests.RequestException as e:
self.logger.warning(
'A Connection error occured sending Pushover:%s ' % (
device) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def _send(self, payload, attach=None):
"""
Wrapper to the requests (post) object
"""
if isinstance(attach, AttachBase):
# Perform some simple error checking
if not attach:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attach.url(privacy=True)))
return False
# Perform some basic checks as we want to gracefully skip
# over unsupported mime types.
if not re.match(
self.attach_supported_mime_type,
attach.mimetype,
re.I):
# No problem; we just don't support this attachment
# type; gracefully move along
self.logger.debug(
'Ignored unsupported Pushover attachment ({}): {}'
.format(
attach.mimetype,
attach.url(privacy=True)))
return True
# If we get here, we're dealing with a supported image.
# Verify that the filesize is okay though.
file_size = len(attach)
if not (file_size > 0
and file_size <= self.attach_max_size_bytes):
# File size is no good
self.logger.warning(
'Pushover attachment size ({}B) exceeds limit: {}'
.format(file_size, attach.url(privacy=True)))
return False
self.logger.debug(
'Posting Pushover attachment {}'.format(
attach.url(privacy=True)))
# Default Header
headers = {
'User-Agent': self.app_id,
}
# Authentication
auth = (self.token, '')
# Some default values for our request object which we'll update
# depending on what our payload is
files = None
self.logger.debug('Pushover POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate,
))
self.logger.debug('Pushover Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
# Open our attachment path if required:
if attach:
files = {'attachment': (attach.name, open(attach.path, 'rb'))}
r = requests.post(
self.notify_url,
data=payload,
headers=headers,
files=files,
auth=auth,
verify=self.verify_certificate,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyPushover.http_response_code_lookup(
r.status_code, PUSHOVER_HTTP_ERROR_MAP)
self.logger.warning(
'Failed to send Pushover notification to {}: '
'{}{}error={}.'.format(
payload['device'],
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
return False
else:
self.logger.info(
'Sent Pushover notification to %s.' % payload['device'])
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Pushover:%s ' % (
payload['device']) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
return False
except (OSError, IOError) as e:
self.logger.warning(
'An I/O error occurred while reading {}.'.format(
attach.name if attach else 'attachment'))
self.logger.debug('I/O Exception: %s' % str(e))
return False
finally:
# Close our file (if it's open) stored in the second element
# of our files tuple (index 1)
if files:
files['attachment'][1].close()
return True
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.

View File

@ -89,7 +89,7 @@ class NotifySNS(NotifyBase):
# The maximum length of the body
# Source: https://docs.aws.amazon.com/sns/latest/api/API_Publish.html
body_maxlen = 140
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.

View File

@ -0,0 +1,476 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a Sinch account from which you can get your
# API_TOKEN and SERVICE_PLAN_ID right from your console/dashboard at:
# https://dashboard.sinch.com/sms/overview
#
# You will also need to send the SMS From a phone number or account id name.
# This is identified as the source (or where the SMS message will originate
# from). Activated phone numbers can be found on your dashboard here:
# - https://dashboard.sinch.com/numbers/your-numbers/numbers
#
import re
import six
import requests
import json
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
class SinchRegion(object):
"""
Defines the Sinch Server Regions
"""
USA = 'us'
EUROPE = 'eu'
# Used for verification purposes
SINCH_REGIONS = (SinchRegion.USA, SinchRegion.EUROPE)
class NotifySinch(NotifyBase):
"""
A wrapper for Sinch Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Sinch'
# The services URL
service_url = 'https://sinch.com/'
# All notification requests are secure
secure_protocol = 'sinch'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# the number of seconds undelivered messages should linger for
# in the Sinch queue
validity_period = 14400
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_sinch'
# Sinch uses the http protocol with JSON requests
# - the 'spi' gets substituted with the Service Plan ID
# provided as part of the Apprise URL.
notify_url = 'https://{region}.sms.api.sinch.com/xms/v1/{spi}/batches'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{service_plan_id}:{api_token}@{from_phone}',
'{schema}://{service_plan_id}:{api_token}@{from_phone}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'service_plan_id': {
'name': _('Account SID'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[a-f0-9]+$', 'i'),
},
'api_token': {
'name': _('Auth Token'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[a-f0-9]+$', 'i'),
},
'from_phone': {
'name': _('From Phone No'),
'type': 'string',
'required': True,
'regex': (r'^\+?[0-9\s)(+-]+$', 'i'),
'map_to': 'source',
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'short_code': {
'name': _('Target Short Code'),
'type': 'string',
'regex': (r'^[0-9]{5,6}$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'from': {
'alias_of': 'from_phone',
},
'spi': {
'alias_of': 'service_plan_id',
},
'region': {
'name': _('Region'),
'type': 'string',
'regex': (r'^[a-z]{2}$', 'i'),
'default': SinchRegion.USA,
},
'token': {
'alias_of': 'api_token',
},
})
def __init__(self, service_plan_id, api_token, source, targets=None,
region=None, **kwargs):
"""
Initialize Sinch Object
"""
super(NotifySinch, self).__init__(**kwargs)
# The Account SID associated with the account
self.service_plan_id = validate_regex(
service_plan_id, *self.template_tokens['service_plan_id']['regex'])
if not self.service_plan_id:
msg = 'An invalid Sinch Account SID ' \
'({}) was specified.'.format(service_plan_id)
self.logger.warning(msg)
raise TypeError(msg)
# The Authentication Token associated with the account
self.api_token = validate_regex(
api_token, *self.template_tokens['api_token']['regex'])
if not self.api_token:
msg = 'An invalid Sinch Authentication Token ' \
'({}) was specified.'.format(api_token)
self.logger.warning(msg)
raise TypeError(msg)
# The Source Phone # and/or short-code
self.source = source
if not IS_PHONE_NO.match(self.source):
msg = 'The Account (From) Phone # or Short-code specified ' \
'({}) is invalid.'.format(source)
self.logger.warning(msg)
raise TypeError(msg)
# Setup our region
self.region = self.template_args['region']['default'] \
if not isinstance(region, six.string_types) else region.lower()
if self.region and self.region not in SINCH_REGIONS:
msg = 'The region specified ({}) is invalid.'.format(region)
self.logger.warning(msg)
raise TypeError(msg)
# Tidy source
self.source = re.sub(r'[^\d]+', '', self.source)
if len(self.source) < 11 or len(self.source) > 14:
# A short code is a special 5 or 6 digit telephone number
# that's shorter than a full phone number.
if len(self.source) not in (5, 6):
msg = 'The Account (From) Phone # specified ' \
'({}) is invalid.'.format(source)
self.logger.warning(msg)
raise TypeError(msg)
# else... it's a short code so we're okay
else:
# We're dealing with a phone number; so we need to just
# place a plus symbol at the front of it
self.source = '+{}'.format(self.source)
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
# Further check our phone # for its digit count;
# if it's outside the expected range, we can assume it's
# a poorly specified phone no and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append('+{}'.format(result))
continue
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
if not self.targets:
if len(self.source) in (5, 6):
# raise a warning since we're a short-code. We need
# a number to message
msg = 'There are no valid Sinch targets to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Sinch Notification
"""
# error tracking (used for function return)
has_error = False
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Authorization': 'Bearer {}'.format(self.api_token),
'Content-Type': 'application/json',
}
# Prepare our payload
payload = {
'body': body,
'from': self.source,
# The To gets populated in the loop below
'to': None,
}
# Prepare our Sinch URL (spi = Service Provider ID)
url = self.notify_url.format(
region=self.region, spi=self.service_plan_id)
# Create a copy of the targets list
targets = list(self.targets)
if len(targets) == 0:
# No targets specified; use our own phone no
targets.append(self.source)
while len(targets):
# Get our target to notify
target = targets.pop(0)
# Prepare our user
payload['to'] = [target]
# Some Debug Logging
self.logger.debug('Sinch POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('Sinch Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
data=json.dumps(payload),
headers=headers,
verify=self.verify_certificate,
)
# The response might look like:
# {
# "id": "CJloRJOe3MtDITqx",
# "to": ["15551112222"],
# "from": "15553334444",
# "canceled": false,
# "body": "This is a test message from your Sinch account",
# "type": "mt_text",
# "created_at": "2020-01-14T01:05:20.694Z",
# "modified_at": "2020-01-14T01:05:20.694Z",
# "delivery_report": "none",
# "expire_at": "2020-01-17T01:05:20.694Z",
# "flash_message": false
# }
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(r.status_code)
# set up our status code to use
status_code = r.status_code
try:
# Update our status response if we can
json_response = json.loads(r.content)
status_code = json_response.get('code', status_code)
status_str = json_response.get('message', status_str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
# We will just use the status we already have.
pass
self.logger.warning(
'Failed to send Sinch notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
self.logger.info(
'Sent Sinch notification to {}.'.format(target))
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Sinch:%s ' % (
target) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'region': self.region,
}
return '{schema}://{spi}:{token}@{source}/{targets}/?{args}'.format(
schema=self.secure_protocol,
spi=self.pprint(
self.service_plan_id, privacy, mode=PrivacyMode.Tail, safe=''),
token=self.pprint(self.api_token, privacy, safe=''),
source=NotifySinch.quote(self.source, safe=''),
targets='/'.join(
[NotifySinch.quote(x, safe='') for x in self.targets]),
args=NotifySinch.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'] = NotifySinch.split_path(results['fullpath'])
# The hostname is our source number
results['source'] = NotifySinch.unquote(results['host'])
# Get our service_plan_id and api_token from the user/pass config
results['service_plan_id'] = NotifySinch.unquote(results['user'])
results['api_token'] = NotifySinch.unquote(results['password'])
# Auth Token
if 'token' in results['qsd'] and len(results['qsd']['token']):
# Extract the API token from an argument
results['api_token'] = \
NotifySinch.unquote(results['qsd']['token'])
# Service Plan ID
if 'spi' in results['qsd'] and len(results['qsd']['spi']):
# Extract the service plan id from an argument
results['service_plan_id'] = \
NotifySinch.unquote(results['qsd']['spi'])
# Support the 'from' and 'source' variables so that the source
# number can also be defined this way.
# The 'from' makes it easier to use yaml configuration
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifySinch.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifySinch.unquote(results['qsd']['source'])
# Allow one to define a region
if 'region' in results['qsd'] and len(results['qsd']['region']):
results['region'] = \
NotifySinch.unquote(results['qsd']['region'])
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifySinch.parse_list(results['qsd']['to'])
return results
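A hedged usage sketch of the parser above. The 'sinch://' scheme, the import path and every identifier below are placeholders assumed from apprise's plugin layout, not values taken from a real account:

from apprise.plugins.NotifySinch import NotifySinch  # import path is an assumption

results = NotifySinch.parse_url(
    'sinch://MyServicePlanId:MyApiToken@15553334444/15551112222/?region=us')
# results['service_plan_id'] == 'MyServicePlanId'   (user portion of the URL)
# results['api_token']       == 'MyApiToken'        (password portion)
# results['source']          == '15553334444'       (the host is the sending number)
# results['targets']         == ['15551112222']     (path entries become target numbers)
# results['region']          == 'us'                (query-string override)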

View File

@ -176,7 +176,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[A-Z0-9]{9}$', 'i'),
'regex': (r'^[A-Z0-9]+$', 'i'),
},
# Token required as part of the Webhook request
# /........./BBBBBBBBB/........................
@ -185,7 +185,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[A-Z0-9]{9}$', 'i'),
'regex': (r'^[A-Z0-9]+$', 'i'),
},
# Token required as part of the Webhook request
# /........./........./CCCCCCCCCCCCCCCCCCCCCCCC
@ -194,7 +194,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[A-Za-z0-9]{24}$', 'i'),
'regex': (r'^[A-Za-z0-9]+$', 'i'),
},
'target_encoded_id': {
'name': _('Target Encoded ID'),
@ -435,8 +435,18 @@ class NotifySlack(NotifyBase):
if attach and self.mode is SlackMode.BOT and attach_channel_list:
# Send our attachments (can only be done in bot mode)
for attachment in attach:
self.logger.info(
'Posting Slack Attachment {}'.format(attachment.name))
# Perform some simple error checking
if not attachment:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attachment.url(privacy=True)))
return False
self.logger.debug(
'Posting Slack attachment {}'.format(
attachment.url(privacy=True)))
# Prepare API Upload Payload
_payload = {
@ -515,25 +525,29 @@ class NotifySlack(NotifyBase):
'Response Details:\r\n{}'.format(r.content))
return False
try:
response = loads(r.content)
elif attach:
# Attachment posts return a JSON string
try:
response = loads(r.content)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
pass
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
pass
if not (response and response.get('ok', True)):
# Bare minimum requirements not met
self.logger.warning(
'Failed to send {}to Slack: error={}.'.format(
attach.name if attach else '',
r.status_code))
if not (response and response.get('ok', True)):
# Bare minimum requirements not met
self.logger.warning(
'Failed to send {}to Slack: error={}.'.format(
attach.name if attach else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
return False
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
return False
else:
response = r.content
# Message Post Response looks like this:
# {
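The three hunks at the top of this file relax Slack's webhook token validation from fixed lengths to one or more characters. A quick standalone check of the first pair of patterns (the token value is made up):

import re

old_token = re.compile(r'^[A-Z0-9]{9}$', re.I)  # pattern removed by this change
new_token = re.compile(r'^[A-Z0-9]+$', re.I)    # pattern added by this change

token = 'T1JJ3T3L2A'  # hypothetical 10-character workspace token
assert old_token.match(token) is None      # rejected: not exactly 9 characters
assert new_token.match(token) is not None  # accepted once the length cap is dropped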

View File

@ -267,15 +267,22 @@ class NotifyTelegram(NotifyBase):
path = None
if isinstance(attach, AttachBase):
if not attach:
# We could not access the attachment
self.logger.error(
'Could not access attachment {}.'.format(
attach.url(privacy=True)))
return False
self.logger.debug(
'Posting Telegram attachment {}'.format(
attach.url(privacy=True)))
# Store our path to our file
path = attach.path
file_name = attach.name
mimetype = attach.mimetype
if not path:
# Could not load attachment
return False
# Process our attachment
function_name, key = \
next(((x['function_name'], x['key']) for x in self.mime_lookup
@ -470,6 +477,9 @@ class NotifyTelegram(NotifyBase):
# Return our detected userid
return _id
self.logger.warning(
'Failed to detect a Telegram user; '
'try sending your bot a message first.')
return 0
def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
@ -498,8 +508,12 @@ class NotifyTelegram(NotifyBase):
if self.notify_format == NotifyFormat.MARKDOWN:
payload['parse_mode'] = 'MARKDOWN'
else:
# Either TEXT or HTML; if TEXT we'll make it HTML
payload['text'] = '{}{}'.format(
'{}\r\n'.format(title) if title else '',
body,
)
elif self.notify_format == NotifyFormat.HTML:
payload['parse_mode'] = 'HTML'
# HTML Spaces (&nbsp;) and tabs (&emsp;) aren't supported
@ -517,31 +531,23 @@ class NotifyTelegram(NotifyBase):
# Tabs become 3 spaces
title = re.sub('&emsp;?', ' ', title, re.I)
# HTML
title = NotifyTelegram.escape_html(title, whitespace=False)
payload['text'] = '{}{}'.format(
'<b>{}</b>\r\n'.format(title) if title else '',
body,
)
# HTML
else: # TEXT
payload['parse_mode'] = 'HTML'
# Escape content
title = NotifyTelegram.escape_html(title, whitespace=False)
body = NotifyTelegram.escape_html(body, whitespace=False)
if title and self.notify_format == NotifyFormat.TEXT:
# Text HTML Formatting
payload['text'] = '<b>%s</b>\r\n%s' % (
title,
payload['text'] = '{}{}'.format(
'<b>{}</b>\r\n'.format(title) if title else '',
body,
)
elif title:
# Already HTML; trust developer has wrapped
# the title appropriately
payload['text'] = '%s\r\n%s' % (
title,
body,
)
else:
# Assign the body
payload['text'] = body
# Create a copy of the chat_ids list
targets = list(self.targets)
while len(targets):
@ -639,10 +645,10 @@ class NotifyTelegram(NotifyBase):
if attach:
# Send our attachments now (if specified and if it exists)
for attachment in attach:
sent_attachment = self.send_media(
payload['chat_id'], notify_type, attach=attachment)
if not self.send_media(
payload['chat_id'], notify_type,
attach=attachment):
if not sent_attachment:
# We failed; don't continue
has_error = True
break
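A minimal standalone sketch of the TEXT-branch formatting these hunks converge on (escape both fields, then prefix an optional bold title). html.escape stands in for NotifyTelegram.escape_html here, which is an assumption made purely for illustration:

from html import escape  # stand-in for NotifyTelegram.escape_html (assumption)

def telegram_text_payload(title, body):
    """Escape both fields, then prefix an optional bold title, as in the hunk above."""
    title = escape(title, quote=False)
    body = escape(body, quote=False)
    return '{}{}'.format('<b>{}</b>\r\n'.format(title) if title else '', body)

print(telegram_text_payload('Build failed', 'See <logs> for details'))
# -> '<b>Build failed</b>\r\nSee &lt;logs&gt; for details'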

View File

@ -23,7 +23,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a Twillio account to which you can get your
# To use this service you will need a Twilio account to which you can get your
# AUTH_TOKEN and ACCOUNT SID right from your console/dashboard at:
# https://www.twilio.com/console
#
@ -67,7 +67,7 @@ class NotifyTwilio(NotifyBase):
# The services URL
service_url = 'https://www.twilio.com/'
# All pushover requests are secure
# All notification requests are secure
secure_protocol = 'twilio'
# Allow 300 requests per minute.
@ -86,7 +86,7 @@ class NotifyTwilio(NotifyBase):
'{sid}/Messages.json'
# The maximum length of the body
body_maxlen = 140
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.

View File

@ -0,0 +1,208 @@
# -*- coding: utf-8 -*-
import ssl
from os.path import isfile
import logging
# Default our global support flag
SLEEKXMPP_SUPPORT_AVAILABLE = False
try:
# Import sleekxmpp if available
import sleekxmpp
SLEEKXMPP_SUPPORT_AVAILABLE = True
except ImportError:
# No problem; we just simply can't support this plugin because we're
# either using Linux, or simply do not have sleekxmpp installed.
pass
class SleekXmppAdapter(object):
"""
Wrapper to sleekxmpp
"""
# Reference to XMPP client.
xmpp = None
# Whether everything succeeded
success = False
# The default protocol
protocol = 'xmpp'
# The default secure protocol
secure_protocol = 'xmpps'
# The default XMPP port
default_unsecure_port = 5222
# The default XMPP secure port
default_secure_port = 5223
# Taken from https://golang.org/src/crypto/x509/root_linux.go
CA_CERTIFICATE_FILE_LOCATIONS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL 6
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenELEC
"/etc/pki/tls/cacert.pem",
# CentOS/RHEL 7
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
]
# This entry is a bit hacky, but it allows us to unit-test this library
# in an environment that simply doesn't have the sleekxmpp package
# available to us.
#
# If anyone is seeing this and knows a better way of testing this
# outside of what is defined in test/test_xmpp_plugin.py, please
# let me know! :)
_enabled = SLEEKXMPP_SUPPORT_AVAILABLE
def __init__(self, host=None, port=None, secure=False,
verify_certificate=True, xep=None, jid=None, password=None,
body=None, targets=None, before_message=None, logger=None):
"""
Initialize our SleekXmppAdapter object
"""
self.host = host
self.port = port
self.secure = secure
self.verify_certificate = verify_certificate
self.xep = xep
self.jid = jid
self.password = password
self.body = body
self.targets = targets
self.before_message = before_message
self.logger = logger or logging.getLogger(__name__)
# Use the Apprise log handlers for configuring the sleekxmpp logger.
apprise_logger = logging.getLogger('apprise')
sleek_logger = logging.getLogger('sleekxmpp')
for handler in apprise_logger.handlers:
sleek_logger.addHandler(handler)
sleek_logger.setLevel(apprise_logger.level)
if not self.load():
raise ValueError("Invalid XMPP Configuration")
def load(self):
# Prepare our object
self.xmpp = sleekxmpp.ClientXMPP(self.jid, self.password)
# Register our session
self.xmpp.add_event_handler("session_start", self.session_start)
for xep in self.xep:
# Load xep entries
try:
self.xmpp.register_plugin('xep_{0:04d}'.format(xep))
except sleekxmpp.plugins.base.PluginNotFound:
self.logger.warning(
'Could not register plugin {}'.format(
'xep_{0:04d}'.format(xep)))
return False
if self.secure:
# Don't even try to use the outdated ssl.PROTOCOL_SSLx
self.xmpp.ssl_version = ssl.PROTOCOL_TLSv1
# If the python version supports it, use highest TLS version
# automatically
if hasattr(ssl, "PROTOCOL_TLS"):
# Use the best version of TLS available to us
self.xmpp.ssl_version = ssl.PROTOCOL_TLS
self.xmpp.ca_certs = None
if self.verify_certificate:
# Set the ca_certs variable for certificate verification
self.xmpp.ca_certs = next(
(cert for cert in self.CA_CERTIFICATE_FILE_LOCATIONS
if isfile(cert)), None)
if self.xmpp.ca_certs is None:
self.logger.warning(
'XMPP Secure communication can not be verified; '
'no local CA certificate file')
return False
# We're good
return True
def process(self):
"""
Thread that handles the server/client i/o
"""
# Establish connection to XMPP server.
# To speed up sending messages, don't use the "reattempt" feature,
# it will add a nasty delay even before connecting to XMPP server.
if not self.xmpp.connect((self.host, self.port),
use_ssl=self.secure, reattempt=False):
default_port = self.default_secure_port \
if self.secure else self.default_unsecure_port
default_schema = self.secure_protocol \
if self.secure else self.protocol
# Log connection issue
self.logger.warning(
'Failed to authenticate {jid} with: {schema}://{host}{port}'
.format(
jid=self.jid,
schema=default_schema,
host=self.host,
port='' if not self.port or self.port == default_port
else ':{}'.format(self.port),
))
return False
# Process XMPP communication.
self.xmpp.process(block=True)
return self.success
def session_start(self, *args, **kwargs):
"""
Session Manager
"""
targets = list(self.targets)
if not targets:
# We always default to notifying ourselves
targets.append(self.jid)
while len(targets) > 0:
# Get next target (via JID)
target = targets.pop(0)
# Invoke "before_message" event hook.
self.before_message()
# The message we wish to send, and the JID that will receive it.
self.xmpp.send_message(mto=target, mbody=self.body, mtype='chat')
# Using wait=True ensures that the send queue will be
# emptied before ending the session.
self.xmpp.disconnect(wait=True)
# Toggle our success flag
self.success = True

View File

@ -24,46 +24,17 @@
# THE SOFTWARE.
import re
import ssl
from os.path import isfile
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..AppriseLocale import gettext_lazy as _
from ..NotifyBase import NotifyBase
from ...URLBase import PrivacyMode
from ...common import NotifyType
from ...utils import parse_list
from ...AppriseLocale import gettext_lazy as _
from .SleekXmppAdapter import SleekXmppAdapter
# xep string parser
XEP_PARSE_RE = re.compile('^[^1-9]*(?P<xep>[1-9][0-9]{0,3})$')
# Default our global support flag
NOTIFY_XMPP_SUPPORT_ENABLED = False
# Taken from https://golang.org/src/crypto/x509/root_linux.go
CA_CERTIFICATE_FILE_LOCATIONS = [
# Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt",
# Fedora/RHEL 6
"/etc/pki/tls/certs/ca-bundle.crt",
# OpenSUSE
"/etc/ssl/ca-bundle.pem",
# OpenELEC
"/etc/pki/tls/cacert.pem",
# CentOS/RHEL 7
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
]
try:
# Import sleekxmpp if available
import sleekxmpp
NOTIFY_XMPP_SUPPORT_ENABLED = True
except ImportError:
# No problem; we just simply can't support this plugin because we're
# either using Linux, or simply do not have sleekxmpp installed.
pass
class NotifyXMPP(NotifyBase):
"""
@ -82,6 +53,9 @@ class NotifyXMPP(NotifyBase):
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_xmpp'
# Lower throttle rate for XMPP
request_rate_per_sec = 0.5
# The default XMPP port
default_unsecure_port = 5222
@ -98,7 +72,7 @@ class NotifyXMPP(NotifyBase):
# If anyone is seeing this and knows a better way of testing this
# outside of what is defined in test/test_xmpp_plugin.py, please
# let me know! :)
_enabled = NOTIFY_XMPP_SUPPORT_ENABLED
_enabled = SleekXmppAdapter._enabled
# Define object templates
templates = (
@ -231,10 +205,11 @@ class NotifyXMPP(NotifyBase):
result = XEP_PARSE_RE.match(xep)
if result is not None:
self.xep.append(int(result.group('xep')))
self.logger.debug('Loaded XMPP {}'.format(xep))
else:
self.logger.warning(
"Could not load XMPP xep {}".format(xep))
"Could not load XMPP {}".format(xep))
# By default we send ourselves a message
if targets:
@ -267,34 +242,7 @@ class NotifyXMPP(NotifyBase):
jid = self.host
password = self.password if self.password else self.user
# Prepare our object
xmpp = sleekxmpp.ClientXMPP(jid, password)
for xep in self.xep:
# Load xep entries
xmpp.register_plugin('xep_{0:04d}'.format(xep))
if self.secure:
xmpp.ssl_version = ssl.PROTOCOL_TLSv1
# If the python version supports it, use highest TLS version
# automatically
if hasattr(ssl, "PROTOCOL_TLS"):
# Use the best version of TLS available to us
xmpp.ssl_version = ssl.PROTOCOL_TLS
xmpp.ca_certs = None
if self.verify_certificate:
# Set the ca_certs variable for certificate verification
xmpp.ca_certs = next(
(cert for cert in CA_CERTIFICATE_FILE_LOCATIONS
if isfile(cert)), None)
if xmpp.ca_certs is None:
self.logger.warning(
'XMPP Secure communication can not be verified; '
'no CA certificate found')
# Acquire our port number
# Compute port number
if not self.port:
port = self.default_secure_port \
if self.secure else self.default_unsecure_port
@ -302,48 +250,22 @@ class NotifyXMPP(NotifyBase):
else:
port = self.port
# Establish our connection
if not xmpp.connect((self.host, port)):
return False
xmpp.send_presence()
try:
xmpp.get_roster()
# Communicate with XMPP.
xmpp_adapter = SleekXmppAdapter(
host=self.host, port=port, secure=self.secure,
verify_certificate=self.verify_certificate, xep=self.xep,
jid=jid, password=password, body=body, targets=self.targets,
before_message=self.throttle, logger=self.logger)
except sleekxmpp.exceptions.IqError as e:
self.logger.warning('There was an error getting the XMPP roster.')
self.logger.debug(e.iq['error']['condition'])
xmpp.disconnect()
except ValueError:
# We failed
return False
except sleekxmpp.exceptions.IqTimeout:
self.logger.warning('XMPP Server is taking too long to respond.')
xmpp.disconnect()
return False
# Initialize XMPP machinery and begin processing the XML stream.
outcome = xmpp_adapter.process()
targets = list(self.targets)
if not targets:
# We always default to notifying ourselves
targets.append(jid)
while len(targets) > 0:
# Get next target (via JID)
target = targets.pop(0)
# Always call throttle before any remote server i/o is made
self.throttle()
# The message we wish to send, and the JID that
# will receive it.
xmpp.send_message(mto=target, mbody=body, mtype='chat')
# Using wait=True ensures that the send queue will be
# emptied before ending the session.
xmpp.disconnect(wait=True)
return True
return outcome
def url(self, privacy=False, *args, **kwargs):
"""

View File

@ -34,6 +34,7 @@ from os.path import abspath
# Used for testing
from . import NotifyEmail as NotifyEmailBase
from .NotifyGrowl import gntp
from .NotifyXMPP import SleekXmppAdapter
# NotifyBase object is passed in as a module not class
from . import NotifyBase
@ -63,6 +64,9 @@ __all__ = [
# gntp (used for NotifyGrowl Testing)
'gntp',
# sleekxmpp access points (used for NotifyXMPP Testing)
'SleekXmppAdapter',
]
# we mirror our base purely for the ability to reset everything; this
@ -217,9 +221,16 @@ def _sanitize_token(tokens, default_delimiter):
and 'default' not in tokens[key] \
and 'values' in tokens[key] \
and len(tokens[key]['values']) == 1:
# If there is only one choice; then make it the default
tokens[key]['default'] = \
tokens[key]['values'][0]
# - support dictionaries too
tokens[key]['default'] = tokens[key]['values'][0] \
if not isinstance(tokens[key]['values'], dict) \
else next(iter(tokens[key]['values']))
if 'values' in tokens[key] and isinstance(tokens[key]['values'], dict):
# Convert values into a list if it was defined as a dictionary
tokens[key]['values'] = [k for k in tokens[key]['values'].keys()]
if 'regex' in tokens[key]:
# Verify that we are a tuple; convert strings to tuples

libs/knowit/__init__.py Normal file
View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""Know your media files better."""
from __future__ import unicode_literals
__title__ = 'knowit'
__version__ = '0.3.0-dev'
__short_version__ = '.'.join(__version__.split('.')[:2])
__author__ = 'Rato AQ2'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016-2017, Rato AQ2'
__url__ = 'https://github.com/ratoaq2/knowit'
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mk3d', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx',
'.mp4', '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv',
'.omf', '.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv',
'.vivo', '.vob', '.vro', '.webm', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
from .api import KnowitException, know

libs/knowit/__main__.py Normal file
View File

@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
import sys
from argparse import ArgumentParser
from six import PY2
import yaml
from . import (
__url__,
__version__,
api,
)
from .provider import ProviderError
from .serializer import (
get_json_encoder,
get_yaml_dumper,
)
from .utils import recurse_paths
logging.basicConfig(stream=sys.stdout, format='%(message)s')
logging.getLogger('CONSOLE').setLevel(logging.INFO)
logging.getLogger('knowit').setLevel(logging.ERROR)
console = logging.getLogger('CONSOLE')
logger = logging.getLogger('knowit')
def build_argument_parser():
"""Build the argument parser.
:return: the argument parser
:rtype: ArgumentParser
"""
opts = ArgumentParser()
opts.add_argument(dest='videopath', help='Path to the video to introspect', nargs='*')
provider_opts = opts.add_argument_group('Providers')
provider_opts.add_argument('-p', '--provider', dest='provider',
help='The provider to be used: mediainfo, ffmpeg or enzyme.')
output_opts = opts.add_argument_group('Output')
output_opts.add_argument('--debug', action='store_true', dest='debug',
help='Print useful information for debugging knowit and for reporting bugs.')
output_opts.add_argument('--report', action='store_true', dest='report',
help='Parse media and report all non-detected values')
output_opts.add_argument('-y', '--yaml', action='store_true', dest='yaml',
help='Display output in yaml format')
output_opts.add_argument('-N', '--no-units', action='store_true', dest='no_units',
help='Display output without units')
output_opts.add_argument('-P', '--profile', dest='profile',
help='Display values according to specified profile: code, default, human, technical')
conf_opts = opts.add_argument_group('Configuration')
conf_opts.add_argument('--mediainfo', dest='mediainfo',
help='The location to search for MediaInfo binaries')
conf_opts.add_argument('--ffmpeg', dest='ffmpeg',
help='The location to search for FFmpeg (ffprobe) binaries')
information_opts = opts.add_argument_group('Information')
information_opts.add_argument('--version', dest='version', action='store_true',
help='Display knowit version.')
return opts
def knowit(video_path, options, context):
"""Extract video metadata."""
context['path'] = video_path
if not options.report:
console.info('For: %s', video_path)
else:
console.info('Parsing: %s', video_path)
info = api.know(video_path, context)
if not options.report:
console.info('Knowit %s found: ', __version__)
console.info(dump(info, options, context))
return info
def dump(info, options, context):
"""Convert info to string using json or yaml format."""
if options.yaml:
data = {info['path']: info} if 'path' in info else info
result = yaml.dump(data, Dumper=get_yaml_dumper(context),
default_flow_style=False, allow_unicode=True)
if PY2:
result = result.decode('utf-8')
else:
result = json.dumps(info, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
return result
def main(args=None):
"""Execute main function for entry point."""
argument_parser = build_argument_parser()
args = args or sys.argv[1:]
options = argument_parser.parse_args(args)
if options.debug:
logger.setLevel(logging.DEBUG)
logging.getLogger('enzyme').setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
paths = recurse_paths(options.videopath)
if paths:
report = {}
for i, videopath in enumerate(paths):
try:
context = dict(vars(options))
if options.report:
context['report'] = report
else:
del context['report']
knowit(videopath, options, context)
except ProviderError:
logger.exception('Error when processing video')
except OSError:
logger.exception('OS error when processing video')
except UnicodeError:
logger.exception('Character encoding error when processing video')
except api.KnowitException as e:
logger.error(e)
if options.report and i % 20 == 19 and report:
console.info('Unknown values so far:')
console.info(dump(report, options, vars(options)))
if options.report:
if report:
console.info('Knowit %s found unknown values:', __version__)
console.info(dump(report, options, vars(options)))
console.info('Please report them at %s', __url__)
else:
console.info('Knowit %s knows everything. :-)', __version__)
elif options.version:
console.info(api.debug_info())
else:
argument_parser.print_help()
if __name__ == '__main__':
main(sys.argv[1:])

libs/knowit/api.py Normal file
View File

@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import traceback
from . import OrderedDict, __version__
from .config import Config
from .providers import (
EnzymeProvider,
FFmpegProvider,
# MediaInfoProvider,
)
_provider_map = OrderedDict([
# ('mediainfo', MediaInfoProvider),
('ffmpeg', FFmpegProvider),
('enzyme', EnzymeProvider)
])
provider_names = _provider_map.keys()
available_providers = OrderedDict([])
class KnowitException(Exception):
"""Exception raised when knowit fails to perform media info extraction because of an internal error."""
def initialize(context=None):
"""Initialize knowit."""
if not available_providers:
context = context or {}
config = Config.build(context.get('config'))
for name, provider_cls in _provider_map.items():
available_providers[name] = provider_cls(config, context.get(name) or config.general.get(name))
def know(video_path, context=None):
"""Return a dict containing the video metadata.
:param video_path:
:type video_path: string
:param context:
:type context: dict
:return:
:rtype: dict
"""
try:
# handle path-like objects
video_path = video_path.__fspath__()
except AttributeError:
pass
try:
context = context or {}
context.setdefault('profile', 'default')
initialize(context)
for name, provider in available_providers.items():
if name != (context.get('provider') or name):
continue
if provider.accepts(video_path):
result = provider.describe(video_path, context)
if result:
return result
return {}
except Exception:
raise KnowitException(debug_info(context=context, exc_info=True))
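A hedged usage sketch of know(): the video and ffprobe paths are placeholders, the context keys mirror what initialize() reads above, and the bundled libs directory is assumed to be on sys.path.

from knowit import api

context = {'provider': 'ffmpeg', 'ffmpeg': '/usr/local/bin/ffprobe'}  # placeholder path
info = api.know('/path/to/video.mkv', context)                        # placeholder path

# Subtitle tracks, when detected, are listed under the 'subtitle' key.
for track in info.get('subtitle', []):
    print(track.get('language'), track.get('forced'))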
def dependencies(context=None):
"""Return all dependencies detected by knowit."""
deps = OrderedDict([])
try:
initialize(context)
for name, provider_cls in _provider_map.items():
if name in available_providers:
deps[name] = available_providers[name].version
else:
deps[name] = {}
except Exception:
pass
return deps
def _centered(value):
value = value[-52:]
return '| {msg:^53} |'.format(msg=value)
def debug_info(context=None, exc_info=False):
lines = [
'+-------------------------------------------------------+',
_centered('KnowIt {0}'.format(__version__)),
'+-------------------------------------------------------+'
]
first = True
for key, info in dependencies(context).items():
if not first:
lines.append(_centered(''))
first = False
for k, v in info.items():
lines.append(_centered(k))
lines.append(_centered(v))
if context:
debug_data = context.pop('debug_data', None)
lines.append('+-------------------------------------------------------+')
for k, v in context.items():
if v:
lines.append(_centered('{}: {}'.format(k, v)))
if debug_data:
lines.append('+-------------------------------------------------------+')
lines.append(debug_data())
if exc_info:
lines.append('+-------------------------------------------------------+')
lines.append(traceback.format_exc())
lines.append('+-------------------------------------------------------+')
lines.append(_centered('Please report any bug or feature request at'))
lines.append(_centered('https://github.com/ratoaq2/knowit/issues.'))
lines.append('+-------------------------------------------------------+')
return '\n'.join(lines)

libs/knowit/config.py Normal file
View File

@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
from logging import NullHandler, getLogger
from pkg_resources import resource_stream
from six import text_type
import yaml
from .serializer import get_yaml_loader
logger = getLogger(__name__)
logger.addHandler(NullHandler())
_valid_aliases = ('code', 'default', 'human', 'technical')
_Value = namedtuple('_Value', _valid_aliases)
class Config(object):
"""Application config class."""
@classmethod
def build(cls, path=None):
"""Build config instance."""
loader = get_yaml_loader()
with resource_stream('knowit', 'defaults.yml') as stream:
cfgs = [yaml.load(stream, Loader=loader)]
if path:
with open(path, 'r') as stream:
cfgs.append(yaml.load(stream, Loader=loader))
profiles_data = {}
for cfg in cfgs:
if 'profiles' in cfg:
profiles_data.update(cfg['profiles'])
knowledge_data = {}
for cfg in cfgs:
if 'knowledge' in cfg:
knowledge_data.update(cfg['knowledge'])
data = {'general': {}}
for class_name, data_map in knowledge_data.items():
data.setdefault(class_name, {})
for code, detection_values in data_map.items():
alias_map = (profiles_data.get(class_name) or {}).get(code) or {}
alias_map.setdefault('code', code)
alias_map.setdefault('default', alias_map['code'])
alias_map.setdefault('human', alias_map['default'])
alias_map.setdefault('technical', alias_map['human'])
value = _Value(**{k: v for k, v in alias_map.items() if k in _valid_aliases})
for detection_value in detection_values:
data[class_name][text_type(detection_value)] = value
config = Config()
config.__dict__ = data
return config

libs/knowit/core.py Normal file
View File

@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from logging import NullHandler, getLogger
from six import text_type
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class Reportable(object):
"""Reportable abstract class."""
def __init__(self, name, description=None, reportable=True):
"""Constructor."""
self.name = name
self._description = description
self.reportable = reportable
@property
def description(self):
"""Rule description."""
return self._description or self.name
def report(self, value, context):
"""Report unknown value."""
if not value or not self.reportable:
return
value = text_type(value)
if 'report' in context:
report_map = context['report'].setdefault(self.description, {})
if value not in report_map:
report_map[value] = context['path']
logger.info('Invalid %s: %r', self.description, value)

libs/knowit/defaults.yml Normal file
View File

@ -0,0 +1,628 @@
knowledge:
VideoCodec:
# https://en.wikipedia.org/wiki/MPEG-1#Part_2:_Video
MPEG1:
- MPEG-1V
- MPEG1VIDEO
# https://en.wikipedia.org/wiki/H.262/MPEG-2_Part_2
MPEG2:
- MPEG2
- MPEG-2V
# https://wiki.multimedia.cx/index.php/Microsoft_MPEG-4
MSMPEG4V1:
- MP41
- MPG4
MSMPEG4V2:
- MP42
- MSMPEG4V2
MSMPEG4V3:
- MP43
- AP41
- COL1
WMV1:
- WMV1
- WMV7
WMV2:
- WMV2
- WMV8
# MPEG-4:
# https://wiki.multimedia.cx/index.php/ISO_MPEG-4
# https://en.wikipedia.org/wiki/MPEG-4_Part_2
MPEG4:
- 3IV2
- BLZ0
- DIGI
- DXGM
- EM4A
- EPHV
- FMP4
- FVFW
- HDX4
- M4CC
- M4S2
- MP4S
- MP4V
- MVXM
- RMP4
- SEDG
- SMP4
- UMP4
- WV1F
- MPEG-4V
- ASP # V_MPEG-4/ISO/ASP
- MPEG4
DIVX:
- DIV1
- DIVX
- DX50
XVID:
- XVID
- XVIX
# VC-1:
# https://wiki.multimedia.cx/index.php/VC-1
# https://en.wikipedia.org/wiki/VC-1
VC1:
- WMV3
- WMV9
- WMVA
- WMVC1
- WMVP
- WVP2
- WMVR
- VC-1
- VC1
# H.263:
# https://wiki.multimedia.cx/index.php/H.263
# https://en.wikipedia.org/wiki/Sorenson_Media#Sorenson_Spark
H263:
- D263
- H263
- L263
- M263
- S263
- T263
- U263
- X263
# https://wiki.multimedia.cx/index.php/H.264
H264:
- AVC
- AVC1
- DAVC
- H264
- X264
- VSSH
# https://wiki.multimedia.cx/index.php/H.265
H265:
- HEVC
- H265
- X265
# https://wiki.multimedia.cx/index.php/On2_VP6 and https://en.wikipedia.org/wiki/VP6
VP6:
- VP60
- VP61
- VP62
# https://wiki.multimedia.cx/index.php/On2_VP7
VP7:
- VP70
- VP71
- VP72
# https://en.wikipedia.org/wiki/VP8
VP8:
- VP8
# https://en.wikipedia.org/wiki/VP9
# https://wiki.multimedia.cx/index.php/VP9
VP9:
- VP9
- VP90
CJPG:
- CJPG
QUICKTIME:
- QUICKTIME
__ignored__:
- MJPEG
- PNG
VideoEncoder:
DIVX:
- DIVX
X264:
- X264
X265:
- X265
XVID:
- XVID
VIMEO:
- VIMEO ENCODER
VideoProfile:
ADVANCED:
- ADVANCED
ADVANCEDSIMPLE:
- ADVANCED SIMPLE
- ADVANCED SIMPLE PROFILE
SIMPLE:
- SIMPLE
BASELINE:
- BASELINE
- CONSTRAINED BASELINE
MAIN:
- MAIN
MAIN10:
- MAIN 10
HIGH:
- HIGH
VideoProfileLevel:
L1:
- L1
- L1.0
L11:
- L1.1
L13:
- L1.3
L2:
- L2
L21:
- L2.1
L22:
- L2.2
L3:
- L3
- L3.0
L31:
- L3.1
L32:
- L3.2
L4:
- L4
- L4.0
L41:
- L4.1
L42:
- L4.2
L5:
- L5
- L5.0
L51:
- L5.1
LOW:
- LOW
MAIN:
- MAIN
HIGH:
- HIGH
H14:
- HIGH 1440
VideoProfileTier:
MAIN:
- MAIN
HIGH:
- HIGH
ScanType:
PROGRESSIVE:
- PROGRESSIVE
INTERLACED:
- INTERLACED
- MBAFF
- TT
- BB
- TB
- BT
BitRateMode:
VBR:
- VBR
CBR:
- CBR
AudioCompression:
LOSSY:
- LOSSY
LOSSLESS:
- LOSSLESS
AudioProfile:
CORE:
- CORE
HRA:
- HRA
- DTS-HD HRA
MA:
- MA
- DTS-HD MA
MAIN:
- MAIN
LC:
- LC
HEAAC:
- HE-AAC
HEAACV2:
- HE-AACV2
# https://www.lifewire.com/dts-96-24-1846848
96/24:
- 96/24
- DTS 96/24
# https://www.lifewire.com/what-is-dts-es-1846890
ESDISCRETE:
- ES DISCRETE
- DTS-ES
ESMATRIX:
- ES MATRIX
LAYER2:
- LAYER 2
LAYER3:
- LAYER 3
PRO:
- PRO
__ignored__:
- DOLBY DIGITAL
- DTS
# References:
# - https://ffmpeg.org/general.html#Audio-Codecs
AudioCodec:
AC3:
- AC3
- BSID9
- BSID10
- 2000
EAC3:
- EAC3
- AC3+
TRUEHD:
- TRUEHD
ATMOS:
- ATMOS
DTS:
- DTS
# DTS-HD used for DTS-HD High Resolution Audio and DTS-HD Master Audio
DTSHD:
- DTS-HD
AAC:
- AAC
FLAC:
- FLAC
PCM:
- PCM
- PCM_S16LE
# https://en.wikipedia.org/wiki/MPEG-1_Audio_Layer_II
MP2:
- MP2
- MPA1L2
- MPEG/L2
# https://en.wikipedia.org/wiki/MP3
MP3:
- MP3
- MPA1L3
- MPA2L3
- MPEG/L3
- 50
- 55
VORBIS:
- VORBIS
OPUS:
- OPUS
# https://wiki.multimedia.cx/index.php?title=Windows_Media_Audio_9
WMA1:
- 160
WMA2:
- 161
- WMAV2
WMAPRO:
- 162
- WMAPRO
# https://answers.microsoft.com/en-us/windows/forum/windows_vista-pictures/how-to-access-codec-voxware-rt29-metasound-75/a6dbea68-ca5c-e011-8dfc-68b599b31bf5
RT29:
- 75
SubtitleFormat:
PGS:
- PGS
- 144
- HDMV_PGS_SUBTITLE
VOBSUB:
- VOBSUB
- E0
- DVD_SUBTITLE
SUBRIP:
- SUBRIP
- UTF8
- SRT
# https://en.wikipedia.org/wiki/SubStation_Alpha
SSA:
- SSA
ASS:
- ASS
# https://en.wikipedia.org/wiki/MPEG-4_Part_17
TX3G:
- TX3G
DVBSUB:
- 6
MOVTEXT:
- MOV_TEXT
profiles:
VideoCodec:
MPEG1:
default: MPEG-1
human: MPEG-1 Video
technical: MPEG-1 Part 2
MPEG2:
default: MPEG-2
human: MPEG-2 Video
technical: MPEG-2 Part 2
aka: H.262
MSMPEG4V1:
default: Microsoft MPEG-4 v1
human: Microsoft MPEG-4 version 1
technical: MPEG-4 Part 2 Microsoft variant version 1
MSMPEG4V2:
default: Microsoft MPEG-4 v2
human: Microsoft MPEG-4 version 2
technical: MPEG-4 Part 2 Microsoft variant version 2
MSMPEG4V3:
default: Microsoft MPEG-4 v3
human: Microsoft MPEG-4 version 3
technical: MPEG-4 Part 2 Microsoft variant version 3
WMV1:
default: WMV 7
human: Windows Media Video 7
technical: Microsoft Windows Media Video v1/v7
WMV2:
default: WMV 8
human: Windows Media Video 8
technical: Microsoft Windows Media Video v2/v8
MPEG4:
default: MPEG-4
human: MPEG-4 Visual
technical: MPEG-4 Part 2
DIVX:
default: DivX
human: MPEG-4 Visual (DivX)
technical: MPEG-4 Part 2 (DivX)
XVID:
default: Xvid
human: MPEG-4 Visual (Xvid)
technical: MPEG-4 Part 2 (Xvid)
VC1:
default: VC-1
human: Windows Media Video 9
technical: Microsoft SMPTE 421M
H263:
default: H.263
H264:
default: H.264
human: Advanced Video Coding (H.264)
technical: MPEG-4 Part 10 - Advanced Video Coding
aka: AVC
H265:
default: H.265
human: High Efficiency Video Coding (H.265)
technical: MPEG-H Part 2 - High Efficiency Video Coding
aka: HEVC
VP6:
human: On2 VP6
technical: On2 TrueMotion VP6
VP7:
human: On2 VP7
technical: On2 TrueMotion VP7
VP8:
technical: Google VP8
VP9:
technical: Google VP9
CJPG:
default: WebCam JPEG
QUICKTIME:
default: QuickTime
VideoEncoder:
DIVX:
default: DivX
X264:
default: x264
X265:
default: x265
XVID:
default: Xvid
VIMEO:
default: Vimeo
VideoProfile:
ADVANCED:
default: Advanced
ADVANCEDSIMPLE:
default: Advanced Simple
SIMPLE:
default: Simple
BASELINE:
default: Baseline
MAIN:
default: Main
MAIN10:
default: Main 10
HIGH:
default: High
VideoProfileLevel:
L1:
default: '1'
technical: Level 1
L11:
default: '1.1'
technical: Level 1.1
L13:
default: '1.3'
technical: Level 1.3
L2:
default: '2'
technical: Level 2
L21:
default: '2.1'
technical: Level 2.1
L22:
default: '2.2'
technical: Level 2.2
L3:
default: '3'
technical: Level 3
L31:
default: '3.1'
technical: Level 3.1
L32:
default: '3.2'
technical: Level 3.2
L4:
default: '4'
technical: Level 4
L41:
default: '4.1'
technical: Level 4.1
L42:
default: '4.2'
technical: Level 4.2
L5:
default: '5'
technical: Level 5
L51:
default: '5.1'
technical: Level 5.1
LOW:
default: Low
MAIN:
default: Main
HIGH:
default: High
H14:
default: High 1440
VideoProfileTier:
MAIN:
default: Main
HIGH:
default: High
ScanType:
PROGRESSIVE:
default: Progressive
human: Progressive scanning
INTERLACED:
default: Interlaced
human: Interlaced video
BitRateMode:
VBR:
default: Variable
human: Variable bitrate
CBR:
default: Constant
human: Constant bitrate
AudioCompression:
LOSSY:
default: Lossy
human: Lossy compression
LOSSLESS:
default: Lossless
human: Lossless compression
AudioProfile:
HRA:
default: High Resolution Audio
MA:
default: Master Audio
MAIN:
default: Main
technical: Main Profile
LC:
default: Low Complexity
HEAAC:
default: High Efficiency
HEAACV2:
default: High Efficiency v2
human: High Efficiency version 2
96/24:
default: 96/24
human: 96 kHz 24 bits
technical: 96 kHz 24 bits Upscaled
ESDISCRETE:
default: Extended Surround
human: Extended Surround Discrete
ESMATRIX:
default: Extended Surround
human: Extended Surround Matrix
LAYER2:
default: Layer 2
LAYER3:
default: Layer 3
PRO:
default: Pro
technical: Professional
AudioCodec:
AC3:
default: AC-3
human: Dolby Digital
EAC3:
default: E-AC-3
human: Dolby Digital Plus
technical: Enhanced AC-3
TRUEHD:
default: TrueHD
human: Dolby TrueHD
ATMOS:
default: Atmos
human: Dolby Atmos
DTS:
DTSHD:
default: DTS-HD
AAC:
human: Advanced Audio Coding
FLAC:
human: Free Lossless Audio Codec
PCM:
human: Pulse-code Modulation
MP2:
human: MPEG Audio Layer 2
technical: MPEG-1/MPEG-2 Audio Layer 2
MP3:
human: MPEG Audio Layer 3
technical: MPEG-1/MPEG-2 Audio Layer 3
VORBIS:
default: Vorbis
OPUS:
default: Opus
WMA1:
default: WMA
human: Windows Media Audio 1
WMA2:
default: WMA 2
human: Windows Media Audio 2
WMAPRO:
default: WMA Pro
human: Windows Media Audio Pro
RT29:
default: RT29 MetaSound
human: Voxware RT29 MetaSound
SubtitleFormat:
PGS:
human: Presentation Graphic Stream
VOBSUB:
default: VobSub
SUBRIP:
default: SubRip
SSA:
human: SubStation Alpha
ASS:
human: Advanced SubStation Alpha
TX3G:
human: MPEG-4 Timed Text
technical: MPEG-4 Part 17
DVBSUB:
default: DVBSub
human: DVB Subtitle
technical: Digital Video Broadcasting Subtitles
MOVTEXT:
default: MOV Text

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .audio import (
AudioChannels,
AudioCodec,
AudioCompression,
AudioProfile,
BitRateMode,
)
from .basic import Basic
from .duration import Duration
from .language import Language
from .quantity import Quantity
from .subtitle import (
SubtitleFormat,
)
from .video import (
Ratio,
ScanType,
VideoCodec,
VideoEncoder,
VideoProfile,
VideoProfileLevel,
VideoProfileTier,
)
from .yesno import YesNo

View File

@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .bitratemode import BitRateMode
from .channels import AudioChannels
from .codec import AudioCodec
from .compression import AudioCompression
from .profile import AudioProfile

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class BitRateMode(Configurable):
"""Bit Rate mode property."""
pass

View File

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ...property import Property
class AudioChannels(Property):
"""Audio Channels property."""
ignored = {
'object based', # Dolby Atmos
}
def handle(self, value, context):
"""Handle audio channels."""
if isinstance(value, int):
return value
v = text_type(value).lower()
if v not in self.ignored:
try:
return int(v)
except ValueError:
self.report(value, context)

View File

@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ...property import Configurable
class AudioCodec(Configurable):
"""Audio codec property."""
@classmethod
def _extract_key(cls, value):
key = text_type(value).upper()
if key.startswith('A_'):
key = key[2:]
# only the first part of the word. E.g.: 'AAC LC' => 'AAC'
return key.split(' ')[0]
@classmethod
def _extract_fallback_key(cls, value, key):
if '/' in key:
return key.split('/')[0]
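The two hooks above normalize raw codec identifiers before the configuration lookup. A quick illustration calling the classmethods directly (the identifiers are typical container codec IDs, chosen here only for illustration; the bundled libs directory is assumed to be on sys.path):

from knowit.properties import AudioCodec

print(AudioCodec._extract_key('A_EAC3'))  # 'EAC3' -- strips the 'A_' prefix
print(AudioCodec._extract_key('AAC LC'))  # 'AAC'  -- keeps only the first word
print(AudioCodec._extract_fallback_key('MPA1L2/MPEG', 'MPA1L2/MPEG'))  # 'MPA1L2'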

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class AudioCompression(Configurable):
"""Audio Compression property."""
pass

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class AudioProfile(Configurable):
"""Audio profile property."""
pass

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ..property import Property
class Basic(Property):
"""Basic property to handle int, float and other basic types."""
def __init__(self, name, data_type, allow_fallback=False, **kwargs):
"""Init method."""
super(Basic, self).__init__(name, **kwargs)
self.data_type = data_type
self.allow_fallback = allow_fallback
def handle(self, value, context):
"""Handle value."""
if isinstance(value, self.data_type):
return value
try:
return self.data_type(text_type(value))
except ValueError:
if not self.allow_fallback:
self.report(value, context)

View File

@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import timedelta
from six import text_type
from ..property import Property
class Duration(Property):
"""Duration property."""
duration_re = re.compile(r'(?P<hours>\d{1,2}):'
r'(?P<minutes>\d{1,2}):'
r'(?P<seconds>\d{1,2})(?:\.'
r'(?P<millis>\d{3})'
r'(?P<micro>\d{3})?\d*)?')
def handle(self, value, context):
"""Return duration as timedelta."""
if isinstance(value, timedelta):
return value
elif isinstance(value, int):
return timedelta(milliseconds=value)
try:
return timedelta(milliseconds=int(float(value)))
except ValueError:
pass
try:
h, m, s, ms, mc = self.duration_re.match(text_type(value)).groups('0')
return timedelta(hours=int(h), minutes=int(m), seconds=int(s), milliseconds=int(ms), microseconds=int(mc))
except (AttributeError, ValueError):  # AttributeError: the regex did not match
pass
self.report(value, context)
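A small usage sketch of the handler above, calling it directly on literal values (bundled libs assumed on sys.path):

from datetime import timedelta
from knowit.properties import Duration

duration = Duration('duration')
assert duration.handle(90000, {}) == timedelta(seconds=90)  # integer milliseconds
assert duration.handle('01:30:15.250', {}) == timedelta(
    hours=1, minutes=30, seconds=15, milliseconds=250)      # hh:mm:ss.mmm string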

View File

@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import babelfish
from ..property import Property
class Language(Property):
"""Language property."""
def handle(self, value, context):
"""Handle languages."""
try:
if len(value) == 3:
return babelfish.Language.fromalpha3b(value)
return babelfish.Language.fromietf(value)
except (babelfish.Error, ValueError):
pass
try:
return babelfish.Language.fromname(value)
except babelfish.Error:
pass
self.report(value, context)
return babelfish.Language('und')

View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ..property import Property
class Quantity(Property):
"""Quantity is a property with unit."""
def __init__(self, name, unit, data_type=int, **kwargs):
"""Init method."""
super(Quantity, self).__init__(name, **kwargs)
self.unit = unit
self.data_type = data_type
def handle(self, value, context):
"""Handle value with unit."""
if not isinstance(value, self.data_type):
try:
value = self.data_type(text_type(value))
except ValueError:
self.report(value, context)
return
return value if context.get('no_units') else value * self.unit

View File

@ -0,0 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .format import SubtitleFormat

View File

@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ...property import Configurable
class SubtitleFormat(Configurable):
"""Subtitle Format property."""
@classmethod
def _extract_key(cls, value):
key = text_type(value).upper()
if key.startswith('S_'):
key = key[2:]
return key.split('/')[-1]

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .codec import VideoCodec
from .encoder import VideoEncoder
from .profile import VideoProfile
from .profile import VideoProfileLevel
from .profile import VideoProfileTier
from .ratio import Ratio
from .scantype import ScanType

View File

@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class VideoCodec(Configurable):
"""Video Codec handler."""
@classmethod
def _extract_key(cls, value):
key = value.upper().split('/')[-1]
if key.startswith('V_'):
key = key[2:]
return key.split(' ')[-1]

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class VideoEncoder(Configurable):
"""Video Encoder property."""
pass

View File

@ -0,0 +1,41 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ...property import Configurable
class VideoProfile(Configurable):
"""Video Profile property."""
@classmethod
def _extract_key(cls, value):
return value.upper().split('@')[0]
class VideoProfileLevel(Configurable):
"""Video Profile Level property."""
@classmethod
def _extract_key(cls, value):
values = text_type(value).upper().split('@')
if len(values) > 1:
value = values[1]
return value
# There's no level, so don't warn or report it
return False
class VideoProfileTier(Configurable):
"""Video Profile Tier property."""
@classmethod
def _extract_key(cls, value):
values = value.upper().split('@')
if len(values) > 2:
return values[2]
# There's no tier, so don't warn or report it
return False

View File

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from six import text_type
from ...property import Property
class Ratio(Property):
"""Ratio property."""
def __init__(self, name, unit=None, **kwargs):
"""Constructor."""
super(Ratio, self).__init__(name, **kwargs)
self.unit = unit
ratio_re = re.compile(r'(?P<width>\d+)[:/](?P<height>\d+)')
def handle(self, value, context):
"""Handle ratio."""
match = self.ratio_re.match(text_type(value))
if match:
width, height = match.groups()
if (width, height) == ('0', '1'): # identity
return 1.
result = round(float(width) / float(height), 3)
if self.unit:
result *= self.unit
return result
self.report(value, context)
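A small usage sketch of the ratio parsing above:

from knowit.properties import Ratio

ratio = Ratio('aspect_ratio')
assert ratio.handle('16:9', {}) == 1.778  # rounded to three decimals
assert ratio.handle('0:1', {}) == 1.0     # the '0:1' identity case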

View File

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...property import Configurable
class ScanType(Configurable):
"""Scan Type property."""
pass

View File

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import text_type
from ..property import Property
class YesNo(Property):
"""Yes or No handler."""
mapping = ('yes', 'true', '1')
def __init__(self, name, yes=True, no=False, hide_value=None, **kwargs):
"""Init method."""
super(YesNo, self).__init__(name, **kwargs)
self.yes = yes
self.no = no
self.hide_value = hide_value
def handle(self, value, context):
"""Handle boolean values."""
v = text_type(value).lower()
result = self.yes if v in self.mapping else self.no
return result if result != self.hide_value else None
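A small sketch of how the mapping and hide_value interact, mirroring how the enzyme provider below instantiates this property:

from knowit.properties import YesNo

scan = YesNo('interlaced', yes='Interlaced', no='Progressive')
assert scan.handle(True, {}) == 'Interlaced'   # 'true' is in the mapping
assert scan.handle('no', {}) == 'Progressive'

forced = YesNo('forced', hide_value=False)
assert forced.handle('1', {}) is True
assert forced.handle('0', {}) is None          # equal to hide_value, so it is hidden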

libs/knowit/property.py Normal file
View File

@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from logging import NullHandler, getLogger
from six import PY3, binary_type, string_types, text_type
from .core import Reportable
logger = getLogger(__name__)
logger.addHandler(NullHandler())
_visible_chars_table = dict.fromkeys(range(32))
def _is_unknown(value):
return isinstance(value, text_type) and (not value or value.lower() == 'unknown')
class Property(Reportable):
"""Property class."""
def __init__(self, name, default=None, private=False, description=None, delimiter=' / ', **kwargs):
"""Init method."""
super(Property, self).__init__(name, description, **kwargs)
self.default = default
self.private = private
# Used to detect duplicated values. e.g.: en / en or High@L4.0 / High@L4.0 or Progressive / Progressive
self.delimiter = delimiter
def extract_value(self, track, context):
"""Extract the property value from a given track."""
names = self.name.split('.')
value = track.get(names[0], {}).get(names[1]) if len(names) == 2 else track.get(self.name)
if value is None:
if self.default is None:
return
value = self.default
if isinstance(value, string_types):
if isinstance(value, binary_type):
value = text_type(value)
else:
value = value.translate(_visible_chars_table).strip()
if _is_unknown(value):
return
value = self._deduplicate(value)
result = self.handle(value, context)
if result is not None and not _is_unknown(result):
return result
@classmethod
def _deduplicate(cls, value):
values = value.split(' / ')
if len(values) == 2 and values[0] == values[1]:
return values[0]
return value
def handle(self, value, context):
"""Return the value without any modification."""
return value
class Configurable(Property):
"""Configurable property where values are in a config mapping."""
def __init__(self, config, *args, **kwargs):
"""Init method."""
super(Configurable, self).__init__(*args, **kwargs)
self.mapping = getattr(config, self.__class__.__name__)
@classmethod
def _extract_key(cls, value):
return text_type(value).upper()
@classmethod
def _extract_fallback_key(cls, value, key):
pass
def _lookup(self, key, context):
result = self.mapping.get(key)
if result is not None:
result = getattr(result, context.get('profile') or 'default')
return result if result != '__ignored__' else False
def handle(self, value, context):
"""Return Variable or Constant."""
key = self._extract_key(value)
if key is False:
return
result = self._lookup(key, context)
if result is False:
return
while not result and key:
key = self._extract_fallback_key(value, key)
result = self._lookup(key, context)
if result is False:
return
if not result:
self.report(value, context)
return result
class MultiValue(Property):
"""Property with multiple values."""
def __init__(self, prop=None, delimiter='/', single=False, handler=None, name=None, **kwargs):
"""Init method."""
super(MultiValue, self).__init__(prop.name if prop else name, **kwargs)
self.prop = prop
self.delimiter = delimiter
self.single = single
self.handler = handler
def handle(self, value, context):
"""Handle properties with multiple values."""
values = (self._split(value[0], self.delimiter)
if len(value) == 1 else value) if isinstance(value, list) else self._split(value, self.delimiter)
call = self.handler or self.prop.handle
if len(values) > 1 and not self.single:
return [call(item, context) if not _is_unknown(item) else None for item in values]
return call(values[0], context)
@classmethod
def _split(cls, value, delimiter='/'):
if value is None:
return
v = text_type(value)
result = map(text_type.strip, v.split(delimiter))
return list(result) if PY3 else result
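Two small sketches of the helpers above: the dotted-name lookup plus duplicate collapsing in Property.extract_value, and the delimiter splitting in MultiValue (the track data is made up):

from knowit.property import MultiValue, Property

prop = Property('audio.language')
track = {'audio': {'language': 'eng / eng'}}
assert prop.extract_value(track, {}) == 'eng'  # nested lookup; 'x / x' collapses to 'x'

multi = MultiValue(prop=Property('writing_library'))
assert multi.handle('x264 / core 155', {}) == ['x264', 'core 155']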

libs/knowit/provider.py Normal file
View File

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from logging import NullHandler, getLogger
from . import OrderedDict
from .properties import Quantity
from .units import units
logger = getLogger(__name__)
logger.addHandler(NullHandler())
size_property = Quantity('size', units.byte, description='media size')
class Provider(object):
"""Base class for all providers."""
min_fps = 10
max_fps = 200
def __init__(self, config, mapping, rules=None):
"""Init method."""
self.config = config
self.mapping = mapping
self.rules = rules or {}
def accepts(self, target):
"""Whether or not the video is supported by this provider."""
raise NotImplementedError
def describe(self, target, context):
"""Read video metadata information."""
raise NotImplementedError
def _describe_tracks(self, video_path, general_track, video_tracks, audio_tracks, subtitle_tracks, context):
logger.debug('Handling general track')
props = self._describe_track(general_track, 'general', context)
if 'path' not in props:
props['path'] = video_path
if 'container' not in props:
props['container'] = os.path.splitext(video_path)[1][1:]
if 'size' not in props and os.path.isfile(video_path):
props['size'] = size_property.handle(os.path.getsize(video_path), context)
for track_type, tracks in (('video', video_tracks),
('audio', audio_tracks),
('subtitle', subtitle_tracks)):
results = []
for track in tracks or []:
logger.debug('Handling %s track', track_type)
t = self._validate_track(track_type, self._describe_track(track, track_type, context))
if t:
results.append(t)
if results:
props[track_type] = results
return props
@classmethod
def _validate_track(cls, track_type, track):
if track_type != 'video' or 'frame_rate' not in track:
return track
frame_rate = track['frame_rate']
try:
frame_rate = frame_rate.magnitude
except AttributeError:
pass
if cls.min_fps < frame_rate < cls.max_fps:
return track
def _describe_track(self, track, track_type, context):
"""Describe track to a dict.
:param track:
:param track_type:
:rtype: dict
"""
props = OrderedDict()
pv_props = {}
for name, prop in self.mapping[track_type].items():
if not prop:
# placeholder to be populated by rules. It keeps the order
props[name] = None
continue
value = prop.extract_value(track, context)
if value is not None:
if not prop.private:
which = props
else:
which = pv_props
which[name] = value
for name, rule in self.rules.get(track_type, {}).items():
if props.get(name) is not None and not rule.override:
logger.debug('Skipping rule %s since property is already present: %r', name, props[name])
continue
value = rule.execute(props, pv_props, context)
if value is not None:
props[name] = value
elif name in props and not rule.override:
del props[name]
return props
@property
def version(self):
"""Return provider version information."""
raise NotImplementedError
class ProviderError(Exception):
"""Base class for provider exceptions."""
pass
class MalformedFileError(ProviderError):
"""Malformed File error."""
pass
class UnsupportedFileFormatError(ProviderError):
"""Unsupported File Format error."""
pass

View File

@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-
"""Provider package."""
from __future__ import unicode_literals
from .enzyme import EnzymeProvider
from .ffmpeg import FFmpegProvider
#from .mediainfo import MediaInfoProvider

View File

@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
from collections import defaultdict
from logging import NullHandler, getLogger
import enzyme
from .. import OrderedDict
from ..properties import (
AudioCodec,
Basic,
Duration,
Language,
Quantity,
VideoCodec,
YesNo,
)
from ..property import Property
from ..provider import (
MalformedFileError,
Provider,
)
from ..rules import (
AudioChannelsRule,
ClosedCaptionRule,
HearingImpairedRule,
LanguageRule,
ResolutionRule,
)
from ..serializer import get_json_encoder
from ..units import units
from ..utils import todict
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class EnzymeProvider(Provider):
"""Enzyme Provider."""
def __init__(self, config, *args, **kwargs):
"""Init method."""
super(EnzymeProvider, self).__init__(config, {
'general': OrderedDict([
('title', Property('title', description='media title')),
('duration', Duration('duration', description='media duration')),
]),
'video': OrderedDict([
('id', Basic('number', int, description='video track number')),
('name', Property('name', description='video track name')),
('language', Language('language', description='video language')),
('width', Quantity('width', units.pixel)),
('height', Quantity('height', units.pixel)),
('scan_type', YesNo('interlaced', yes='Interlaced', no='Progressive', default='Progressive',
description='video scan type')),
('resolution', None), # populated with ResolutionRule
# ('bit_depth', Property('bit_depth', Integer('video bit depth'))),
('codec', VideoCodec(config, 'codec_id', description='video codec')),
('forced', YesNo('forced', hide_value=False, description='video track forced')),
('default', YesNo('default', hide_value=False, description='video track default')),
('enabled', YesNo('enabled', hide_value=True, description='video track enabled')),
]),
'audio': OrderedDict([
('id', Basic('number', int, description='audio track number')),
('name', Property('name', description='audio track name')),
('language', Language('language', description='audio language')),
('codec', AudioCodec(config, 'codec_id', description='audio codec')),
('channels_count', Basic('channels', int, description='audio channels count')),
('channels', None), # populated with AudioChannelsRule
('forced', YesNo('forced', hide_value=False, description='audio track forced')),
('default', YesNo('default', hide_value=False, description='audio track default')),
('enabled', YesNo('enabled', hide_value=True, description='audio track enabled')),
]),
'subtitle': OrderedDict([
('id', Basic('number', int, description='subtitle track number')),
('name', Property('name', description='subtitle track name')),
('language', Language('language', description='subtitle language')),
('hearing_impaired', None), # populated with HearingImpairedRule
('closed_caption', None), # populated with ClosedCaptionRule
('forced', YesNo('forced', hide_value=False, description='subtitle track forced')),
('default', YesNo('default', hide_value=False, description='subtitle track default')),
('enabled', YesNo('enabled', hide_value=True, description='subtitle track enabled')),
]),
}, {
'video': OrderedDict([
('language', LanguageRule('video language')),
('resolution', ResolutionRule('video resolution')),
]),
'audio': OrderedDict([
('language', LanguageRule('audio language')),
('channels', AudioChannelsRule('audio channels')),
]),
'subtitle': OrderedDict([
('language', LanguageRule('subtitle language')),
('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
('closed_caption', ClosedCaptionRule('closed caption')),
])
})
def accepts(self, video_path):
"""Accept only MKV files."""
return video_path.lower().endswith('.mkv')
@classmethod
def extract_info(cls, video_path):
"""Extract info from the video."""
with open(video_path, 'rb') as f:
return todict(enzyme.MKV(f))
def describe(self, video_path, context):
"""Return video metadata."""
try:
data = defaultdict(dict)
ff = self.extract_info(video_path)
def debug_data():
"""Debug data."""
return json.dumps(ff, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
context['debug_data'] = debug_data
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Video %r scanned using enzyme %r has raw data:\n%s',
video_path, enzyme.__version__, debug_data())
data.update(ff)
if 'info' in data and data['info'] is None:
return {}
except enzyme.MalformedMKVError: # pragma: no cover
raise MalformedFileError
if logger.level == logging.DEBUG:
logger.debug('Video %r scanned using enzyme %r has raw data:\n%s',
video_path, enzyme.__version__, json.dumps(data, cls=get_json_encoder(context)))
result = self._describe_tracks(video_path, data.get('info', {}), data.get('video_tracks'),
data.get('audio_tracks'), data.get('subtitle_tracks'), context)
if not result:
raise MalformedFileError
result['provider'] = {
'name': 'enzyme',
'version': self.version
}
return result
@property
def version(self):
"""Return enzyme version information."""
return {'enzyme': enzyme.__version__}

View File

@ -0,0 +1,276 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
import re
from logging import NullHandler, getLogger
from subprocess import check_output
from six import ensure_text
from .. import (
OrderedDict,
VIDEO_EXTENSIONS,
)
from ..properties import (
AudioChannels,
AudioCodec,
AudioProfile,
Basic,
Duration,
Language,
Quantity,
Ratio,
ScanType,
SubtitleFormat,
VideoCodec,
VideoProfile,
VideoProfileLevel,
YesNo,
)
from ..property import (
Property,
)
from ..provider import (
MalformedFileError,
Provider,
)
from ..rules import (
AudioChannelsRule,
AudioCodecRule,
ClosedCaptionRule,
HearingImpairedRule,
LanguageRule,
ResolutionRule,
)
from ..serializer import get_json_encoder
from ..units import units
from ..utils import (
define_candidate,
detect_os,
)
logger = getLogger(__name__)
logger.addHandler(NullHandler())
WARN_MSG = r'''
=========================================================================================
FFmpeg (ffprobe) not found on your system or could not be loaded.
Visit https://ffmpeg.org/download.html to download it.
If you still have problems, please check if the downloaded version matches your system.
To load FFmpeg (ffprobe) from a specific location, please define the location as follows:
knowit --ffmpeg /usr/local/ffmpeg/bin <video_path>
knowit --ffmpeg /usr/local/ffmpeg/bin/ffprobe <video_path>
knowit --ffmpeg "C:\Program Files\FFmpeg" <video_path>
knowit --ffmpeg C:\Software\ffprobe.exe <video_path>
=========================================================================================
'''
class FFmpegExecutor(object):
"""Executor that knows how to execute media info: using ctypes or cli."""
version_re = re.compile(r'\bversion\s+(?P<version>\d+(?:\.\d+)+)\b')
locations = {
'unix': ('/usr/local/ffmpeg/lib', '/usr/local/ffmpeg/bin', '__PATH__'),
'windows': ('__PATH__', ),
'macos': ('__PATH__', ),
}
def __init__(self, location, version):
"""Constructor."""
self.location = location
self.version = version
def extract_info(self, filename):
"""Extract media info."""
json_dump = self._execute(filename)
return json.loads(json_dump)
def _execute(self, filename):
raise NotImplementedError
@classmethod
def _get_version(cls, output):
match = cls.version_re.search(output)
if match:
version = tuple([int(v) for v in match.groupdict()['version'].split('.')])
return version
@classmethod
def get_executor_instance(cls, suggested_path=None):
"""Return executor instance."""
os_family = detect_os()
logger.debug('Detected os: %s', os_family)
for exec_cls in (FFmpegCliExecutor, ):
executor = exec_cls.create(os_family, suggested_path)
if executor:
return executor
class FFmpegCliExecutor(FFmpegExecutor):
"""Executor that uses FFmpeg (ffprobe) cli."""
names = {
'unix': ('ffprobe', ),
'windows': ('ffprobe.exe', ),
'macos': ('ffprobe', ),
}
def _execute(self, filename):
return ensure_text(check_output([self.location, '-v', 'quiet', '-print_format', 'json',
'-show_format', '-show_streams', '-sexagesimal', filename]))
@classmethod
def create(cls, os_family=None, suggested_path=None):
"""Create the executor instance."""
for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
try:
output = ensure_text(check_output([candidate, '-version']))
version = cls._get_version(output)
if version:
logger.debug('FFmpeg cli detected: %s v%s', candidate, '.'.join(map(str, version)))
return FFmpegCliExecutor(candidate, version)
except OSError:
pass
class FFmpegProvider(Provider):
"""FFmpeg provider."""
def __init__(self, config, suggested_path=None):
"""Init method."""
super(FFmpegProvider, self).__init__(config, {
'general': OrderedDict([
('title', Property('tags.title', description='media title')),
('path', Property('filename', description='media path')),
('duration', Duration('duration', description='media duration')),
('size', Quantity('size', units.byte, description='media size')),
('bit_rate', Quantity('bit_rate', units.bps, description='media bit rate')),
]),
'video': OrderedDict([
('id', Basic('index', int, allow_fallback=True, description='video track number')),
('name', Property('tags.title', description='video track name')),
('language', Language('tags.language', description='video language')),
('duration', Duration('duration', description='video duration')),
('width', Quantity('width', units.pixel)),
('height', Quantity('height', units.pixel)),
('scan_type', ScanType(config, 'field_order', default='Progressive', description='video scan type')),
('aspect_ratio', Ratio('display_aspect_ratio', description='display aspect ratio')),
('pixel_aspect_ratio', Ratio('sample_aspect_ratio', description='pixel aspect ratio')),
('resolution', None), # populated with ResolutionRule
('frame_rate', Ratio('r_frame_rate', unit=units.FPS, description='video frame rate')),
# frame_rate_mode
('bit_rate', Quantity('bit_rate', units.bps, description='video bit rate')),
('bit_depth', Quantity('bits_per_raw_sample', units.bit, description='video bit depth')),
('codec', VideoCodec(config, 'codec_name', description='video codec')),
('profile', VideoProfile(config, 'profile', description='video codec profile')),
('profile_level', VideoProfileLevel(config, 'level', description='video codec profile level')),
# ('profile_tier', VideoProfileTier(config, 'codec_profile', description='video codec profile tier')),
('forced', YesNo('disposition.forced', hide_value=False, description='video track forced')),
('default', YesNo('disposition.default', hide_value=False, description='video track default')),
]),
'audio': OrderedDict([
('id', Basic('index', int, allow_fallback=True, description='audio track number')),
('name', Property('tags.title', description='audio track name')),
('language', Language('tags.language', description='audio language')),
('duration', Duration('duration', description='audio duration')),
('codec', AudioCodec(config, 'codec_name', description='audio codec')),
('_codec', AudioCodec(config, 'profile', description='audio codec', private=True, reportable=False)),
('profile', AudioProfile(config, 'profile', description='audio codec profile')),
('channels_count', AudioChannels('channels', description='audio channels count')),
('channels', None), # populated with AudioChannelsRule
('bit_depth', Quantity('bits_per_raw_sample', units.bit, description='audio bit depth')),
('bit_rate', Quantity('bit_rate', units.bps, description='audio bit rate')),
('sampling_rate', Quantity('sample_rate', units.Hz, description='audio sampling rate')),
('forced', YesNo('disposition.forced', hide_value=False, description='audio track forced')),
('default', YesNo('disposition.default', hide_value=False, description='audio track default')),
]),
'subtitle': OrderedDict([
('id', Basic('index', int, allow_fallback=True, description='subtitle track number')),
('name', Property('tags.title', description='subtitle track name')),
('language', Language('tags.language', description='subtitle language')),
('hearing_impaired', YesNo('disposition.hearing_impaired',
hide_value=False, description='subtitle hearing impaired')),
('closed_caption', None), # populated with ClosedCaptionRule
('format', SubtitleFormat(config, 'codec_name', description='subtitle format')),
('forced', YesNo('disposition.forced', hide_value=False, description='subtitle track forced')),
('default', YesNo('disposition.default', hide_value=False, description='subtitle track default')),
]),
}, {
'video': OrderedDict([
('language', LanguageRule('video language')),
('resolution', ResolutionRule('video resolution')),
]),
'audio': OrderedDict([
('language', LanguageRule('audio language')),
('channels', AudioChannelsRule('audio channels')),
('codec', AudioCodecRule('audio codec', override=True)),
]),
'subtitle': OrderedDict([
('language', LanguageRule('subtitle language')),
('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
('closed_caption', ClosedCaptionRule('closed caption'))
])
})
self.executor = FFmpegExecutor.get_executor_instance(suggested_path)
def accepts(self, video_path):
"""Accept any video when FFprobe is available."""
if self.executor is None:
logger.warning(WARN_MSG)
self.executor = False
return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
def describe(self, video_path, context):
"""Return video metadata."""
data = self.executor.extract_info(video_path)
def debug_data():
"""Debug data."""
return json.dumps(data, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
context['debug_data'] = debug_data
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Video %r scanned using ffmpeg %r has raw data:\n%s',
video_path, self.executor.location, debug_data())
general_track = data.get('format') or {}
if 'tags' in general_track:
general_track['tags'] = {k.lower(): v for k, v in general_track['tags'].items()}
video_tracks = []
audio_tracks = []
subtitle_tracks = []
for track in data.get('streams') or []:
track_type = track.get('codec_type')
if track_type == 'video':
video_tracks.append(track)
elif track_type == 'audio':
audio_tracks.append(track)
elif track_type == 'subtitle':
subtitle_tracks.append(track)
result = self._describe_tracks(video_path, general_track, video_tracks, audio_tracks, subtitle_tracks, context)
if not result:
raise MalformedFileError
result['provider'] = {
'name': 'ffmpeg',
'version': self.version
}
return result
@property
def version(self):
"""Return ffmpeg version information."""
if not self.executor:
return {}
return {self.executor.location: 'v{}'.format('.'.join(map(str, self.executor.version)))}
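# A minimal standalone sketch of the ffprobe call that FFmpegCliExecutor._execute
# issues above, handy for debugging (assumes ffprobe is installed and on PATH;
# knowit itself is not required for this snippet).
import json
from subprocess import check_output

def probe(filename, ffprobe='ffprobe'):
    """Return ffprobe's JSON description of a media file."""
    output = check_output([ffprobe, '-v', 'quiet', '-print_format', 'json',
                           '-show_format', '-show_streams', '-sexagesimal', filename])
    return json.loads(output)

# Example (hypothetical path):
# streams = probe('/path/to/movie.mkv')['streams']
# [s.get('codec_type') for s in streams]  # e.g. ['video', 'audio', 'subtitle']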

View File

@ -0,0 +1,335 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from ctypes import c_void_p, c_wchar_p
from logging import DEBUG, NullHandler, getLogger
from subprocess import CalledProcessError, check_output
from xml.dom import minidom
from xml.etree import ElementTree
from pymediainfo import MediaInfo
from pymediainfo import __version__ as pymediainfo_version
from six import ensure_text
from .. import (
OrderedDict,
VIDEO_EXTENSIONS,
)
from ..properties import (
AudioChannels,
AudioCodec,
AudioCompression,
AudioProfile,
Basic,
BitRateMode,
Duration,
Language,
Quantity,
ScanType,
SubtitleFormat,
VideoCodec,
VideoEncoder,
VideoProfile,
VideoProfileLevel,
VideoProfileTier,
YesNo,
)
from ..property import (
MultiValue,
Property,
)
from ..provider import (
MalformedFileError,
Provider,
)
from ..rules import (
AtmosRule,
AudioChannelsRule,
ClosedCaptionRule,
DtsHdRule,
HearingImpairedRule,
LanguageRule,
ResolutionRule,
)
from ..units import units
from ..utils import (
define_candidate,
detect_os,
)
logger = getLogger(__name__)
logger.addHandler(NullHandler())
WARN_MSG = r'''
=========================================================================================
MediaInfo not found on your system or could not be loaded.
Visit https://mediaarea.net/ to download it.
If you still have problems, please check if the downloaded version matches your system.
To load MediaInfo from a specific location, please define the location as follows:
knowit --mediainfo /usr/local/mediainfo/lib <video_path>
knowit --mediainfo /usr/local/mediainfo/bin <video_path>
knowit --mediainfo "C:\Program Files\MediaInfo" <video_path>
knowit --mediainfo C:\Software\MediaInfo.dll <video_path>
knowit --mediainfo C:\Software\MediaInfo.exe <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.so <video_path>
knowit --mediainfo /opt/mediainfo/libmediainfo.dylib <video_path>
=========================================================================================
'''
class MediaInfoExecutor(object):
"""Media info executable knows how to execute media info: using ctypes or cli."""
version_re = re.compile(r'\bv(?P<version>\d+(?:\.\d+)+)\b')
locations = {
'unix': ('/usr/local/mediainfo/lib', '/usr/local/mediainfo/bin', '__PATH__'),
'windows': ('__PATH__', ),
'macos': ('__PATH__', ),
}
def __init__(self, location, version):
"""Constructor."""
self.location = location
self.version = version
def extract_info(self, filename):
"""Extract media info."""
return self._execute(filename)
def _execute(self, filename):
raise NotImplementedError
@classmethod
def _get_version(cls, output):
match = cls.version_re.search(output)
if match:
version = tuple([int(v) for v in match.groupdict()['version'].split('.')])
return version
@classmethod
def get_executor_instance(cls, suggested_path=None):
"""Return the executor instance."""
os_family = detect_os()
logger.debug('Detected os: %s', os_family)
for exec_cls in (MediaInfoCTypesExecutor, MediaInfoCliExecutor):
executor = exec_cls.create(os_family, suggested_path)
if executor:
return executor
class MediaInfoCliExecutor(MediaInfoExecutor):
"""Media info using cli."""
names = {
'unix': ('mediainfo', ),
'windows': ('MediaInfo.exe', ),
'macos': ('mediainfo', ),
}
def _execute(self, filename):
output_type = 'OLDXML' if self.version >= (17, 10) else 'XML'
return MediaInfo(ensure_text(check_output([self.location, '--Output=' + output_type, '--Full', filename])))
@classmethod
def create(cls, os_family=None, suggested_path=None):
"""Create the executor instance."""
for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
try:
output = ensure_text(check_output([candidate, '--version']))
version = cls._get_version(output)
if version:
logger.debug('MediaInfo cli detected: %s', candidate)
return MediaInfoCliExecutor(candidate, version)
except CalledProcessError as e:
# old mediainfo returns non-zero exit code for mediainfo --version
version = cls._get_version(ensure_text(e.output))
if version:
logger.debug('MediaInfo cli detected: %s', candidate)
return MediaInfoCliExecutor(candidate, version)
except OSError:
pass
class MediaInfoCTypesExecutor(MediaInfoExecutor):
"""Media info ctypes."""
names = {
'unix': ('libmediainfo.so.0', ),
'windows': ('MediaInfo.dll', ),
'macos': ('libmediainfo.0.dylib', 'libmediainfo.dylib'),
}
def _execute(self, filename):
# Create a MediaInfo handle
return MediaInfo.parse(filename, library_file=self.location)
@classmethod
def create(cls, os_family=None, suggested_path=None):
"""Create the executor instance."""
for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
if MediaInfo.can_parse(candidate):
lib = MediaInfo._get_library(candidate)
lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
lib.MediaInfo_Option.restype = c_wchar_p
version = MediaInfoExecutor._get_version(lib.MediaInfo_Option(None, "Info_Version", ""))
logger.debug('MediaInfo library detected: %s (v%s)', candidate, '.'.join(map(str, version)))
return MediaInfoCTypesExecutor(candidate, version)
class MediaInfoProvider(Provider):
"""Media Info provider."""
executor = None
def __init__(self, config, suggested_path):
"""Init method."""
super(MediaInfoProvider, self).__init__(config, {
'general': OrderedDict([
('title', Property('title', description='media title')),
('path', Property('complete_name', description='media path')),
('duration', Duration('duration', description='media duration')),
('size', Quantity('file_size', units.byte, description='media size')),
('bit_rate', Quantity('overall_bit_rate', units.bps, description='media bit rate')),
]),
'video': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='video track number')),
('name', Property('name', description='video track name')),
('language', Language('language', description='video language')),
('duration', Duration('duration', description='video duration')),
('size', Quantity('stream_size', units.byte, description='video stream size')),
('width', Quantity('width', units.pixel)),
('height', Quantity('height', units.pixel)),
('scan_type', ScanType(config, 'scan_type', default='Progressive', description='video scan type')),
('aspect_ratio', Basic('display_aspect_ratio', float, description='display aspect ratio')),
('pixel_aspect_ratio', Basic('pixel_aspect_ratio', float, description='pixel aspect ratio')),
('resolution', None), # populated with ResolutionRule
('frame_rate', Quantity('frame_rate', units.FPS, float, description='video frame rate')),
# frame_rate_mode
('bit_rate', Quantity('bit_rate', units.bps, description='video bit rate')),
('bit_depth', Quantity('bit_depth', units.bit, description='video bit depth')),
('codec', VideoCodec(config, 'codec', description='video codec')),
('profile', VideoProfile(config, 'codec_profile', description='video codec profile')),
('profile_level', VideoProfileLevel(config, 'codec_profile', description='video codec profile level')),
('profile_tier', VideoProfileTier(config, 'codec_profile', description='video codec profile tier')),
('encoder', VideoEncoder(config, 'encoded_library_name', description='video encoder')),
('media_type', Property('internet_media_type', description='video media type')),
('forced', YesNo('forced', hide_value=False, description='video track forced')),
('default', YesNo('default', hide_value=False, description='video track default')),
]),
'audio': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='audio track number')),
('name', Property('title', description='audio track name')),
('language', Language('language', description='audio language')),
('duration', Duration('duration', description='audio duration')),
('size', Quantity('stream_size', units.byte, description='audio stream size')),
('codec', MultiValue(AudioCodec(config, 'codec', description='audio codec'))),
('profile', MultiValue(AudioProfile(config, 'format_profile', description='audio codec profile'),
delimiter=' / ')),
('channels_count', MultiValue(AudioChannels('channel_s', description='audio channels count'))),
('channel_positions', MultiValue(name='other_channel_positions', handler=(lambda x, *args: x),
delimiter=' / ', private=True, description='audio channels position')),
('channels', None), # populated with AudioChannelsRule
('bit_depth', Quantity('bit_depth', units.bit, description='audio bit depth')),
('bit_rate', MultiValue(Quantity('bit_rate', units.bps, description='audio bit rate'))),
('bit_rate_mode', MultiValue(BitRateMode(config, 'bit_rate_mode', description='audio bit rate mode'))),
('sampling_rate', MultiValue(Quantity('sampling_rate', units.Hz, description='audio sampling rate'))),
('compression', MultiValue(AudioCompression(config, 'compression_mode',
description='audio compression'))),
('forced', YesNo('forced', hide_value=False, description='audio track forced')),
('default', YesNo('default', hide_value=False, description='audio track default')),
]),
'subtitle': OrderedDict([
('id', Basic('track_id', int, allow_fallback=True, description='subtitle track number')),
('name', Property('title', description='subtitle track name')),
('language', Language('language', description='subtitle language')),
('hearing_impaired', None), # populated with HearingImpairedRule
('_closed_caption', Property('captionservicename', private=True)),
('closed_caption', None), # populated with ClosedCaptionRule
('format', SubtitleFormat(config, 'codec_id', description='subtitle format')),
('forced', YesNo('forced', hide_value=False, description='subtitle track forced')),
('default', YesNo('default', hide_value=False, description='subtitle track default')),
]),
}, {
'video': OrderedDict([
('language', LanguageRule('video language')),
('resolution', ResolutionRule('video resolution')),
]),
'audio': OrderedDict([
('language', LanguageRule('audio language')),
('channels', AudioChannelsRule('audio channels')),
('_atmosrule', AtmosRule('atmos rule')),
('_dtshdrule', DtsHdRule('dts-hd rule')),
]),
'subtitle': OrderedDict([
('language', LanguageRule('subtitle language')),
('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
('closed_caption', ClosedCaptionRule('closed caption')),
])
})
self.executor = MediaInfoExecutor.get_executor_instance(suggested_path)
def accepts(self, video_path):
"""Accept any video when MediaInfo is available."""
if self.executor is None:
logger.warning(WARN_MSG)
self.executor = False
return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
def describe(self, video_path, context):
"""Return video metadata."""
media_info = self.executor.extract_info(video_path)
def debug_data():
"""Debug data."""
xml = ensure_text(ElementTree.tostring(media_info.xml_dom)).replace('\r', '').replace('\n', '')
return ensure_text(minidom.parseString(xml).toprettyxml(indent=' ', newl='\n', encoding='utf-8'))
context['debug_data'] = debug_data
if logger.isEnabledFor(DEBUG):
logger.debug('Video %r scanned using mediainfo %r has raw data:\n%s',
video_path, self.executor.location, debug_data())
data = media_info.to_data()
result = {}
if data.get('tracks'):
general_tracks = []
video_tracks = []
audio_tracks = []
subtitle_tracks = []
for track in data.get('tracks'):
track_type = track.get('track_type')
if track_type == 'General':
general_tracks.append(track)
elif track_type == 'Video':
video_tracks.append(track)
elif track_type == 'Audio':
audio_tracks.append(track)
elif track_type == 'Text':
subtitle_tracks.append(track)
result = self._describe_tracks(video_path, general_tracks[0] if general_tracks else {},
video_tracks, audio_tracks, subtitle_tracks, context)
if not result:
raise MalformedFileError
result['provider'] = {
'name': 'mediainfo',
'version': self.version
}
return result
@property
def version(self):
"""Return mediainfo version information."""
versions = [('pymediainfo', pymediainfo_version)]
if self.executor:
versions.append((self.executor.location, 'v{}'.format('.'.join(map(str, self.executor.version)))))
return OrderedDict(versions)

libs/knowit/rule.py Normal file
View File

@ -0,0 +1,17 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .core import Reportable
class Rule(Reportable):
"""Rule abstract class."""
def __init__(self, name, override=False, **kwargs):
"""Constructor."""
super(Rule, self).__init__(name, **kwargs)
self.override = override
def execute(self, props, pv_props, context):
"""How to execute a rule."""
raise NotImplementedError

View File

@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .audio import AtmosRule
from .audio import AudioChannelsRule
from .audio import AudioCodecRule
from .audio import DtsHdRule
from .language import LanguageRule
from .subtitle import ClosedCaptionRule
from .subtitle import HearingImpairedRule
from .video import ResolutionRule

View File

@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .atmos import AtmosRule
from .channels import AudioChannelsRule
from .codec import AudioCodecRule
from .dtshd import DtsHdRule

View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...rule import Rule
class AtmosRule(Rule):
"""Atmos rule."""
@classmethod
def _redefine(cls, props, name, index):
"""Replace a multi-value property with the value at the given index."""
actual = props.get(name)
if isinstance(actual, list):
value = actual[index]
if value is None:
del props[name]
else:
props[name] = value
def execute(self, props, pv_props, context):
"""Execute the rule against properties."""
codecs = props.get('codec') or []
# TODO: handle this properly
if 'atmos' in {codec.lower() for codec in codecs if codec}:
index = None
for i, codec in enumerate(codecs):
if codec and 'atmos' in codec.lower():
index = i
break
if index is not None:
for name in ('channels_count', 'sampling_rate'):
self._redefine(props, name, index)

View File

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from logging import NullHandler, getLogger
from six import text_type
from ...rule import Rule
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class AudioChannelsRule(Rule):
"""Audio Channel rule."""
mapping = {
1: '1.0',
2: '2.0',
6: '5.1',
8: '7.1',
}
def execute(self, props, pv_props, context):
"""Execute the rule against properties."""
count = props.get('channels_count')
if count is None:
return
channels = self.mapping.get(count) if isinstance(count, int) else None
positions = pv_props.get('channel_positions') or []
positions = positions if isinstance(positions, list) else [positions]
candidate = 0
for position in positions:
if not position:
continue
c = 0
for i in position.split('/'):
try:
c += float(i)
except ValueError:
logger.debug('Invalid %s: %s', self.description, i)
c_count = int(c) + int(round((c - int(c)) * 10))
if c_count == count:
return text_type(c)
candidate = max(candidate, c)
if channels:
return channels
if candidate:
return text_type(candidate)
self.report(positions, context)
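# Worked example of the position parsing above, assuming a 6-channel track whose
# raw channel_positions value is '3/2/0.1' (front/side/LFE); no knowit import needed.
count = 6
c = sum(float(part) for part in '3/2/0.1'.split('/'))   # 5.1
c_count = int(c) + int(round((c - int(c)) * 10))         # 5 + 1 == 6
assert c_count == count                                   # matches channels_count
print(str(c))                                             # prints '5.1'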

View File

@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...rule import Rule
class AudioCodecRule(Rule):
"""Audio Codec rule."""
def execute(self, props, pv_props, context):
"""Execute the rule against properties."""
if '_codec' in pv_props:
return pv_props.get('_codec')

View File

@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...rule import Rule
class DtsHdRule(Rule):
"""DTS-HD rule."""
@classmethod
def _redefine(cls, props, name, index):
"""Replace a multi-value property with the value at the given index."""
actual = props.get(name)
if isinstance(actual, list):
value = actual[index]
if value is None:
del props[name]
else:
props[name] = value
def execute(self, props, pv_props, context):
"""Execute the rule against properties."""
if props.get('codec') == 'DTS-HD':
index = None
for i, profile in enumerate(props.get('profile', [])):
if profile and profile.upper() != 'CORE':
index = i
break
if index is not None:
for name in ('profile', 'channels_count', 'bit_rate',
'bit_rate_mode', 'sampling_rate', 'compression'):
self._redefine(props, name, index)
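# Worked example of the rule above, assuming knowit is importable (libs/ on sys.path)
# and a hand-built props dict for a DTS-HD MA track with a DTS core sub-stream.
from knowit.rules import DtsHdRule

props = {'codec': 'DTS-HD',
         'profile': ['Core', 'MA'],
         'channels_count': [6, 8],
         'bit_rate': [1509000, 3000000]}
DtsHdRule('dts-hd rule').execute(props, {}, {})
# props now holds the HD sub-stream values:
# {'codec': 'DTS-HD', 'profile': 'MA', 'channels_count': 8, 'bit_rate': 3000000}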

View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from logging import NullHandler, getLogger
import babelfish
from ..rule import Rule
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class LanguageRule(Rule):
"""Language rules."""
name_re = re.compile(r'(?P<name>\w+)\b', re.IGNORECASE)
def execute(self, props, pv_props, context):
"""Language detection using name."""
if 'language' in props:
return
if 'name' in props:
name = props.get('name', '')
match = self.name_re.match(name)
if match:
try:
return babelfish.Language.fromname(match.group('name'))
except babelfish.Error:
pass
logger.info('Invalid %s: %r', self.description, name)
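# A minimal standalone sketch of the same name-based fallback, assuming a subtitle
# track named 'English (SDH)' with no language tag (requires babelfish only).
import re
import babelfish

name = 'English (SDH)'
match = re.compile(r'(?P<name>\w+)\b', re.IGNORECASE).match(name)
if match:
    try:
        print(babelfish.Language.fromname(match.group('name')).alpha3)   # 'eng'
    except babelfish.Error:
        print('no language detected from name %r' % name)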

View File

@ -0,0 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .closedcaption import ClosedCaptionRule
from .hearingimpaired import HearingImpairedRule

View File

@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from ...rule import Rule
class ClosedCaptionRule(Rule):
"""Closed caption rule."""
cc_re = re.compile(r'(\bcc\d\b)', re.IGNORECASE)
def execute(self, props, pv_props, context):
"""Execute closed caption rule."""
for name in (pv_props.get('_closed_caption'), props.get('name')):
if name and self.cc_re.search(name):
return True

View File

@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from ...rule import Rule
class HearingImpairedRule(Rule):
"""Hearing Impaired rule."""
hi_re = re.compile(r'(\bsdh\b)', re.IGNORECASE)
def execute(self, props, pv_props, context):
"""Hearing Impaired."""
name = props.get('name')
if name and self.hi_re.search(name):
return True

View File

@ -0,0 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .resolution import ResolutionRule

View File

@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ...rule import Rule
class ResolutionRule(Rule):
"""Resolution rule."""
standard_resolutions = (
480,
720,
1080,
2160,
4320,
)
uncommon_resolutions = (
240,
288,
360,
576,
)
resolutions = list(sorted(standard_resolutions + uncommon_resolutions))
square = 4. / 3
wide = 16. / 9
def execute(self, props, pv_props, context):
"""Return the resolution for the video.
The resolution is based on a widescreen TV (16:9)
1920x800 will be considered 1080p since the TV will use 1920x1080 with vertical black bars
1426x1080 is considered 1080p since the TV will use 1920x1080 with horizontal black bars
The calculation considers the display aspect ratio and the pixel aspect ratio (not only width and height).
The upper resolution is selected if there's no perfect match with the following list of resolutions:
240, 288, 360, 480, 576, 720, 1080, 2160, 4320
If no interlaced information is available, resolution will be considered Progressive.
"""
width = props.get('width')
height = props.get('height')
if not width or not height:
return
try:
width = width.magnitude
height = height.magnitude
except AttributeError:
pass
dar = props.get('aspect_ratio', float(width) / height)
par = props.get('pixel_aspect_ratio', 1)
scan_type = props.get('scan_type', 'p')[0].lower()
# selected DAR must be between 4:3 and 16:9
selected_dar = max(min(dar, self.wide), self.square)
# mod-16
stretched_width = int(round(width * par / 16)) * 16
# mod-8
calculated_height = int(round(stretched_width / selected_dar / 8)) * 8
selected_resolution = None
for r in reversed(self.resolutions):
if r < calculated_height:
break
selected_resolution = r
if selected_resolution:
return '{0}{1}'.format(selected_resolution, scan_type)
msg = '{width}x{height} - scan_type: {scan_type}, aspect_ratio: {dar}, pixel_aspect_ratio: {par}'.format(
width=width, height=height, scan_type=scan_type, dar=dar, par=par)
self.report(msg, context)
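# Worked example of the calculation above, assuming a 1920x800 progressive track
# with square pixels (DAR 2.40:1); plain Python, no knowit import needed.
width, height, par = 1920, 800, 1
dar = width / float(height)                                   # 2.4
selected_dar = max(min(dar, 16. / 9), 4. / 3)                 # clamped to 16:9 (~1.778)
stretched_width = int(round(width * par / 16)) * 16           # 1920 (mod-16)
calculated_height = int(round(stretched_width / selected_dar / 8)) * 8   # 1080 (mod-8)
# The smallest listed resolution >= 1080 is 1080 and the scan type is progressive:
print('{0}{1}'.format(1080, 'p'))                             # prints '1080p'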

libs/knowit/serializer.py Normal file
View File

@ -0,0 +1,155 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from collections import OrderedDict
from datetime import timedelta
import babelfish
from six import text_type
import yaml
from .units import units
def format_property(context, o):
"""Convert properties to string."""
if isinstance(o, timedelta):
return format_duration(o, context['profile'])
if isinstance(o, babelfish.language.Language):
return format_language(o, context['profile'])
if hasattr(o, 'units'):
return format_quantity(o, context['profile'])
return text_type(o)
def get_json_encoder(context):
"""Return json encoder that handles all needed object types."""
class StringEncoder(json.JSONEncoder):
"""String json encoder."""
def default(self, o):
return format_property(context, o)
return StringEncoder
def get_yaml_dumper(context):
"""Return yaml dumper that handles all needed object types."""
class CustomDumper(yaml.SafeDumper):
"""Custom YAML Dumper."""
def default_representer(self, data):
"""Convert data to string."""
if isinstance(data, int):
return self.represent_int(data)
if isinstance(data, float):
return self.represent_float(data)
return self.represent_str(str(data))
def ordered_dict_representer(self, data):
"""Representer for OrderedDict."""
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
def default_language_representer(self, data):
"""Convert language to string."""
return self.represent_str(format_language(data, context['profile']))
def default_quantity_representer(self, data):
"""Convert quantity to string."""
return self.default_representer(format_quantity(data, context['profile']))
def default_duration_representer(self, data):
"""Convert quantity to string."""
return self.default_representer(format_duration(data, context['profile']))
CustomDumper.add_representer(OrderedDict, CustomDumper.ordered_dict_representer)
CustomDumper.add_representer(babelfish.Language, CustomDumper.default_language_representer)
CustomDumper.add_representer(timedelta, CustomDumper.default_duration_representer)
CustomDumper.add_representer(units.Quantity, CustomDumper.default_quantity_representer)
return CustomDumper
def get_yaml_loader(constructors=None):
"""Return a yaml loader that handles sequences as python lists."""
constructors = constructors or {}
class CustomLoader(yaml.Loader):
"""Custom YAML Loader."""
pass
CustomLoader.add_constructor('tag:yaml.org,2002:seq', CustomLoader.construct_python_tuple)
for tag, constructor in constructors.items():
CustomLoader.add_constructor(tag, constructor)
return CustomLoader
def format_duration(duration, profile='default'):
"""Format a timedelta according to the output profile."""
if profile == 'technical':
return str(duration)
seconds = duration.total_seconds()
if profile == 'code':
return duration.total_seconds()
hours = int(seconds // 3600)
seconds = seconds - (hours * 3600)
minutes = int(seconds // 60)
seconds = int(seconds - (minutes * 60))
if profile == 'human':
if hours > 0:
return '{0} hours {1:02d} minutes {2:02d} seconds'.format(hours, minutes, seconds)
if minutes > 0:
return '{0} minutes {1:02d} seconds'.format(minutes, seconds)
return '{0} seconds'.format(seconds)
return '{0}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
def format_language(language, profile='default'):
"""Format a babelfish Language according to the output profile."""
if profile in ('default', 'human'):
return str(language.name)
return str(language)
def format_quantity(quantity, profile='default'):
"""Human friendly format."""
if profile == 'code':
return quantity.magnitude
unit = quantity.units
if unit != 'bit':
technical = profile == 'technical'
if unit == 'hertz':
return _format_quantity(quantity.magnitude, unit='Hz', binary=technical, precision=3 if technical else 1)
root_unit = quantity.to_root_units().units
if root_unit == 'bit':
return _format_quantity(quantity.magnitude, binary=technical, precision=3 if technical else 2)
if root_unit == 'bit / second':
return _format_quantity(quantity.magnitude, unit='bps', binary=technical, precision=3 if technical else 1)
return str(quantity)
def _format_quantity(num, unit='B', binary=False, precision=2):
fmt_pattern = '{value:3.%sf} {prefix}{affix}{unit}' % precision
factor = 1024. if binary else 1000.
binary_affix = 'i' if binary else ''
for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
if abs(num) < factor:
return fmt_pattern.format(value=num, prefix=prefix, affix=binary_affix, unit=unit)
num /= factor
return fmt_pattern.format(value=num, prefix='Y', affix=binary_affix, unit=unit)
YAMLLoader = get_yaml_loader()
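# A short sketch of the formatting helpers above, assuming knowit is importable
# as bundled here (libs/ on sys.path); values are made-up examples.
from datetime import timedelta
from knowit.serializer import format_duration, _format_quantity

d = timedelta(hours=1, minutes=5, seconds=3)
print(format_duration(d))                # '1:05:03' (default profile)
print(format_duration(d, 'human'))       # '1 hours 05 minutes 03 seconds'
print(format_duration(d, 'code'))        # 3903.0

print(_format_quantity(1500000, unit='bps'))                    # '1.50 Mbps'
print(_format_quantity(1073741824, binary=True, precision=3))   # '1.000 GiB'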

libs/knowit/units.py Normal file
View File

@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
def _build_unit_registry():
"""Build a pint unit registry, or a dummy registry when pint is not installed."""
try:
from pint import UnitRegistry
registry = UnitRegistry()
registry.define('FPS = 1 * hertz')
except ImportError:
class NoUnitRegistry:
def __init__(self):
pass
def __getattr__(self, item):
return 1
registry = NoUnitRegistry()
return registry
units = _build_unit_registry()
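# A minimal sketch of how the registry degrades, assuming knowit is importable as
# bundled here: with pint installed, units.* produce quantities; without it, every
# attribute is 1 and arithmetic falls back to plain numbers.
from knowit.units import units

width = 1920 * units.pixel
frame_rate = 23.976 * units.FPS
try:
    print(width.magnitude, frame_rate.magnitude)   # 1920 23.976 (pint available)
except AttributeError:
    print(width, frame_rate)                       # plain 1920 23.976 (no pint)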

libs/knowit/utils.py Normal file
View File

@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
from collections import OrderedDict
from six import PY2, string_types, text_type
from . import VIDEO_EXTENSIONS
def recurse_paths(paths):
"""Return a file system encoded list of videofiles.
:param paths:
:type paths: string or list
:return:
:rtype: list
"""
enc_paths = []
if isinstance(paths, (string_types, text_type)):
paths = [p.strip() for p in paths.split(',')] if ',' in paths else paths.split()
encoding = sys.getfilesystemencoding()
for path in paths:
if os.path.isfile(path):
enc_paths.append(path.decode(encoding) if PY2 else path)
if os.path.isdir(path):
for root, directories, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename)[1] in VIDEO_EXTENSIONS:
if PY2 and os.name == 'nt':
fullpath = os.path.join(root, filename.decode(encoding))
elif PY2:
fullpath = os.path.join(root, filename).decode(encoding)
else:
fullpath = os.path.join(root, filename)
enc_paths.append(fullpath)
# Let's remove any dupes since mediainfo is rather slow.
seen = set()
seen_add = seen.add
return [f for f in enc_paths if not (f in seen or seen_add(f))]
def todict(obj, classkey=None):
"""Transform an object to dict."""
if isinstance(obj, string_types):
return obj
elif isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = todict(v, classkey)
return data
elif hasattr(obj, '_ast'):
return todict(obj._ast())
elif hasattr(obj, '__iter__'):
return [todict(v, classkey) for v in obj]
elif hasattr(obj, '__dict__'):
values = [(key, todict(value, classkey))
for key, value in obj.__dict__.items() if not callable(value) and not key.startswith('_')]
data = OrderedDict([(k, v) for k, v in values if v is not None])
if classkey is not None and hasattr(obj, '__class__'):
data[classkey] = obj.__class__.__name__
return data
return obj
def detect_os():
"""Detect os family: windows, macos or unix."""
if os.name in ('nt', 'dos', 'os2', 'ce'):
return 'windows'
if sys.platform == 'darwin':
return 'macos'
return 'unix'
def define_candidate(locations, names, os_family=None, suggested_path=None):
"""Generate candidate list for the given parameters."""
os_family = os_family or detect_os()
for location in (suggested_path, ) + locations[os_family]:
if not location:
continue
if location == '__PATH__':
for name in names[os_family]:
yield name
elif os.path.isfile(location):
yield location
elif os.path.isdir(location):
for name in names[os_family]:
cmd = os.path.join(location, name)
if os.path.isfile(cmd):
yield cmd
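# A minimal sketch of how the executors above consume this generator; the locations,
# names and suggested path are assumed examples, and only entries that exist on disk
# (plus the bare '__PATH__' names) are actually yielded.
from knowit.utils import define_candidate

locations = {'unix': ('/usr/local/ffmpeg/bin', '__PATH__'),
             'windows': ('__PATH__',),
             'macos': ('__PATH__',)}
names = {'unix': ('ffprobe',), 'windows': ('ffprobe.exe',), 'macos': ('ffprobe',)}

for candidate in define_candidate(locations, names, os_family='unix',
                                  suggested_path='/opt/ffmpeg/bin/ffprobe'):
    print(candidate)
# Typical output: the suggested file (if it exists), files found under the listed
# directories, then the bare executable name to be resolved via PATH.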

View File

@ -1,3 +0,0 @@
Patrick Altman <paltman@gmail.com> (author)
cjlucas https://github.com/cjlucas
Louis Sautier <sautier.louis@gmail.com> (maintainer since 2016)

View File

@ -1,24 +0,0 @@
The MIT License
Copyright (c) 2010-2014, Patrick Altman <paltman@gmail.com>
Copyright (c) 2016, Louis Sautier <sautier.louis@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
http://www.opensource.org/licenses/mit-license.php

View File

@ -1,27 +0,0 @@
pymediainfo
-----------
.. image:: https://img.shields.io/pypi/v/pymediainfo.svg
:target: https://pypi.org/project/pymediainfo
.. image:: https://img.shields.io/pypi/pyversions/pymediainfo.svg
:target: https://pypi.org/project/pymediainfo
.. image:: https://repology.org/badge/tiny-repos/python:pymediainfo.svg
:target: https://repology.org/metapackage/python:pymediainfo
.. image:: https://img.shields.io/pypi/implementation/pymediainfo.svg
:target: https://pypi.org/project/pymediainfo
.. image:: https://api.travis-ci.org/sbraz/pymediainfo.svg?branch=master
:target: https://travis-ci.org/sbraz/pymediainfo
.. image:: https://ci.appveyor.com/api/projects/status/g15a2daem1oub57n/branch/master?svg=true
:target: https://ci.appveyor.com/project/sbraz/pymediainfo
This small package is a wrapper around the MediaInfo library.
It works on Linux, Mac OS X and Windows and is tested with Python 2.7, 3.4, 3.5, 3.6, 3.7, PyPy and PyPy3.
See https://pymediainfo.readthedocs.io/ for more information.

View File

@ -1,320 +0,0 @@
# vim: set fileencoding=utf-8 :
import os
import re
import locale
import json
import ctypes
import sys
from pkg_resources import get_distribution, DistributionNotFound
import xml.etree.ElementTree as ET
try:
import pathlib
except ImportError:
pathlib = None
if sys.version_info < (3,):
import urlparse
else:
import urllib.parse as urlparse
try:
__version__ = get_distribution("pymediainfo").version
except DistributionNotFound:
pass
class Track(object):
"""
An object associated with a media file track.
Each :class:`Track` attribute corresponds to attributes parsed from MediaInfo's output.
All attributes are lower case. Attributes that are present several times such as Duration
yield a second attribute starting with `other_` which is a list of all alternative attribute values.
When a non-existing attribute is accessed, `None` is returned.
Example:
>>> t = mi.tracks[0]
>>> t
<Track track_id='None', track_type='General'>
>>> t.duration
3000
>>> t.to_data()["other_duration"]
['3 s 0 ms', '3 s 0 ms', '3 s 0 ms',
'00:00:03.000', '00:00:03.000']
>>> type(t.non_existing)
NoneType
All available attributes can be obtained by calling :func:`to_data`.
"""
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except:
pass
return None
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __init__(self, xml_dom_fragment):
self.track_type = xml_dom_fragment.attrib['type']
for el in xml_dom_fragment:
node_name = el.tag.lower().strip().strip('_')
if node_name == 'id':
node_name = 'track_id'
node_value = el.text
other_node_name = "other_%s" % node_name
if getattr(self, node_name) is None:
setattr(self, node_name, node_value)
else:
if getattr(self, other_node_name) is None:
setattr(self, other_node_name, [node_value, ])
else:
getattr(self, other_node_name).append(node_value)
for o in [d for d in self.__dict__.keys() if d.startswith('other_')]:
try:
primary = o.replace('other_', '')
setattr(self, primary, int(getattr(self, primary)))
except:
for v in getattr(self, o):
try:
current = getattr(self, primary)
setattr(self, primary, int(v))
getattr(self, o).append(current)
break
except:
pass
def __repr__(self):
return("<Track track_id='{}', track_type='{}'>".format(self.track_id, self.track_type))
def to_data(self):
"""
Returns a dict representation of the track attributes.
Example:
>>> sorted(track.to_data().keys())[:3]
['codec', 'codec_extensions_usually_used', 'codec_url']
>>> t.to_data()["file_size"]
5988
:rtype: dict
"""
data = {}
for k, v in self.__dict__.items():
if k != 'xml_dom_fragment':
data[k] = v
return data
class MediaInfo(object):
"""
An object containing information about a media file.
:class:`MediaInfo` objects can be created by directly calling code from
libmediainfo (in this case, the library must be present on the system):
>>> pymediainfo.MediaInfo.parse("/path/to/file.mp4")
Alternatively, objects may be created from MediaInfo's XML output.
Such output can be obtained using the ``XML`` output format on versions older than v17.10
and the ``OLDXML`` format on newer versions.
Using such an XML file, we can create a :class:`MediaInfo` object:
>>> with open("output.xml") as f:
... mi = pymediainfo.MediaInfo(f.read())
:param str xml: XML output obtained from MediaInfo.
:param str encoding_errors: option to pass to :func:`str.encode`'s `errors`
parameter before parsing `xml`.
:raises xml.etree.ElementTree.ParseError: if passed invalid XML.
:var tracks: A list of :py:class:`Track` objects which the media file contains.
For instance:
>>> mi = pymediainfo.MediaInfo.parse("/path/to/file.mp4")
>>> for t in mi.tracks:
... print(t)
<Track track_id='None', track_type='General'>
<Track track_id='1', track_type='Text'>
"""
def __eq__(self, other):
return self.tracks == other.tracks
def __init__(self, xml, encoding_errors="strict"):
xml_dom = ET.fromstring(xml.encode("utf-8", encoding_errors))
self.tracks = []
# This is the case for libmediainfo < 18.03
# https://github.com/sbraz/pymediainfo/issues/57
# https://github.com/MediaArea/MediaInfoLib/commit/575a9a32e6960ea34adb3bc982c64edfa06e95eb
if xml_dom.tag == "File":
xpath = "track"
else:
xpath = "File/track"
for xml_track in xml_dom.iterfind(xpath):
self.tracks.append(Track(xml_track))
@staticmethod
def _get_library(library_file=None):
os_is_nt = os.name in ("nt", "dos", "os2", "ce")
if os_is_nt:
lib_type = ctypes.WinDLL
else:
lib_type = ctypes.CDLL
if library_file is None:
if os_is_nt:
library_names = ("MediaInfo.dll",)
elif sys.platform == "darwin":
library_names = ("libmediainfo.0.dylib", "libmediainfo.dylib")
else:
library_names = ("libmediainfo.so.0",)
script_dir = os.path.dirname(__file__)
# Look for the library file in the script folder
for library in library_names:
lib_path = os.path.join(script_dir, library)
if os.path.isfile(lib_path):
# If we find it, don't try any other filename
library_names = (lib_path,)
break
else:
library_names = (library_file,)
for i, library in enumerate(library_names, start=1):
try:
lib = lib_type(library)
# Define arguments and return types
lib.MediaInfo_Inform.restype = ctypes.c_wchar_p
lib.MediaInfo_New.argtypes = []
lib.MediaInfo_New.restype = ctypes.c_void_p
lib.MediaInfo_Option.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_wchar_p]
lib.MediaInfo_Option.restype = ctypes.c_wchar_p
lib.MediaInfo_Inform.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
lib.MediaInfo_Inform.restype = ctypes.c_wchar_p
lib.MediaInfo_Open.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p]
lib.MediaInfo_Open.restype = ctypes.c_size_t
lib.MediaInfo_Delete.argtypes = [ctypes.c_void_p]
lib.MediaInfo_Delete.restype = None
lib.MediaInfo_Close.argtypes = [ctypes.c_void_p]
lib.MediaInfo_Close.restype = None
return lib
except OSError:
# If we've tried all possible filenames
if i == len(library_names):
raise
@classmethod
def can_parse(cls, library_file=None):
"""
Checks whether media files can be analyzed using libmediainfo.
:rtype: bool
"""
try:
cls._get_library(library_file)
return True
except:
return False
@classmethod
def parse(cls, filename, library_file=None, cover_data=False,
encoding_errors="strict", parse_speed=0.5, text=False,
full=True, legacy_stream_display=False):
"""
Analyze a media file using libmediainfo.
If libmediainfo is located in a non-standard location, the `library_file` parameter can be used:
>>> pymediainfo.MediaInfo.parse("tests/data/sample.mkv",
... library_file="/path/to/libmediainfo.dylib")
:param filename: path to the media file which will be analyzed.
A URL can also be used if libmediainfo was compiled
with CURL support.
:param str library_file: path to the libmediainfo library, this should only be used if the library cannot be auto-detected.
:param bool cover_data: whether to retrieve cover data as base64.
:param str encoding_errors: option to pass to :func:`str.encode`'s `errors`
parameter before parsing MediaInfo's XML output.
:param float parse_speed: passed to the library as `ParseSpeed`,
this option takes values between 0 and 1.
A higher value will yield more precise results in some cases
but will also increase parsing time.
:param bool text: if ``True``, MediaInfo's text output will be returned instead
of a :class:`MediaInfo` object.
:param bool full: display additional tags, including computer-readable values
for sizes and durations.
:param bool legacy_stream_display: display additional information about streams.
:type filename: str or pathlib.Path
:rtype: str if `text` is ``True``.
:rtype: :class:`MediaInfo` otherwise.
:raises FileNotFoundError: if passed a non-existent file
(Python 3.3), does not work on Windows.
:raises IOError: if passed a non-existent file (Python < 3.3),
does not work on Windows.
:raises RuntimeError: if parsing fails, this should not
happen unless libmediainfo itself fails.
"""
lib = cls._get_library(library_file)
if pathlib is not None and isinstance(filename, pathlib.PurePath):
filename = str(filename)
url = False
else:
url = urlparse.urlparse(filename)
# Try to open the file (if it's not a URL)
# Doesn't work on Windows because paths are URLs
if not (url and url.scheme):
# Test whether the file is readable
with open(filename, "rb"):
pass
# Obtain the library version
lib_version = lib.MediaInfo_Option(None, "Info_Version", "")
lib_version = tuple(int(_) for _ in re.search("^MediaInfoLib - v(\\S+)", lib_version).group(1).split("."))
# The XML option was renamed starting with version 17.10
if lib_version >= (17, 10):
xml_option = "OLDXML"
else:
xml_option = "XML"
# Cover_Data is not extracted by default since version 18.03
# See https://github.com/MediaArea/MediaInfoLib/commit/d8fd88a1c282d1c09388c55ee0b46029e7330690
if cover_data and lib_version >= (18, 3):
lib.MediaInfo_Option(None, "Cover_Data", "base64")
# Create a MediaInfo handle
handle = lib.MediaInfo_New()
lib.MediaInfo_Option(handle, "CharSet", "UTF-8")
# Fix for https://github.com/sbraz/pymediainfo/issues/22
# Python 2 does not change LC_CTYPE
# at startup: https://bugs.python.org/issue6203
if (sys.version_info < (3,) and os.name == "posix"
and locale.getlocale() == (None, None)):
locale.setlocale(locale.LC_CTYPE, locale.getdefaultlocale())
lib.MediaInfo_Option(None, "Inform", "" if text else xml_option)
lib.MediaInfo_Option(None, "Complete", "1" if full else "")
lib.MediaInfo_Option(None, "ParseSpeed", str(parse_speed))
lib.MediaInfo_Option(None, "LegacyStreamDisplay", "1" if legacy_stream_display else "")
if lib.MediaInfo_Open(handle, filename) == 0:
raise RuntimeError("An eror occured while opening {}"
" with libmediainfo".format(filename))
output = lib.MediaInfo_Inform(handle, 0)
# Delete the handle
lib.MediaInfo_Close(handle)
lib.MediaInfo_Delete(handle)
if text:
return output
else:
return cls(output, encoding_errors)
def to_data(self):
"""
Returns a dict representation of the object's :py:class:`Tracks <Track>`.
:rtype: dict
"""
data = {'tracks': []}
for track in self.tracks:
data['tracks'].append(track.to_data())
return data
def to_json(self):
"""
Returns a JSON representation of the object's :py:class:`Tracks <Track>`.
:rtype: str
"""
return json.dumps(self.to_data())

View File

@ -1,2 +0,0 @@
from .pyprobe import VideoFileParser

View File

@ -1,41 +0,0 @@
class BaseParser:
@classmethod
def parse(cls, data, rawMode, includeMissing):
"""Core of the parser classes
Collects all methods prefixed with "value_" and builds a dict of
their return values. Parser classes will inherit from this class.
All methods that begin with "value_" in a parser class will be given
the same `data` argument and are expected to pull their corresponding
value from the collection.
These methods return a tuple - their raw value and formatted value.
The raw value is a string or tuple of string and the formatted value
be of type string, int, float, or tuple.
If no data is found in a method, the raw value is expected to be None,
and for the formatted value, strings will be "null", ints will be 0,
floats will be 0.0.
Args:
data (dict): Raw video data
rawMode (bool): Returns raw values instead of formatted values
includeMissing (bool): If value is missing, return "empty" value
Returns:
dict<str, dict<str, var>>: Parsed data from class methods, may not have every value.
"""
parsers = [getattr(cls, p) for p in dir(cls) if p.startswith("value_")]
info = {}
for parser in parsers:
parsed_raw, parsed_formatted = parser(data)
if parsed_raw == None and not includeMissing:
continue
name = parser.__name__[6:]
if rawMode:
info[name] = parsed_raw
else:
info[name] = parsed_formatted
return info

Some files were not shown because too many files have changed in this diff.