From 69acf99781ad54d2e9faf2df87ebddd909289fa0 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Tue, 7 Jan 2020 01:18:46 -0500
Subject: [PATCH] First commit to get the ball rolling for the conversion to
 PY3. What works: startup, GUI, config saves/loads, pullist
 recreates/populate, comictagger updated, metatagging works, rechecking
 files works. Everything else: not fully tested

---
 Mylar.py | 32 +-
 README.md | 4 +
 comictagger.py | 2 +
 data/interfaces/default/config.html | 2 +-
 [deletion-only diffstat entries condensed: lib/ConcurrentLogHandler (incl.
  its ConcurrentLogHandler-0.9.1-py2.7.egg-info metadata), lib/MultipartPostHandler.py,
  lib/apscheduler, lib/argparse.py, lib/bs4, lib/cfscrape, lib/cherrypy
  (incl. lib/, process/, scaffold/, test/, wsgiserver/)]
 lib/comictaggerlib/autotagmatchwindow.py | 72 +-
 lib/comictaggerlib/autotagprogresswindow.py | 14 +-
 lib/comictaggerlib/autotagstartwindow.py | 10 +-
 lib/comictaggerlib/cli.py | 164 +-
 lib/comictaggerlib/comicapi/comet.py | 20 +-
 lib/comictaggerlib/comicapi/comicarchive.py | 199 +-
 lib/comictaggerlib/comicapi/comicbookinfo.py | 8 +-
 lib/comictaggerlib/comicapi/comicinfoxml.py | 9 +-
 lib/comictaggerlib/comicapi/filenameparser.py | 2 +-
 lib/comictaggerlib/comicapi/genericmetadata.py | 6 +-
 lib/comictaggerlib/comicapi/issuestring.py | 4 +-
 lib/comictaggerlib/comicapi/utils.py | 16 +-
 lib/comictaggerlib/comicvinecacher.py | 24 +-
 lib/comictaggerlib/comicvinetalker.py | 193 +-
 lib/comictaggerlib/coverimagewidget.py | 19 +-
 lib/comictaggerlib/crediteditorwindow.py | 10 +-
 lib/comictaggerlib/ctversion.py | 9 +-
 lib/comictaggerlib/exportwindow.py | 8 +-
 lib/comictaggerlib/filerenamer.py | 16 +-
 lib/comictaggerlib/fileselectionlist.py | 79 +-
 lib/comictaggerlib/graphics/autotag.png | Bin 15543 -> 13289 bytes
 lib/comictaggerlib/graphics/parse.png | Bin 6702 -> 4300 bytes
 lib/comictaggerlib/imagefetcher.py | 17 +-
 lib/comictaggerlib/imagehasher.py | 20 +-
 lib/comictaggerlib/imagepopup.py | 17 +-
 lib/comictaggerlib/issueidentifier.py | 58 +-
 lib/comictaggerlib/issueselectionwindow.py | 44 +-
 lib/comictaggerlib/logwindow.py | 10 +-
 lib/comictaggerlib/main.py | 63 +-
 lib/comictaggerlib/matchselectionwindow.py | 38 +-
 lib/comictaggerlib/optionalmsgdialog.py | 7 +-
 lib/comictaggerlib/options.py | 33 +-
 lib/comictaggerlib/pagebrowser.py | 12 +-
 lib/comictaggerlib/pagelisteditor.py | 27 +-
 lib/comictaggerlib/pageloader.py | 4 +-
 lib/comictaggerlib/progresswindow.py | 8 +-
 lib/comictaggerlib/renamewindow.py | 39 +-
 lib/comictaggerlib/settings.py | 518 +-
 lib/comictaggerlib/settingswindow.py | 104 +-
 lib/comictaggerlib/taggerwindow.py | 387 +-
 lib/comictaggerlib/ui/qtutils.py | 13 +-
 lib/comictaggerlib/ui/settingswindow.ui | 219 +-
 lib/comictaggerlib/versionchecker.py | 10 +-
 lib/comictaggerlib/volumeselectionwindow.py | 80 +-
 [deletion-only diffstat entries condensed: lib/concurrent (the futures backport)]
 lib/configobj.py | 10 +-
 [deletion-only diffstat entries condensed: lib/feedparser.py, lib/funcsigs,
  lib/get_image_size.py, lib/httplib2, lib/js2py, lib/mako, lib/markupsafe,
  lib/pytz (incl. several hundred zoneinfo binaries), lib/requests (incl.
  bundled chardet and urllib3)]
 lib/rtorrent/__init__.py | 145 +-
 lib/rtorrent/common.py | 4 +-
 lib/rtorrent/connection.py | 50 +-
 lib/rtorrent/err.py | 2 +-
 lib/rtorrent/file.py | 144 +-
 lib/rtorrent/group.py | 4 +-
 lib/rtorrent/lib/torrentparser.py | 4 +-
 lib/rtorrent/lib/xmlrpc/clients/http.py | 4 +-
 lib/rtorrent/lib/xmlrpc/clients/scgi.py | 16 +-
 lib/rtorrent/lib/xmlrpc/transports/basic_auth.py | 16 +-
 lib/rtorrent/lib/xmlrpc/transports/requests_.py | 24 +-
 lib/rtorrent/lib/xmlrpc/transports/scgi.py | 17 +-
 lib/rtorrent/peer.py | 184 +-
 lib/rtorrent/rpc/__init__.py | 55 +-
 lib/rtorrent/torrent.py | 610 +-
 lib/rtorrent/tracker.py | 292 +-
 [deletion-only diffstat entries condensed: lib/simplejson (incl. tests),
  lib/six.py, lib/tests, lib/tzlocal]
 mylar/Failed.py | 8 +-
 mylar/PostProcessor.py | 64 +-
 mylar/__init__.py | 82 +-
 mylar/api.py | 21 +-
 mylar/auth.py | 10 +-
 mylar/auth32p.py | 4 +-
 mylar/cache.py | 12 +-
 mylar/cmtagmylar.py | 36 +-
 mylar/comicbookdb.py | 57 +-
 mylar/config.py | 81 +-
 mylar/cv.py | 19 +-
 mylar/db.py | 24 +-
 mylar/dbupdater.py | 2 +-
 mylar/encrypted.py | 15 +-
 mylar/filechecker.py | 2287 ++++----
 mylar/filers.py | 8 +-
 mylar/findcomicfeed.py | 17 +-
 mylar/ftpsshup.py | 16 +-
 mylar/getcomics.py | 26 +-
 mylar/helpers.py | 90 +-
 mylar/importer.py | 54 +-
 mylar/latest.py | 2 +-
 mylar/librarysync.py | 6 +-
 mylar/logger.py | 6 +-
 mylar/mb.py | 22 +-
 mylar/newpull.py | 9 +-
 mylar/notifiers.py | 38 +-
 mylar/nzbget.py | 6 +-
 mylar/opds.py | 12 +-
 mylar/parseit.py | 53 +-
 mylar/process.py | 6 +-
 mylar/readinglist.py | 4 +-
 mylar/rsscheck.py | 30 +-
 mylar/rsscheckit.py | 2 +-
 mylar/sabnzbd.py | 4 +-
 mylar/sabparse.py | 1 -
 mylar/scheduler.py | 2 +-
 mylar/search.py | 76 +-
 mylar/searchit.py | 2 +-
 mylar/solicit.py | 12 +-
 mylar/test.py | 6 +-
 mylar/torrent/clients/deluge.py | 2 +-
 mylar/torrent/clients/qbittorrent.py | 8 +-
 mylar/torrent/clients/rtorrent.py | 33 +-
 mylar/torrent/clients/transmission.py | 4 +-
 mylar/torrent/clients/utorrent.py | 2 +-
 mylar/torrent/helpers/variable.py | 4 +-
 mylar/updater.py | 7 +-
 mylar/utorrent.py | 2 +-
 mylar/versioncheck.py | 12 +-
 mylar/versioncheckit.py | 2 +-
 mylar/webserve.py | 158 +-
 mylar/webstart.py | 13 +-
 mylar/weeklypull.py | 32 +-
 mylar/weeklypullit.py | 2 +-
 mylar/wwt.py | 9 +-
 requirements.txt | 25 +
 1121 files changed, 4017 insertions(+), 119374 deletions(-)

 mode change 100755 => 100644 lib/comictaggerlib/imagehasher.py
 mode change 100755 => 100644 lib/comictaggerlib/main.py
 [per-file "delete mode 100644" / "delete mode 100755" lines for each removed
  file above condensed]
lib/mako/ext/turbogears.py delete mode 100644 lib/mako/filters.py delete mode 100644 lib/mako/lexer.py delete mode 100644 lib/mako/lookup.py delete mode 100644 lib/mako/parsetree.py delete mode 100644 lib/mako/pygen.py delete mode 100644 lib/mako/pyparser.py delete mode 100644 lib/mako/runtime.py delete mode 100644 lib/mako/template.py delete mode 100644 lib/mako/util.py delete mode 100644 lib/markupsafe/__init__.py delete mode 100644 lib/markupsafe/_compat.py delete mode 100644 lib/markupsafe/_constants.py delete mode 100644 lib/markupsafe/_native.py delete mode 100644 lib/markupsafe/_speedups.c delete mode 100644 lib/markupsafe/tests.py delete mode 100644 lib/pytz/LICENSE.txt delete mode 100644 lib/pytz/README.txt delete mode 100644 lib/pytz/__init__.py delete mode 100644 lib/pytz/exceptions.py delete mode 100644 lib/pytz/lazy.py delete mode 100644 lib/pytz/reference.py delete mode 100644 lib/pytz/tzfile.py delete mode 100644 lib/pytz/tzinfo.py delete mode 100644 lib/pytz/zoneinfo/Africa/Abidjan delete mode 100644 lib/pytz/zoneinfo/Africa/Accra delete mode 100644 lib/pytz/zoneinfo/Africa/Addis_Ababa delete mode 100644 lib/pytz/zoneinfo/Africa/Algiers delete mode 100644 lib/pytz/zoneinfo/Africa/Asmara delete mode 100644 lib/pytz/zoneinfo/Africa/Asmera delete mode 100644 lib/pytz/zoneinfo/Africa/Bamako delete mode 100644 lib/pytz/zoneinfo/Africa/Bangui delete mode 100644 lib/pytz/zoneinfo/Africa/Banjul delete mode 100644 lib/pytz/zoneinfo/Africa/Bissau delete mode 100644 lib/pytz/zoneinfo/Africa/Blantyre delete mode 100644 lib/pytz/zoneinfo/Africa/Brazzaville delete mode 100644 lib/pytz/zoneinfo/Africa/Bujumbura delete mode 100644 lib/pytz/zoneinfo/Africa/Cairo delete mode 100644 lib/pytz/zoneinfo/Africa/Casablanca delete mode 100644 lib/pytz/zoneinfo/Africa/Ceuta delete mode 100644 lib/pytz/zoneinfo/Africa/Conakry delete mode 100644 lib/pytz/zoneinfo/Africa/Dakar delete mode 100644 lib/pytz/zoneinfo/Africa/Dar_es_Salaam delete mode 100644 lib/pytz/zoneinfo/Africa/Djibouti delete mode 100644 lib/pytz/zoneinfo/Africa/Douala delete mode 100644 lib/pytz/zoneinfo/Africa/El_Aaiun delete mode 100644 lib/pytz/zoneinfo/Africa/Freetown delete mode 100644 lib/pytz/zoneinfo/Africa/Gaborone delete mode 100644 lib/pytz/zoneinfo/Africa/Harare delete mode 100644 lib/pytz/zoneinfo/Africa/Johannesburg delete mode 100644 lib/pytz/zoneinfo/Africa/Juba delete mode 100644 lib/pytz/zoneinfo/Africa/Kampala delete mode 100644 lib/pytz/zoneinfo/Africa/Khartoum delete mode 100644 lib/pytz/zoneinfo/Africa/Kigali delete mode 100644 lib/pytz/zoneinfo/Africa/Kinshasa delete mode 100644 lib/pytz/zoneinfo/Africa/Lagos delete mode 100644 lib/pytz/zoneinfo/Africa/Libreville delete mode 100644 lib/pytz/zoneinfo/Africa/Lome delete mode 100644 lib/pytz/zoneinfo/Africa/Luanda delete mode 100644 lib/pytz/zoneinfo/Africa/Lubumbashi delete mode 100644 lib/pytz/zoneinfo/Africa/Lusaka delete mode 100644 lib/pytz/zoneinfo/Africa/Malabo delete mode 100644 lib/pytz/zoneinfo/Africa/Maputo delete mode 100644 lib/pytz/zoneinfo/Africa/Maseru delete mode 100644 lib/pytz/zoneinfo/Africa/Mbabane delete mode 100644 lib/pytz/zoneinfo/Africa/Mogadishu delete mode 100644 lib/pytz/zoneinfo/Africa/Monrovia delete mode 100644 lib/pytz/zoneinfo/Africa/Nairobi delete mode 100644 lib/pytz/zoneinfo/Africa/Ndjamena delete mode 100644 lib/pytz/zoneinfo/Africa/Niamey delete mode 100644 lib/pytz/zoneinfo/Africa/Nouakchott delete mode 100644 lib/pytz/zoneinfo/Africa/Ouagadougou delete mode 100644 lib/pytz/zoneinfo/Africa/Porto-Novo delete mode 100644 
lib/pytz/zoneinfo/Africa/Sao_Tome delete mode 100644 lib/pytz/zoneinfo/Africa/Timbuktu delete mode 100644 lib/pytz/zoneinfo/Africa/Tripoli delete mode 100644 lib/pytz/zoneinfo/Africa/Tunis delete mode 100644 lib/pytz/zoneinfo/Africa/Windhoek delete mode 100644 lib/pytz/zoneinfo/America/Adak delete mode 100644 lib/pytz/zoneinfo/America/Anchorage delete mode 100644 lib/pytz/zoneinfo/America/Anguilla delete mode 100644 lib/pytz/zoneinfo/America/Antigua delete mode 100644 lib/pytz/zoneinfo/America/Araguaina delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Buenos_Aires delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Catamarca delete mode 100644 lib/pytz/zoneinfo/America/Argentina/ComodRivadavia delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Cordoba delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Jujuy delete mode 100644 lib/pytz/zoneinfo/America/Argentina/La_Rioja delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Mendoza delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Salta delete mode 100644 lib/pytz/zoneinfo/America/Argentina/San_Juan delete mode 100644 lib/pytz/zoneinfo/America/Argentina/San_Luis delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Tucuman delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Ushuaia delete mode 100644 lib/pytz/zoneinfo/America/Aruba delete mode 100644 lib/pytz/zoneinfo/America/Asuncion delete mode 100644 lib/pytz/zoneinfo/America/Atikokan delete mode 100644 lib/pytz/zoneinfo/America/Atka delete mode 100644 lib/pytz/zoneinfo/America/Bahia delete mode 100644 lib/pytz/zoneinfo/America/Bahia_Banderas delete mode 100644 lib/pytz/zoneinfo/America/Barbados delete mode 100644 lib/pytz/zoneinfo/America/Belem delete mode 100644 lib/pytz/zoneinfo/America/Belize delete mode 100644 lib/pytz/zoneinfo/America/Blanc-Sablon delete mode 100644 lib/pytz/zoneinfo/America/Boa_Vista delete mode 100644 lib/pytz/zoneinfo/America/Bogota delete mode 100644 lib/pytz/zoneinfo/America/Boise delete mode 100644 lib/pytz/zoneinfo/America/Buenos_Aires delete mode 100644 lib/pytz/zoneinfo/America/Cambridge_Bay delete mode 100644 lib/pytz/zoneinfo/America/Campo_Grande delete mode 100644 lib/pytz/zoneinfo/America/Cancun delete mode 100644 lib/pytz/zoneinfo/America/Caracas delete mode 100644 lib/pytz/zoneinfo/America/Catamarca delete mode 100644 lib/pytz/zoneinfo/America/Cayenne delete mode 100644 lib/pytz/zoneinfo/America/Cayman delete mode 100644 lib/pytz/zoneinfo/America/Chicago delete mode 100644 lib/pytz/zoneinfo/America/Chihuahua delete mode 100644 lib/pytz/zoneinfo/America/Coral_Harbour delete mode 100644 lib/pytz/zoneinfo/America/Cordoba delete mode 100644 lib/pytz/zoneinfo/America/Costa_Rica delete mode 100644 lib/pytz/zoneinfo/America/Creston delete mode 100644 lib/pytz/zoneinfo/America/Cuiaba delete mode 100644 lib/pytz/zoneinfo/America/Curacao delete mode 100644 lib/pytz/zoneinfo/America/Danmarkshavn delete mode 100644 lib/pytz/zoneinfo/America/Dawson delete mode 100644 lib/pytz/zoneinfo/America/Dawson_Creek delete mode 100644 lib/pytz/zoneinfo/America/Denver delete mode 100644 lib/pytz/zoneinfo/America/Detroit delete mode 100644 lib/pytz/zoneinfo/America/Dominica delete mode 100644 lib/pytz/zoneinfo/America/Edmonton delete mode 100644 lib/pytz/zoneinfo/America/Eirunepe delete mode 100644 lib/pytz/zoneinfo/America/El_Salvador delete mode 100644 lib/pytz/zoneinfo/America/Ensenada delete mode 100644 lib/pytz/zoneinfo/America/Fort_Wayne delete mode 100644 
lib/pytz/zoneinfo/America/Fortaleza delete mode 100644 lib/pytz/zoneinfo/America/Glace_Bay delete mode 100644 lib/pytz/zoneinfo/America/Godthab delete mode 100644 lib/pytz/zoneinfo/America/Goose_Bay delete mode 100644 lib/pytz/zoneinfo/America/Grand_Turk delete mode 100644 lib/pytz/zoneinfo/America/Grenada delete mode 100644 lib/pytz/zoneinfo/America/Guadeloupe delete mode 100644 lib/pytz/zoneinfo/America/Guatemala delete mode 100644 lib/pytz/zoneinfo/America/Guayaquil delete mode 100644 lib/pytz/zoneinfo/America/Guyana delete mode 100644 lib/pytz/zoneinfo/America/Halifax delete mode 100644 lib/pytz/zoneinfo/America/Havana delete mode 100644 lib/pytz/zoneinfo/America/Hermosillo delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Indianapolis delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Knox delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Marengo delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Petersburg delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Tell_City delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Vevay delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Vincennes delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Winamac delete mode 100644 lib/pytz/zoneinfo/America/Indianapolis delete mode 100644 lib/pytz/zoneinfo/America/Inuvik delete mode 100644 lib/pytz/zoneinfo/America/Iqaluit delete mode 100644 lib/pytz/zoneinfo/America/Jamaica delete mode 100644 lib/pytz/zoneinfo/America/Jujuy delete mode 100644 lib/pytz/zoneinfo/America/Juneau delete mode 100644 lib/pytz/zoneinfo/America/Kentucky/Louisville delete mode 100644 lib/pytz/zoneinfo/America/Kentucky/Monticello delete mode 100644 lib/pytz/zoneinfo/America/Knox_IN delete mode 100644 lib/pytz/zoneinfo/America/Kralendijk delete mode 100644 lib/pytz/zoneinfo/America/La_Paz delete mode 100644 lib/pytz/zoneinfo/America/Lima delete mode 100644 lib/pytz/zoneinfo/America/Los_Angeles delete mode 100644 lib/pytz/zoneinfo/America/Louisville delete mode 100644 lib/pytz/zoneinfo/America/Lower_Princes delete mode 100644 lib/pytz/zoneinfo/America/Maceio delete mode 100644 lib/pytz/zoneinfo/America/Managua delete mode 100644 lib/pytz/zoneinfo/America/Manaus delete mode 100644 lib/pytz/zoneinfo/America/Marigot delete mode 100644 lib/pytz/zoneinfo/America/Martinique delete mode 100644 lib/pytz/zoneinfo/America/Matamoros delete mode 100644 lib/pytz/zoneinfo/America/Mazatlan delete mode 100644 lib/pytz/zoneinfo/America/Mendoza delete mode 100644 lib/pytz/zoneinfo/America/Menominee delete mode 100644 lib/pytz/zoneinfo/America/Merida delete mode 100644 lib/pytz/zoneinfo/America/Metlakatla delete mode 100644 lib/pytz/zoneinfo/America/Mexico_City delete mode 100644 lib/pytz/zoneinfo/America/Miquelon delete mode 100644 lib/pytz/zoneinfo/America/Moncton delete mode 100644 lib/pytz/zoneinfo/America/Monterrey delete mode 100644 lib/pytz/zoneinfo/America/Montevideo delete mode 100644 lib/pytz/zoneinfo/America/Montreal delete mode 100644 lib/pytz/zoneinfo/America/Montserrat delete mode 100644 lib/pytz/zoneinfo/America/Nassau delete mode 100644 lib/pytz/zoneinfo/America/New_York delete mode 100644 lib/pytz/zoneinfo/America/Nipigon delete mode 100644 lib/pytz/zoneinfo/America/Nome delete mode 100644 lib/pytz/zoneinfo/America/Noronha delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/Beulah delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/Center delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/New_Salem delete mode 100644 lib/pytz/zoneinfo/America/Ojinaga delete mode 100644 lib/pytz/zoneinfo/America/Panama 
delete mode 100644 lib/pytz/zoneinfo/America/Pangnirtung delete mode 100644 lib/pytz/zoneinfo/America/Paramaribo delete mode 100644 lib/pytz/zoneinfo/America/Phoenix delete mode 100644 lib/pytz/zoneinfo/America/Port-au-Prince delete mode 100644 lib/pytz/zoneinfo/America/Port_of_Spain delete mode 100644 lib/pytz/zoneinfo/America/Porto_Acre delete mode 100644 lib/pytz/zoneinfo/America/Porto_Velho delete mode 100644 lib/pytz/zoneinfo/America/Puerto_Rico delete mode 100644 lib/pytz/zoneinfo/America/Rainy_River delete mode 100644 lib/pytz/zoneinfo/America/Rankin_Inlet delete mode 100644 lib/pytz/zoneinfo/America/Recife delete mode 100644 lib/pytz/zoneinfo/America/Regina delete mode 100644 lib/pytz/zoneinfo/America/Resolute delete mode 100644 lib/pytz/zoneinfo/America/Rio_Branco delete mode 100644 lib/pytz/zoneinfo/America/Rosario delete mode 100644 lib/pytz/zoneinfo/America/Santa_Isabel delete mode 100644 lib/pytz/zoneinfo/America/Santarem delete mode 100644 lib/pytz/zoneinfo/America/Santiago delete mode 100644 lib/pytz/zoneinfo/America/Santo_Domingo delete mode 100644 lib/pytz/zoneinfo/America/Sao_Paulo delete mode 100644 lib/pytz/zoneinfo/America/Scoresbysund delete mode 100644 lib/pytz/zoneinfo/America/Shiprock delete mode 100644 lib/pytz/zoneinfo/America/Sitka delete mode 100644 lib/pytz/zoneinfo/America/St_Barthelemy delete mode 100644 lib/pytz/zoneinfo/America/St_Johns delete mode 100644 lib/pytz/zoneinfo/America/St_Kitts delete mode 100644 lib/pytz/zoneinfo/America/St_Lucia delete mode 100644 lib/pytz/zoneinfo/America/St_Thomas delete mode 100644 lib/pytz/zoneinfo/America/St_Vincent delete mode 100644 lib/pytz/zoneinfo/America/Swift_Current delete mode 100644 lib/pytz/zoneinfo/America/Tegucigalpa delete mode 100644 lib/pytz/zoneinfo/America/Thule delete mode 100644 lib/pytz/zoneinfo/America/Thunder_Bay delete mode 100644 lib/pytz/zoneinfo/America/Tijuana delete mode 100644 lib/pytz/zoneinfo/America/Toronto delete mode 100644 lib/pytz/zoneinfo/America/Tortola delete mode 100644 lib/pytz/zoneinfo/America/Vancouver delete mode 100644 lib/pytz/zoneinfo/America/Virgin delete mode 100644 lib/pytz/zoneinfo/America/Whitehorse delete mode 100644 lib/pytz/zoneinfo/America/Winnipeg delete mode 100644 lib/pytz/zoneinfo/America/Yakutat delete mode 100644 lib/pytz/zoneinfo/America/Yellowknife delete mode 100644 lib/pytz/zoneinfo/Antarctica/Casey delete mode 100644 lib/pytz/zoneinfo/Antarctica/Davis delete mode 100644 lib/pytz/zoneinfo/Antarctica/DumontDUrville delete mode 100644 lib/pytz/zoneinfo/Antarctica/Macquarie delete mode 100644 lib/pytz/zoneinfo/Antarctica/Mawson delete mode 100644 lib/pytz/zoneinfo/Antarctica/McMurdo delete mode 100644 lib/pytz/zoneinfo/Antarctica/Palmer delete mode 100644 lib/pytz/zoneinfo/Antarctica/Rothera delete mode 100644 lib/pytz/zoneinfo/Antarctica/South_Pole delete mode 100644 lib/pytz/zoneinfo/Antarctica/Syowa delete mode 100644 lib/pytz/zoneinfo/Antarctica/Troll delete mode 100644 lib/pytz/zoneinfo/Antarctica/Vostok delete mode 100644 lib/pytz/zoneinfo/Arctic/Longyearbyen delete mode 100644 lib/pytz/zoneinfo/Asia/Aden delete mode 100644 lib/pytz/zoneinfo/Asia/Almaty delete mode 100644 lib/pytz/zoneinfo/Asia/Amman delete mode 100644 lib/pytz/zoneinfo/Asia/Anadyr delete mode 100644 lib/pytz/zoneinfo/Asia/Aqtau delete mode 100644 lib/pytz/zoneinfo/Asia/Aqtobe delete mode 100644 lib/pytz/zoneinfo/Asia/Ashgabat delete mode 100644 lib/pytz/zoneinfo/Asia/Ashkhabad delete mode 100644 lib/pytz/zoneinfo/Asia/Baghdad delete mode 100644 lib/pytz/zoneinfo/Asia/Bahrain delete 
mode 100644 lib/pytz/zoneinfo/Asia/Baku delete mode 100644 lib/pytz/zoneinfo/Asia/Bangkok delete mode 100644 lib/pytz/zoneinfo/Asia/Beirut delete mode 100644 lib/pytz/zoneinfo/Asia/Bishkek delete mode 100644 lib/pytz/zoneinfo/Asia/Brunei delete mode 100644 lib/pytz/zoneinfo/Asia/Calcutta delete mode 100644 lib/pytz/zoneinfo/Asia/Chita delete mode 100644 lib/pytz/zoneinfo/Asia/Choibalsan delete mode 100644 lib/pytz/zoneinfo/Asia/Chongqing delete mode 100644 lib/pytz/zoneinfo/Asia/Chungking delete mode 100644 lib/pytz/zoneinfo/Asia/Colombo delete mode 100644 lib/pytz/zoneinfo/Asia/Dacca delete mode 100644 lib/pytz/zoneinfo/Asia/Damascus delete mode 100644 lib/pytz/zoneinfo/Asia/Dhaka delete mode 100644 lib/pytz/zoneinfo/Asia/Dili delete mode 100644 lib/pytz/zoneinfo/Asia/Dubai delete mode 100644 lib/pytz/zoneinfo/Asia/Dushanbe delete mode 100644 lib/pytz/zoneinfo/Asia/Gaza delete mode 100644 lib/pytz/zoneinfo/Asia/Harbin delete mode 100644 lib/pytz/zoneinfo/Asia/Hebron delete mode 100644 lib/pytz/zoneinfo/Asia/Ho_Chi_Minh delete mode 100644 lib/pytz/zoneinfo/Asia/Hong_Kong delete mode 100644 lib/pytz/zoneinfo/Asia/Hovd delete mode 100644 lib/pytz/zoneinfo/Asia/Irkutsk delete mode 100644 lib/pytz/zoneinfo/Asia/Istanbul delete mode 100644 lib/pytz/zoneinfo/Asia/Jakarta delete mode 100644 lib/pytz/zoneinfo/Asia/Jayapura delete mode 100644 lib/pytz/zoneinfo/Asia/Jerusalem delete mode 100644 lib/pytz/zoneinfo/Asia/Kabul delete mode 100644 lib/pytz/zoneinfo/Asia/Kamchatka delete mode 100644 lib/pytz/zoneinfo/Asia/Karachi delete mode 100644 lib/pytz/zoneinfo/Asia/Kashgar delete mode 100644 lib/pytz/zoneinfo/Asia/Kathmandu delete mode 100644 lib/pytz/zoneinfo/Asia/Katmandu delete mode 100644 lib/pytz/zoneinfo/Asia/Khandyga delete mode 100644 lib/pytz/zoneinfo/Asia/Kolkata delete mode 100644 lib/pytz/zoneinfo/Asia/Krasnoyarsk delete mode 100644 lib/pytz/zoneinfo/Asia/Kuala_Lumpur delete mode 100644 lib/pytz/zoneinfo/Asia/Kuching delete mode 100644 lib/pytz/zoneinfo/Asia/Kuwait delete mode 100644 lib/pytz/zoneinfo/Asia/Macao delete mode 100644 lib/pytz/zoneinfo/Asia/Macau delete mode 100644 lib/pytz/zoneinfo/Asia/Magadan delete mode 100644 lib/pytz/zoneinfo/Asia/Makassar delete mode 100644 lib/pytz/zoneinfo/Asia/Manila delete mode 100644 lib/pytz/zoneinfo/Asia/Muscat delete mode 100644 lib/pytz/zoneinfo/Asia/Nicosia delete mode 100644 lib/pytz/zoneinfo/Asia/Novokuznetsk delete mode 100644 lib/pytz/zoneinfo/Asia/Novosibirsk delete mode 100644 lib/pytz/zoneinfo/Asia/Omsk delete mode 100644 lib/pytz/zoneinfo/Asia/Oral delete mode 100644 lib/pytz/zoneinfo/Asia/Phnom_Penh delete mode 100644 lib/pytz/zoneinfo/Asia/Pontianak delete mode 100644 lib/pytz/zoneinfo/Asia/Pyongyang delete mode 100644 lib/pytz/zoneinfo/Asia/Qatar delete mode 100644 lib/pytz/zoneinfo/Asia/Qyzylorda delete mode 100644 lib/pytz/zoneinfo/Asia/Rangoon delete mode 100644 lib/pytz/zoneinfo/Asia/Riyadh delete mode 100644 lib/pytz/zoneinfo/Asia/Saigon delete mode 100644 lib/pytz/zoneinfo/Asia/Sakhalin delete mode 100644 lib/pytz/zoneinfo/Asia/Samarkand delete mode 100644 lib/pytz/zoneinfo/Asia/Seoul delete mode 100644 lib/pytz/zoneinfo/Asia/Shanghai delete mode 100644 lib/pytz/zoneinfo/Asia/Singapore delete mode 100644 lib/pytz/zoneinfo/Asia/Srednekolymsk delete mode 100644 lib/pytz/zoneinfo/Asia/Taipei delete mode 100644 lib/pytz/zoneinfo/Asia/Tashkent delete mode 100644 lib/pytz/zoneinfo/Asia/Tbilisi delete mode 100644 lib/pytz/zoneinfo/Asia/Tehran delete mode 100644 lib/pytz/zoneinfo/Asia/Tel_Aviv delete mode 100644 
lib/pytz/zoneinfo/Asia/Thimbu delete mode 100644 lib/pytz/zoneinfo/Asia/Thimphu delete mode 100644 lib/pytz/zoneinfo/Asia/Tokyo delete mode 100644 lib/pytz/zoneinfo/Asia/Ujung_Pandang delete mode 100644 lib/pytz/zoneinfo/Asia/Ulaanbaatar delete mode 100644 lib/pytz/zoneinfo/Asia/Ulan_Bator delete mode 100644 lib/pytz/zoneinfo/Asia/Urumqi delete mode 100644 lib/pytz/zoneinfo/Asia/Ust-Nera delete mode 100644 lib/pytz/zoneinfo/Asia/Vientiane delete mode 100644 lib/pytz/zoneinfo/Asia/Vladivostok delete mode 100644 lib/pytz/zoneinfo/Asia/Yakutsk delete mode 100644 lib/pytz/zoneinfo/Asia/Yekaterinburg delete mode 100644 lib/pytz/zoneinfo/Asia/Yerevan delete mode 100644 lib/pytz/zoneinfo/Atlantic/Azores delete mode 100644 lib/pytz/zoneinfo/Atlantic/Bermuda delete mode 100644 lib/pytz/zoneinfo/Atlantic/Canary delete mode 100644 lib/pytz/zoneinfo/Atlantic/Cape_Verde delete mode 100644 lib/pytz/zoneinfo/Atlantic/Faeroe delete mode 100644 lib/pytz/zoneinfo/Atlantic/Faroe delete mode 100644 lib/pytz/zoneinfo/Atlantic/Jan_Mayen delete mode 100644 lib/pytz/zoneinfo/Atlantic/Madeira delete mode 100644 lib/pytz/zoneinfo/Atlantic/Reykjavik delete mode 100644 lib/pytz/zoneinfo/Atlantic/South_Georgia delete mode 100644 lib/pytz/zoneinfo/Atlantic/St_Helena delete mode 100644 lib/pytz/zoneinfo/Atlantic/Stanley delete mode 100644 lib/pytz/zoneinfo/Australia/ACT delete mode 100644 lib/pytz/zoneinfo/Australia/Adelaide delete mode 100644 lib/pytz/zoneinfo/Australia/Brisbane delete mode 100644 lib/pytz/zoneinfo/Australia/Broken_Hill delete mode 100644 lib/pytz/zoneinfo/Australia/Canberra delete mode 100644 lib/pytz/zoneinfo/Australia/Currie delete mode 100644 lib/pytz/zoneinfo/Australia/Darwin delete mode 100644 lib/pytz/zoneinfo/Australia/Eucla delete mode 100644 lib/pytz/zoneinfo/Australia/Hobart delete mode 100644 lib/pytz/zoneinfo/Australia/LHI delete mode 100644 lib/pytz/zoneinfo/Australia/Lindeman delete mode 100644 lib/pytz/zoneinfo/Australia/Lord_Howe delete mode 100644 lib/pytz/zoneinfo/Australia/Melbourne delete mode 100644 lib/pytz/zoneinfo/Australia/NSW delete mode 100644 lib/pytz/zoneinfo/Australia/North delete mode 100644 lib/pytz/zoneinfo/Australia/Perth delete mode 100644 lib/pytz/zoneinfo/Australia/Queensland delete mode 100644 lib/pytz/zoneinfo/Australia/South delete mode 100644 lib/pytz/zoneinfo/Australia/Sydney delete mode 100644 lib/pytz/zoneinfo/Australia/Tasmania delete mode 100644 lib/pytz/zoneinfo/Australia/Victoria delete mode 100644 lib/pytz/zoneinfo/Australia/West delete mode 100644 lib/pytz/zoneinfo/Australia/Yancowinna delete mode 100644 lib/pytz/zoneinfo/Brazil/Acre delete mode 100644 lib/pytz/zoneinfo/Brazil/DeNoronha delete mode 100644 lib/pytz/zoneinfo/Brazil/East delete mode 100644 lib/pytz/zoneinfo/Brazil/West delete mode 100644 lib/pytz/zoneinfo/CET delete mode 100644 lib/pytz/zoneinfo/CST6CDT delete mode 100644 lib/pytz/zoneinfo/Canada/Atlantic delete mode 100644 lib/pytz/zoneinfo/Canada/Central delete mode 100644 lib/pytz/zoneinfo/Canada/East-Saskatchewan delete mode 100644 lib/pytz/zoneinfo/Canada/Eastern delete mode 100644 lib/pytz/zoneinfo/Canada/Mountain delete mode 100644 lib/pytz/zoneinfo/Canada/Newfoundland delete mode 100644 lib/pytz/zoneinfo/Canada/Pacific delete mode 100644 lib/pytz/zoneinfo/Canada/Saskatchewan delete mode 100644 lib/pytz/zoneinfo/Canada/Yukon delete mode 100644 lib/pytz/zoneinfo/Chile/Continental delete mode 100644 lib/pytz/zoneinfo/Chile/EasterIsland delete mode 100644 lib/pytz/zoneinfo/Cuba delete mode 100644 lib/pytz/zoneinfo/EET delete mode 
100644 lib/pytz/zoneinfo/EST delete mode 100644 lib/pytz/zoneinfo/EST5EDT delete mode 100644 lib/pytz/zoneinfo/Egypt delete mode 100644 lib/pytz/zoneinfo/Eire delete mode 100644 lib/pytz/zoneinfo/Etc/GMT delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+0 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+1 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+10 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+11 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+12 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+2 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+3 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+4 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+5 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+6 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+7 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+8 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+9 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-0 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-1 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-10 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-11 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-12 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-13 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-14 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-2 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-3 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-4 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-5 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-6 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-7 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-8 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-9 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT0 delete mode 100644 lib/pytz/zoneinfo/Etc/Greenwich delete mode 100644 lib/pytz/zoneinfo/Etc/UCT delete mode 100644 lib/pytz/zoneinfo/Etc/UTC delete mode 100644 lib/pytz/zoneinfo/Etc/Universal delete mode 100644 lib/pytz/zoneinfo/Etc/Zulu delete mode 100644 lib/pytz/zoneinfo/Europe/Amsterdam delete mode 100644 lib/pytz/zoneinfo/Europe/Andorra delete mode 100644 lib/pytz/zoneinfo/Europe/Athens delete mode 100644 lib/pytz/zoneinfo/Europe/Belfast delete mode 100644 lib/pytz/zoneinfo/Europe/Belgrade delete mode 100644 lib/pytz/zoneinfo/Europe/Berlin delete mode 100644 lib/pytz/zoneinfo/Europe/Bratislava delete mode 100644 lib/pytz/zoneinfo/Europe/Brussels delete mode 100644 lib/pytz/zoneinfo/Europe/Bucharest delete mode 100644 lib/pytz/zoneinfo/Europe/Budapest delete mode 100644 lib/pytz/zoneinfo/Europe/Busingen delete mode 100644 lib/pytz/zoneinfo/Europe/Chisinau delete mode 100644 lib/pytz/zoneinfo/Europe/Copenhagen delete mode 100644 lib/pytz/zoneinfo/Europe/Dublin delete mode 100644 lib/pytz/zoneinfo/Europe/Gibraltar delete mode 100644 lib/pytz/zoneinfo/Europe/Guernsey delete mode 100644 lib/pytz/zoneinfo/Europe/Helsinki delete mode 100644 lib/pytz/zoneinfo/Europe/Isle_of_Man delete mode 100644 lib/pytz/zoneinfo/Europe/Istanbul delete mode 100644 lib/pytz/zoneinfo/Europe/Jersey delete mode 100644 lib/pytz/zoneinfo/Europe/Kaliningrad delete mode 100644 lib/pytz/zoneinfo/Europe/Kiev delete mode 100644 lib/pytz/zoneinfo/Europe/Lisbon delete mode 100644 lib/pytz/zoneinfo/Europe/Ljubljana delete mode 100644 lib/pytz/zoneinfo/Europe/London delete mode 100644 lib/pytz/zoneinfo/Europe/Luxembourg delete mode 100644 lib/pytz/zoneinfo/Europe/Madrid delete mode 100644 lib/pytz/zoneinfo/Europe/Malta delete mode 100644 lib/pytz/zoneinfo/Europe/Mariehamn delete mode 100644 lib/pytz/zoneinfo/Europe/Minsk delete mode 100644 lib/pytz/zoneinfo/Europe/Monaco delete mode 100644 lib/pytz/zoneinfo/Europe/Moscow delete mode 100644 lib/pytz/zoneinfo/Europe/Nicosia delete 
mode 100644 lib/pytz/zoneinfo/Europe/Oslo delete mode 100644 lib/pytz/zoneinfo/Europe/Paris delete mode 100644 lib/pytz/zoneinfo/Europe/Podgorica delete mode 100644 lib/pytz/zoneinfo/Europe/Prague delete mode 100644 lib/pytz/zoneinfo/Europe/Riga delete mode 100644 lib/pytz/zoneinfo/Europe/Rome delete mode 100644 lib/pytz/zoneinfo/Europe/Samara delete mode 100644 lib/pytz/zoneinfo/Europe/San_Marino delete mode 100644 lib/pytz/zoneinfo/Europe/Sarajevo delete mode 100644 lib/pytz/zoneinfo/Europe/Simferopol delete mode 100644 lib/pytz/zoneinfo/Europe/Skopje delete mode 100644 lib/pytz/zoneinfo/Europe/Sofia delete mode 100644 lib/pytz/zoneinfo/Europe/Stockholm delete mode 100644 lib/pytz/zoneinfo/Europe/Tallinn delete mode 100644 lib/pytz/zoneinfo/Europe/Tirane delete mode 100644 lib/pytz/zoneinfo/Europe/Tiraspol delete mode 100644 lib/pytz/zoneinfo/Europe/Uzhgorod delete mode 100644 lib/pytz/zoneinfo/Europe/Vaduz delete mode 100644 lib/pytz/zoneinfo/Europe/Vatican delete mode 100644 lib/pytz/zoneinfo/Europe/Vienna delete mode 100644 lib/pytz/zoneinfo/Europe/Vilnius delete mode 100644 lib/pytz/zoneinfo/Europe/Volgograd delete mode 100644 lib/pytz/zoneinfo/Europe/Warsaw delete mode 100644 lib/pytz/zoneinfo/Europe/Zagreb delete mode 100644 lib/pytz/zoneinfo/Europe/Zaporozhye delete mode 100644 lib/pytz/zoneinfo/Europe/Zurich delete mode 100644 lib/pytz/zoneinfo/Factory delete mode 100644 lib/pytz/zoneinfo/GB delete mode 100644 lib/pytz/zoneinfo/GB-Eire delete mode 100644 lib/pytz/zoneinfo/GMT delete mode 100644 lib/pytz/zoneinfo/GMT+0 delete mode 100644 lib/pytz/zoneinfo/GMT-0 delete mode 100644 lib/pytz/zoneinfo/GMT0 delete mode 100644 lib/pytz/zoneinfo/Greenwich delete mode 100644 lib/pytz/zoneinfo/HST delete mode 100644 lib/pytz/zoneinfo/Hongkong delete mode 100644 lib/pytz/zoneinfo/Iceland delete mode 100644 lib/pytz/zoneinfo/Indian/Antananarivo delete mode 100644 lib/pytz/zoneinfo/Indian/Chagos delete mode 100644 lib/pytz/zoneinfo/Indian/Christmas delete mode 100644 lib/pytz/zoneinfo/Indian/Cocos delete mode 100644 lib/pytz/zoneinfo/Indian/Comoro delete mode 100644 lib/pytz/zoneinfo/Indian/Kerguelen delete mode 100644 lib/pytz/zoneinfo/Indian/Mahe delete mode 100644 lib/pytz/zoneinfo/Indian/Maldives delete mode 100644 lib/pytz/zoneinfo/Indian/Mauritius delete mode 100644 lib/pytz/zoneinfo/Indian/Mayotte delete mode 100644 lib/pytz/zoneinfo/Indian/Reunion delete mode 100644 lib/pytz/zoneinfo/Iran delete mode 100644 lib/pytz/zoneinfo/Israel delete mode 100644 lib/pytz/zoneinfo/Jamaica delete mode 100644 lib/pytz/zoneinfo/Japan delete mode 100644 lib/pytz/zoneinfo/Kwajalein delete mode 100644 lib/pytz/zoneinfo/Libya delete mode 100644 lib/pytz/zoneinfo/MET delete mode 100644 lib/pytz/zoneinfo/MST delete mode 100644 lib/pytz/zoneinfo/MST7MDT delete mode 100644 lib/pytz/zoneinfo/Mexico/BajaNorte delete mode 100644 lib/pytz/zoneinfo/Mexico/BajaSur delete mode 100644 lib/pytz/zoneinfo/Mexico/General delete mode 100644 lib/pytz/zoneinfo/NZ delete mode 100644 lib/pytz/zoneinfo/NZ-CHAT delete mode 100644 lib/pytz/zoneinfo/Navajo delete mode 100644 lib/pytz/zoneinfo/PRC delete mode 100644 lib/pytz/zoneinfo/PST8PDT delete mode 100644 lib/pytz/zoneinfo/Pacific/Apia delete mode 100644 lib/pytz/zoneinfo/Pacific/Auckland delete mode 100644 lib/pytz/zoneinfo/Pacific/Bougainville delete mode 100644 lib/pytz/zoneinfo/Pacific/Chatham delete mode 100644 lib/pytz/zoneinfo/Pacific/Chuuk delete mode 100644 lib/pytz/zoneinfo/Pacific/Easter delete mode 100644 lib/pytz/zoneinfo/Pacific/Efate delete mode 100644 
lib/pytz/zoneinfo/Pacific/Enderbury delete mode 100644 lib/pytz/zoneinfo/Pacific/Fakaofo delete mode 100644 lib/pytz/zoneinfo/Pacific/Fiji delete mode 100644 lib/pytz/zoneinfo/Pacific/Funafuti delete mode 100644 lib/pytz/zoneinfo/Pacific/Galapagos delete mode 100644 lib/pytz/zoneinfo/Pacific/Gambier delete mode 100644 lib/pytz/zoneinfo/Pacific/Guadalcanal delete mode 100644 lib/pytz/zoneinfo/Pacific/Guam delete mode 100644 lib/pytz/zoneinfo/Pacific/Honolulu delete mode 100644 lib/pytz/zoneinfo/Pacific/Johnston delete mode 100644 lib/pytz/zoneinfo/Pacific/Kiritimati delete mode 100644 lib/pytz/zoneinfo/Pacific/Kosrae delete mode 100644 lib/pytz/zoneinfo/Pacific/Kwajalein delete mode 100644 lib/pytz/zoneinfo/Pacific/Majuro delete mode 100644 lib/pytz/zoneinfo/Pacific/Marquesas delete mode 100644 lib/pytz/zoneinfo/Pacific/Midway delete mode 100644 lib/pytz/zoneinfo/Pacific/Nauru delete mode 100644 lib/pytz/zoneinfo/Pacific/Niue delete mode 100644 lib/pytz/zoneinfo/Pacific/Norfolk delete mode 100644 lib/pytz/zoneinfo/Pacific/Noumea delete mode 100644 lib/pytz/zoneinfo/Pacific/Pago_Pago delete mode 100644 lib/pytz/zoneinfo/Pacific/Palau delete mode 100644 lib/pytz/zoneinfo/Pacific/Pitcairn delete mode 100644 lib/pytz/zoneinfo/Pacific/Pohnpei delete mode 100644 lib/pytz/zoneinfo/Pacific/Ponape delete mode 100644 lib/pytz/zoneinfo/Pacific/Port_Moresby delete mode 100644 lib/pytz/zoneinfo/Pacific/Rarotonga delete mode 100644 lib/pytz/zoneinfo/Pacific/Saipan delete mode 100644 lib/pytz/zoneinfo/Pacific/Samoa delete mode 100644 lib/pytz/zoneinfo/Pacific/Tahiti delete mode 100644 lib/pytz/zoneinfo/Pacific/Tarawa delete mode 100644 lib/pytz/zoneinfo/Pacific/Tongatapu delete mode 100644 lib/pytz/zoneinfo/Pacific/Truk delete mode 100644 lib/pytz/zoneinfo/Pacific/Wake delete mode 100644 lib/pytz/zoneinfo/Pacific/Wallis delete mode 100644 lib/pytz/zoneinfo/Pacific/Yap delete mode 100644 lib/pytz/zoneinfo/Poland delete mode 100644 lib/pytz/zoneinfo/Portugal delete mode 100644 lib/pytz/zoneinfo/ROC delete mode 100644 lib/pytz/zoneinfo/ROK delete mode 100644 lib/pytz/zoneinfo/Singapore delete mode 100644 lib/pytz/zoneinfo/Turkey delete mode 100644 lib/pytz/zoneinfo/UCT delete mode 100644 lib/pytz/zoneinfo/US/Alaska delete mode 100644 lib/pytz/zoneinfo/US/Aleutian delete mode 100644 lib/pytz/zoneinfo/US/Arizona delete mode 100644 lib/pytz/zoneinfo/US/Central delete mode 100644 lib/pytz/zoneinfo/US/East-Indiana delete mode 100644 lib/pytz/zoneinfo/US/Eastern delete mode 100644 lib/pytz/zoneinfo/US/Hawaii delete mode 100644 lib/pytz/zoneinfo/US/Indiana-Starke delete mode 100644 lib/pytz/zoneinfo/US/Michigan delete mode 100644 lib/pytz/zoneinfo/US/Mountain delete mode 100644 lib/pytz/zoneinfo/US/Pacific delete mode 100644 lib/pytz/zoneinfo/US/Pacific-New delete mode 100644 lib/pytz/zoneinfo/US/Samoa delete mode 100644 lib/pytz/zoneinfo/UTC delete mode 100644 lib/pytz/zoneinfo/Universal delete mode 100644 lib/pytz/zoneinfo/W-SU delete mode 100644 lib/pytz/zoneinfo/WET delete mode 100644 lib/pytz/zoneinfo/Zulu delete mode 100644 lib/pytz/zoneinfo/iso3166.tab delete mode 100644 lib/pytz/zoneinfo/localtime delete mode 100644 lib/pytz/zoneinfo/posixrules delete mode 100644 lib/pytz/zoneinfo/zone.tab delete mode 100644 lib/pytz/zoneinfo/zone1970.tab delete mode 100644 lib/requests/__init__.py delete mode 100644 lib/requests/adapters.py delete mode 100644 lib/requests/api.py delete mode 100644 lib/requests/auth.py delete mode 100644 lib/requests/cacert.pem delete mode 100644 lib/requests/certs.py delete mode 100644 
lib/requests/compat.py delete mode 100644 lib/requests/cookies.py delete mode 100644 lib/requests/exceptions.py delete mode 100644 lib/requests/hooks.py delete mode 100644 lib/requests/models.py delete mode 100644 lib/requests/packages/__init__.py delete mode 100644 lib/requests/packages/chardet/__init__.py delete mode 100644 lib/requests/packages/chardet/big5freq.py delete mode 100644 lib/requests/packages/chardet/big5prober.py delete mode 100644 lib/requests/packages/chardet/chardetect.py delete mode 100644 lib/requests/packages/chardet/chardistribution.py delete mode 100644 lib/requests/packages/chardet/charsetgroupprober.py delete mode 100644 lib/requests/packages/chardet/charsetprober.py delete mode 100644 lib/requests/packages/chardet/codingstatemachine.py delete mode 100644 lib/requests/packages/chardet/compat.py delete mode 100644 lib/requests/packages/chardet/constants.py delete mode 100644 lib/requests/packages/chardet/cp949prober.py delete mode 100644 lib/requests/packages/chardet/escprober.py delete mode 100644 lib/requests/packages/chardet/escsm.py delete mode 100644 lib/requests/packages/chardet/eucjpprober.py delete mode 100644 lib/requests/packages/chardet/euckrfreq.py delete mode 100644 lib/requests/packages/chardet/euckrprober.py delete mode 100644 lib/requests/packages/chardet/euctwfreq.py delete mode 100644 lib/requests/packages/chardet/euctwprober.py delete mode 100644 lib/requests/packages/chardet/gb2312freq.py delete mode 100644 lib/requests/packages/chardet/gb2312prober.py delete mode 100644 lib/requests/packages/chardet/hebrewprober.py delete mode 100644 lib/requests/packages/chardet/jisfreq.py delete mode 100644 lib/requests/packages/chardet/jpcntx.py delete mode 100644 lib/requests/packages/chardet/langbulgarianmodel.py delete mode 100644 lib/requests/packages/chardet/langcyrillicmodel.py delete mode 100644 lib/requests/packages/chardet/langgreekmodel.py delete mode 100644 lib/requests/packages/chardet/langhebrewmodel.py delete mode 100644 lib/requests/packages/chardet/langhungarianmodel.py delete mode 100644 lib/requests/packages/chardet/langthaimodel.py delete mode 100644 lib/requests/packages/chardet/latin1prober.py delete mode 100644 lib/requests/packages/chardet/mbcharsetprober.py delete mode 100644 lib/requests/packages/chardet/mbcsgroupprober.py delete mode 100644 lib/requests/packages/chardet/mbcssm.py delete mode 100644 lib/requests/packages/chardet/sbcharsetprober.py delete mode 100644 lib/requests/packages/chardet/sbcsgroupprober.py delete mode 100644 lib/requests/packages/chardet/sjisprober.py delete mode 100644 lib/requests/packages/chardet/universaldetector.py delete mode 100644 lib/requests/packages/chardet/utf8prober.py delete mode 100644 lib/requests/packages/urllib3/__init__.py delete mode 100644 lib/requests/packages/urllib3/_collections.py delete mode 100644 lib/requests/packages/urllib3/connection.py delete mode 100644 lib/requests/packages/urllib3/connectionpool.py delete mode 100644 lib/requests/packages/urllib3/contrib/__init__.py delete mode 100644 lib/requests/packages/urllib3/contrib/ntlmpool.py delete mode 100644 lib/requests/packages/urllib3/contrib/pyopenssl.py delete mode 100644 lib/requests/packages/urllib3/exceptions.py delete mode 100644 lib/requests/packages/urllib3/fields.py delete mode 100644 lib/requests/packages/urllib3/filepost.py delete mode 100644 lib/requests/packages/urllib3/packages/__init__.py delete mode 100644 lib/requests/packages/urllib3/packages/ordered_dict.py delete mode 100644 
lib/requests/packages/urllib3/packages/six.py delete mode 100644 lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py delete mode 100644 lib/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py delete mode 100644 lib/requests/packages/urllib3/poolmanager.py delete mode 100644 lib/requests/packages/urllib3/request.py delete mode 100644 lib/requests/packages/urllib3/response.py delete mode 100644 lib/requests/packages/urllib3/util/__init__.py delete mode 100644 lib/requests/packages/urllib3/util/connection.py delete mode 100644 lib/requests/packages/urllib3/util/request.py delete mode 100644 lib/requests/packages/urllib3/util/response.py delete mode 100644 lib/requests/packages/urllib3/util/retry.py delete mode 100644 lib/requests/packages/urllib3/util/ssl_.py delete mode 100644 lib/requests/packages/urllib3/util/timeout.py delete mode 100644 lib/requests/packages/urllib3/util/url.py delete mode 100644 lib/requests/sessions.py delete mode 100644 lib/requests/status_codes.py delete mode 100644 lib/requests/structures.py delete mode 100644 lib/requests/utils.py delete mode 100644 lib/simplejson/__init__.py delete mode 100644 lib/simplejson/decoder.py delete mode 100644 lib/simplejson/encoder.py delete mode 100644 lib/simplejson/ordered_dict.py delete mode 100644 lib/simplejson/scanner.py delete mode 100644 lib/simplejson/tests/__init__.py delete mode 100644 lib/simplejson/tests/test_bigint_as_string.py delete mode 100644 lib/simplejson/tests/test_check_circular.py delete mode 100644 lib/simplejson/tests/test_decimal.py delete mode 100644 lib/simplejson/tests/test_decode.py delete mode 100644 lib/simplejson/tests/test_default.py delete mode 100644 lib/simplejson/tests/test_dump.py delete mode 100644 lib/simplejson/tests/test_encode_basestring_ascii.py delete mode 100644 lib/simplejson/tests/test_encode_for_html.py delete mode 100644 lib/simplejson/tests/test_errors.py delete mode 100644 lib/simplejson/tests/test_fail.py delete mode 100644 lib/simplejson/tests/test_float.py delete mode 100644 lib/simplejson/tests/test_indent.py delete mode 100644 lib/simplejson/tests/test_namedtuple.py delete mode 100644 lib/simplejson/tests/test_pass1.py delete mode 100644 lib/simplejson/tests/test_pass2.py delete mode 100644 lib/simplejson/tests/test_pass3.py delete mode 100644 lib/simplejson/tests/test_recursion.py delete mode 100644 lib/simplejson/tests/test_scanstring.py delete mode 100644 lib/simplejson/tests/test_separators.py delete mode 100644 lib/simplejson/tests/test_speedups.py delete mode 100644 lib/simplejson/tests/test_tuple.py delete mode 100644 lib/simplejson/tests/test_unicode.py delete mode 100644 lib/simplejson/tool.py delete mode 100644 lib/six.py delete mode 100644 lib/tests/__init__.py delete mode 100644 lib/tests/test_cli.py delete mode 100644 lib/tzlocal/LICENSE.txt delete mode 100644 lib/tzlocal/README.rst delete mode 100644 lib/tzlocal/__init__.py delete mode 100644 lib/tzlocal/darwin.py delete mode 100644 lib/tzlocal/tests.py delete mode 100644 lib/tzlocal/unix.py delete mode 100644 lib/tzlocal/win32.py delete mode 100644 lib/tzlocal/windows_tz.py create mode 100644 requirements.txt diff --git a/Mylar.py b/Mylar.py index b4b86877..4b9354b4 100755 --- a/Mylar.py +++ b/Mylar.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # This file is part of Mylar. # # Mylar is free software: you can redistribute it and/or modify @@ -15,6 +14,7 @@ # along with Mylar. If not, see . 
import os, sys, locale +import argparse import errno import shutil import time @@ -63,9 +63,9 @@ def main(): mylar.SYS_ENCODING = 'UTF-8' if not logger.LOG_LANG.startswith('en'): - print 'language detected as non-English (%s). Forcing specific logging module - errors WILL NOT be captured in the logs' % logger.LOG_LANG + print('language detected as non-English (%s). Forcing specific logging module - errors WILL NOT be captured in the logs' % logger.LOG_LANG) else: - print 'log language set to %s' % logger.LOG_LANG + print('log language set to %s' % logger.LOG_LANG) # Set up and gather command line arguments parser = argparse.ArgumentParser(description='Automated Comic Book Downloader') @@ -96,25 +96,25 @@ def main(): if args.maintenance: if all([args.exportjson is None, args.importdatabase is None, args.importjson is None, args.importstatus is False, args.update is False, args.fixslashes is False]): - print 'Expecting subcommand with the maintenance positional argumeent' + print('Expecting subcommand with the maintenance positional argumeent') sys.exit() mylar.MAINTENANCE = True else: mylar.MAINTENANCE = False if args.verbose: - print 'Verbose/Debugging mode enabled...' + print('Verbose/Debugging mode enabled...') mylar.LOG_LEVEL = 2 elif args.quiet: mylar.QUIET = True - print 'Quiet logging mode enabled...' + print('Quiet logging mode enabled...') mylar.LOG_LEVEL = 0 else: mylar.LOG_LEVEL = 1 if args.daemon: if sys.platform == 'win32': - print "Daemonize not supported under Windows, starting normally" + print("Daemonize not supported under Windows, starting normally") else: mylar.DAEMON = True @@ -130,7 +130,7 @@ def main(): mylar.CREATEPID = True try: file(mylar.PIDFILE, 'w').write("pid\n") - except IOError, e: + except IOError as e: raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno)) else: print("Not running in daemon mode. PID file creation disabled.") @@ -177,15 +177,15 @@ def main(): # backup the db and configs before they load. if args.backup: - print '[AUTO-BACKUP] Backing up .db and config.ini files for safety.' + print('[AUTO-BACKUP] Backing up .db and config.ini files for safety.') backupdir = os.path.join(mylar.DATA_DIR, 'backup') try: os.makedirs(backupdir) - print '[AUTO-BACKUP] Directory does not exist for backup - creating : ' + backupdir + print('[AUTO-BACKUP] Directory does not exist for backup - creating : ' + backupdir) except OSError as exception: if exception.errno != errno.EEXIST: - print '[AUTO-BACKUP] Directory already exists.' + print('[AUTO-BACKUP] Directory already exists.') raise i = 0 @@ -200,14 +200,14 @@ def main(): back_1 = os.path.join(backupdir, 'config.ini.1') try: - print '[AUTO-BACKUP] Now Backing up mylar.db file' + print('[AUTO-BACKUP] Now Backing up mylar.db file') if os.path.isfile(back_1): - print '[AUTO-BACKUP] ' + back_1 + ' exists. Deleting and keeping new.' + print('[AUTO-BACKUP] ' + back_1 + ' exists. Deleting and keeping new.') os.remove(back_1) if os.path.isfile(back): - print '[AUTO-BACKUP] Now renaming ' + back + ' to ' + back_1 + print('[AUTO-BACKUP] Now renaming ' + back + ' to ' + back_1) shutil.move(back, back_1) - print '[AUTO-BACKUP] Now copying db file to ' + back + print('[AUTO-BACKUP] Now copying db file to ' + back) shutil.copy(ogfile, back) except OSError as exception: @@ -282,7 +282,7 @@ def main(): http_port = int(mylar.CONFIG.HTTP_PORT) # Check if pyOpenSSL is installed. It is required for certificate generation - # and for CherryPy. + # and for cherrypy. 
if mylar.CONFIG.ENABLE_HTTPS: try: import OpenSSL diff --git a/README.md b/README.md index 82591ea8..52e2ce47 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ ## ![Mylar Logo](https://github.com/evilhero/mylar/blob/master/data/images/mylarlogo.png) Mylar +## The PY3 branch is a WIP. +## Some things will be completely broken, while other parts may work fine or partially. +## Do not use this branch unless you're willing and able to fix things. + Mylar is an automated Comic Book (cbr/cbz) downloader program for use with NZB and torrents written in python. It supports SABnzbd, NZBGET, and many torrent clients in addition to DDL. It will allow you to monitor weekly pull-lists for items belonging to user-specific series to download, as well as being able to monitor story-arcs. Support for TPB's and GN's is also now available. diff --git a/comictagger.py b/comictagger.py index 1932b46f..bc28772f 100755 --- a/comictagger.py +++ b/comictagger.py @@ -1,5 +1,7 @@ #!/usr/bin/env python from lib.comictaggerlib.main import ctmain +#from comictaggerlib.main import ctmain +#from mylar import logger if __name__ == '__main__': ctmain() diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 6e467066..7b78249e 100644 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -999,7 +999,7 @@ <% porder = [] - for k,v in sorted(mylar.CONFIG.PROVIDER_ORDER.iteritems(), key=itemgetter(0), reverse=False): + for k,v in sorted(mylar.CONFIG.PROVIDER_ORDER.items(), key=itemgetter(0), reverse=False): porder.append(v) porder = ', '.join(porder) %> diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/PKG-INFO b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/PKG-INFO deleted file mode 100755 index 45984f9b..00000000 --- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,240 +0,0 @@ -Metadata-Version: 1.1 -Name: ConcurrentLogHandler -Version: 0.9.1 -Summary: Concurrent logging handler (drop-in replacement for RotatingFileHandler) -Home-page: http://launchpad.net/python-concurrent-log-handler -Author: Lowell Alleman -Author-email: lowell87@gmail.com -License: http://www.apache.org/licenses/LICENSE-2.0 -Description: - Overview - ======== - This module provides an additional log handler for Python's standard logging - package (PEP 282). This handler will write log events to log file which is - rotated when the log file reaches a certain size. Multiple processes can - safely write to the same log file concurrently. - - Details - ======= - .. _portalocker: http://code.activestate.com/recipes/65203/ - - The ``ConcurrentRotatingFileHandler`` class is a drop-in replacement for - Python's standard log handler ``RotatingFileHandler``. This module uses file - locking so that multiple processes can concurrently log to a single file without - dropping or clobbering log events. This module provides a file rotation scheme - like with ``RotatingFileHanler``. Extra care is taken to ensure that logs - can be safely rotated before the rotation process is started. (This module works - around the file rename issue with ``RotatingFileHandler`` on Windows, where a - rotation failure means that all subsequent log events are dropped). - - This module attempts to preserve log records at all cost. This means that log - files will grow larger than the specified maximum (rotation) size. 
So if disk - space is tight, you may want to stick with ``RotatingFileHandler``, which will - strictly adhere to the maximum file size. - - If you have multiple instances of a script (or multiple scripts) all running at - the same time and writing to the same log file, then *all* of the scripts should - be using ``ConcurrentRotatingFileHandler``. You should not attempt to mix - and match ``RotatingFileHandler`` and ``ConcurrentRotatingFileHandler``. - - This package bundles `portalocker`_ to deal with file locking. Please be aware - that portalocker only supports Unix (posix) an NT platforms at this time, and - therefore this package only supports those platforms as well. - - Installation - ============ - Use the following command to install this package:: - - pip install ConcurrentLogHandler - - If you are installing from source, you can use:: - - python setup.py install - - - Examples - ======== - - Simple Example - -------------- - Here is a example demonstrating how to use this module directly (from within - Python code):: - - from logging import getLogger, INFO - from cloghandler import ConcurrentRotatingFileHandler - import os - - log = getLogger() - # Use an absolute path to prevent file rotation trouble. - logfile = os.path.abspath("mylogfile.log") - # Rotate log after reaching 512K, keep 5 old copies. - rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 512*1024, 5) - log.addHandler(rotateHandler) - log.setLevel(INFO) - - log.info("Here is a very exciting log message, just for you") - - - Automatic fallback example - -------------------------- - If you are distributing your code and you are unsure if the - `ConcurrentLogHandler` package has been installed everywhere your code will run, - Python makes it easy to gracefully fallback to the built in - `RotatingFileHandler`, here is an example:: - - try: - from cloghandler import ConcurrentRotatingFileHandler as RFHandler - except ImportError: - # Next 2 lines are optional: issue a warning to the user - from warnings import warn - warn("ConcurrentLogHandler package not installed. Using builtin log handler") - from logging.handlers import RotatingFileHandler as RFHandler - - log = getLogger() - rotateHandler = RFHandler("/path/to/mylogfile.log", "a", 1048576, 15) - log.addHandler(rotateHandler) - - - - Config file example - ------------------- - This example shows you how to use this log handler with the logging config file - parser. This allows you to keep your logging configuration code separate from - your application code. - - Example config file: ``logging.ini``:: - - [loggers] - keys=root - - [handlers] - keys=hand01 - - [formatters] - keys=form01 - - [logger_root] - level=NOTSET - handlers=hand01 - - [handler_hand01] - class=handlers.ConcurrentRotatingFileHandler - level=NOTSET - formatter=form01 - args=("rotating.log", "a", 512*1024, 5) - - [formatter_form01] - format=%(asctime)s %(levelname)s %(message)s - - Example Python code: ``app.py``:: - - import logging, logging.config - import cloghandler - - logging.config.fileConfig("logging.ini") - log = logging.getLogger() - log.info("Here is a very exciting log message, just for you") - - - Change Log - ========== - - .. _Red Hat Bug #858912: https://bugzilla.redhat.com/show_bug.cgi?id=858912 - .. _Python Bug #15960: http://bugs.python.org/issue15960 - .. _LP Bug 1199332: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199332 - .. 
_LP Bug 1199333: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199333 - - - - 0.9.1: Bug fixes - `LP Bug 1199332`_ and `LP Bug 1199333`_. - * More gracefully handle out of disk space scenarios. Prevent release() from - throwing an exception. - * Handle logging.shutdown() in Python 2.7+. Close the lock file stream via - close(). - * Big thanks to Dan Callaghan for forwarding these issues and patches. - - - 0.9.0: Now requires Python 2.6+ - * Revamp file opening/closing and file-locking internals (inspired by - feedback from Vinay Sajip.) - * Add the 'delay' parameter (delayed log file opening) to better match the - core logging functionality in more recent version of Python. - * For anyone still using Python 2.3-2.5, please use the latest 0.8.x release - - - 0.8.6: Fixed packaging bug with test script - * Fix a small packaging bug from the 0.8.5 release. (Thanks to Björn Häuser - for bringing this to my attention.) - * Updated stresstest.py to always use the correct python version when - launching sub-processes instead of the system's default "python". - - - 0.8.5: Fixed ValueError: I/O operation on closed file - * Thanks to Vince Carney, Arif Kasim, Matt Drew, Nick Coghlan, and - Dan Callaghan for bug reports. Bugs can now be filled here: - https://bugs.launchpad.net/python-concurrent-log-handler. Bugs resolved - `Red Hat Bug #858912`_ and `Python Bug #15960`_ - * Updated ez_setup.py to 0.7.7 - * Updated portalocker to 0.3 (now maintained by Rick van Hattem) - * Initial Python 3 support (needs more testing) - * Fixed minor spelling mistakes - - - 0.8.4: Fixed lock-file naming issue - * Resolved a minor issue where lock-files would be improperly named if the - log file contained ".log" in the middle of the log name. For example, if - you log file was "/var/log/mycompany.logging.mysource.log", the lock file - would be named "/var/log/mycompany.ging.mysource.lock", which is not correct. - Thanks to Dirk Rothe for pointing this out. Since this introduce a slight - lock-file behavior difference, make sure all concurrent writers are updated - to 0.8.4 at the same time if this issue effects you. - * Updated ez_setup.py to 0.6c11 - - - 0.8.3: Fixed a log file rotation bug and updated docs - * Fixed a bug that happens after log rotation when multiple processes are - witting to the same log file. Each process ends up writing to their own - log file ("log.1" or "log.2" instead of "log"). The fix is simply to reopen - the log file and check the size again. I do not believe this bug results in - data loss; however, this certainly was not the desired behavior. (A big - thanks goes to Oliver Tonnhofer for finding, documenting, and providing a - patch for this bug.) - * Cleanup the docs. (aka "the page you are reading right now") I fixed some - silly mistakes and typos... who writes this stuff? - - - 0.8.2: Minor bug fix release (again) - * Found and resolved another issue with older logging packages that do not - support encoding. - - - 0.8.1: Minor bug fix release - * Now importing "codecs" directly; I found some slight differences in the - logging module in different Python 2.4.x releases that caused the module to - fail to load. - - - 0.8.0: Minor feature release - * Add better support for using ``logging.config.fileConfig()``. This class - is now available using ``class=handlers.ConcurrentRotatingFileHandler``. - * Minor changes in how the ``filename`` parameter is handled when given a - relative path. 
-
-
-Keywords: logging,windows,linux,unix,rotate,portalocker
-Platform: nt
-Platform: posix
-Classifier: Development Status :: 4 - Beta
-Classifier: Topic :: System :: Logging
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: License :: OSI Approved :: Apache Software License
diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/SOURCES.txt b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/SOURCES.txt
deleted file mode 100755
index 02eee052..00000000
--- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-.bzrignore
-LICENSE
-README
-do_release.sh
-ez_setup.py
-pre_commit.sh
-setup.cfg
-setup.py
-stresstest.py
-src/cloghandler.py
-src/portalocker.py
-src/ConcurrentLogHandler.egg-info/PKG-INFO
-src/ConcurrentLogHandler.egg-info/SOURCES.txt
-src/ConcurrentLogHandler.egg-info/dependency_links.txt
-src/ConcurrentLogHandler.egg-info/top_level.txt
-src/ConcurrentLogHandler.egg-info/zip-safe
\ No newline at end of file
diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/dependency_links.txt b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/dependency_links.txt
deleted file mode 100755
index 8b137891..00000000
--- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/installed-files.txt b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/installed-files.txt
deleted file mode 100755
index 936d185c..00000000
--- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/installed-files.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-../cloghandler.py
-../portalocker.py
-../cloghandler.pyc
-../portalocker.pyc
-../../../../tests/stresstest.py
-../../../../docs/README
-../../../../docs/LICENSE
-./
-SOURCES.txt
-zip-safe
-PKG-INFO
-dependency_links.txt
-top_level.txt
diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/top_level.txt b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/top_level.txt
deleted file mode 100755
index 4e964b7c..00000000
--- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/top_level.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-cloghandler
-portalocker
diff --git a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/zip-safe b/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/zip-safe
deleted file mode 100755
index 8b137891..00000000
--- a/lib/ConcurrentLogHandler/ConcurrentLogHandler-0.9.1-py2.7.egg-info/zip-safe
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/lib/ConcurrentLogHandler/cloghandler.py b/lib/ConcurrentLogHandler/cloghandler.py
deleted file mode 100755
index a4df08a9..00000000
--- a/lib/ConcurrentLogHandler/cloghandler.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# Copyright 2013 Lowell Alleman
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
-
-ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
-replacement for the Python standard log handler 'RotatingFileHandler', the primary
-difference being that this handler will continue to write to the same file if
-the file cannot be rotated for some reason, whereas the RotatingFileHandler will
-strictly adhere to the maximum file size. Unfortunately, if you are using the
-RotatingFileHandler on Windows, you will find that once an attempted rotation
-fails, all subsequent log messages are dropped. The other major advantage of
-this module is that multiple processes can safely write to a single log file.
-
-To put it another way: This module's top priority is preserving your log
-records, whereas the standard library attempts to limit disk usage, which can
-potentially drop log messages. If you are trying to determine which module to
-use, there are a number of considerations: What is most important: strict disk
-space usage or preservation of log messages? What OSes are you supporting? Can
-you afford to have processes blocked by file locks?
-
-Concurrent access is handled by using file locks, which should ensure that log
-messages are not dropped or clobbered. This means that a file lock is acquired
-and released for every log message that is written to disk. (On Windows, you may
-also run into a temporary situation where the log file must be opened and closed
-for each log message.) This can have potential performance implications. In my
-testing, performance was more than adequate, but if you need a high-volume or
-low-latency solution, I suggest you look elsewhere.
-
-This module currently only supports the 'nt' and 'posix' platforms due to the
-usage of the portalocker module. I do not have access to any other platforms
-for testing; patches are welcome.
-
-See the README file for an example usage of this module.
-
-This module supports Python 2.6 and later.
-
-"""
-
-
-__version__ = '0.9.1'
-__revision__ = 'lowell87@gmail.com-20130711022321-doutxl7zyzuwss5a 2013-07-10 22:23:21 -0400 [0]'
-__author__ = "Lowell Alleman"
-__all__ = [
-    "ConcurrentRotatingFileHandler",
-]
-
-
-import os
-import sys
-from random import randint
-from logging import Handler, LogRecord
-from logging.handlers import BaseRotatingHandler
-
-try:
-    import codecs
-except ImportError:
-    codecs = None
-
-
-
-# Question/TODO: Should we have a fallback mode if we can't load portalocker?
-# We should still be better off than with the standard RotatingFileHandler
-# class, right? We do some rename checking... that should prevent some file
-# clobbering that the builtin class allows.
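# A minimal sketch of the fallback contemplated above (the flag name is
# hypothetical; the only assumed API is the portalocker import used below):
#
#     try:
#         from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
#         LOCKING_AVAILABLE = True
#     except ImportError:
#         LOCKING_AVAILABLE = False   # degrade to rename checking only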
-
-# sibling module that handles all the ugly platform-specific details of file locking
-from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
-
-
-# Workaround for handleError() in Python 2.7+ where record is written to stderr
-class NullLogRecord(LogRecord):
-    def __init__(self):
-        pass
-    def __getattr__(self, attr):
-        return None
-
-class ConcurrentRotatingFileHandler(BaseRotatingHandler):
-    """
-    Handler for logging to a set of files, which switches from one file to the
-    next when the current file reaches a certain size. Multiple processes can
-    write to the log file concurrently, but this may mean that the file will
-    exceed the given size.
-    """
-    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
-                 encoding=None, debug=True, delay=0):
-        """
-        Open the specified file and use it as the stream for logging.
-
-        By default, the file grows indefinitely. You can specify particular
-        values of maxBytes and backupCount to allow the file to rollover at
-        a predetermined size.
-
-        Rollover occurs whenever the current log file is nearly maxBytes in
-        length. If backupCount is >= 1, the system will successively create
-        new files with the same pathname as the base file, but with extensions
-        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
-        and a base file name of "app.log", you would get "app.log",
-        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
-        written to is always "app.log" - when it gets filled up, it is closed
-        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
-        exist, then they are renamed to "app.log.2", "app.log.3" etc.
-        respectively.
-
-        If maxBytes is zero, rollover never occurs.
-
-        On Windows, it is not possible to rename a file that is currently opened
-        by another process. This means that it is not possible to rotate the
-        log files if multiple processes are using the same log file. In this
-        case, the current log file will continue to grow until the rotation can
-        be completed successfully. In order for rotation to be possible, all of
-        the other processes need to close the file first. A mechanism, called
-        "degraded" mode, has been created for this scenario. In degraded mode,
-        the log file is closed after each log message is written. So once all
-        processes have entered degraded mode, the next rotation attempt should
-        be successful and then normal logging can be resumed. Using the 'delay'
-        parameter may help reduce contention in some usage patterns.
-
-        This log handler assumes that all concurrent processes logging to a
-        single file are using only this class, and that the exact same
-        parameters are provided to each instance of this class. If, for
-        example, two different processes are using this class, but with
-        different values for 'maxBytes' or 'backupCount', then odd behavior is
-        expected. The same is true if this class is used by one application, but
-        the RotatingFileHandler is used by another.
-        """
-        # Absolute file name handling done by FileHandler since Python 2.5
-        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
-        self.delay = delay
-        self._rotateFailed = False
-        self.maxBytes = maxBytes
-        self.backupCount = backupCount
-        self._open_lockfile()
-        # For debug mode, swap out the "_degrade()" method with a more verbose one.
-        if debug:
-            self._degrade = self._degrade_debug
-
-    def _open_lockfile(self):
-        # Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
-        if self.baseFilename.endswith(".log"):
-            lock_file = self.baseFilename[:-4]
-        else:
-            lock_file = self.baseFilename
-        lock_file += ".lock"
-        self.stream_lock = open(lock_file, "w")
-
-    def _open(self, mode=None):
-        """
-        Open the current base file with the (original) mode and encoding.
-        Return the resulting stream.
-
-        Note: Copied from stdlib. Added option to override 'mode'
-        """
-        if mode is None:
-            mode = self.mode
-        if self.encoding is None:
-            stream = open(self.baseFilename, mode)
-        else:
-            stream = codecs.open(self.baseFilename, mode, self.encoding)
-        return stream
-
-    def _close(self):
-        """ Close file stream. Unlike close(), we don't tear anything down, we
-        expect the log to be re-opened after rotation."""
-        if self.stream:
-            try:
-                if not self.stream.closed:
-                    # Flushing probably isn't technically necessary, but it feels right
-                    self.stream.flush()
-                    self.stream.close()
-            finally:
-                self.stream = None
-
-    def acquire(self):
-        """ Acquire thread and file locks. Re-opens the lock file for 'degraded' mode.
-        """
-        # handle thread lock
-        Handler.acquire(self)
-        # Issue a file lock. (This is inefficient for multiple active threads
-        # within a single process. But if you're worried about high-performance,
-        # you probably aren't using this log handler.)
-        if self.stream_lock:
-            # If stream_lock=None, then assume close() was called or something
-            # else weird and ignore all file-level locks.
-            if self.stream_lock.closed:
-                # Daemonization can close all open file descriptors, see
-                # https://bugzilla.redhat.com/show_bug.cgi?id=952929
-                # Try opening the lock file again. Should we warn() here?!?
-                try:
-                    self._open_lockfile()
-                except Exception:
-                    self.handleError(NullLogRecord())
-                    # Don't try to open the stream lock again
-                    self.stream_lock = None
-                    return
-            lock(self.stream_lock, LOCK_EX)
-        # Stream will be opened as part of FileHandler.emit()
-
-    def release(self):
-        """ Release file and thread locks. If in 'degraded' mode, close the
-        stream to reduce contention until the log files can be rotated. """
-        try:
-            if self._rotateFailed:
-                self._close()
-        except Exception:
-            self.handleError(NullLogRecord())
-        finally:
-            try:
-                if self.stream_lock and not self.stream_lock.closed:
-                    unlock(self.stream_lock)
-            except Exception:
-                self.handleError(NullLogRecord())
-            finally:
-                # release thread lock
-                Handler.release(self)
-
-    def close(self):
-        """
-        Close log stream and stream_lock. """
-        try:
-            self._close()
-            if not self.stream_lock.closed:
-                self.stream_lock.close()
-        finally:
-            self.stream_lock = None
-            Handler.close(self)
-
-    def _degrade(self, degrade, msg, *args):
-        """ Set degrade mode or not. Ignore msg. """
-        self._rotateFailed = degrade
-        del msg, args  # avoid pychecker warnings
-
-    def _degrade_debug(self, degrade, msg, *args):
-        """ A more colorful version of _degrade(). (This is enabled by passing
-        "debug=True" at initialization).
-        """
-        if degrade:
-            if not self._rotateFailed:
-                sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
-                                 (os.getpid(), msg % args))
-                self._rotateFailed = True
-        else:
-            if self._rotateFailed:
-                sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
-                                 (os.getpid(), msg % args))
-                self._rotateFailed = False
-
-    def doRollover(self):
-        """
-        Do a rollover, as described in __init__().
- """ - self._close() - if self.backupCount <= 0: - # Don't keep any backups, just overwrite the existing backup file - # Locking doesn't much matter here; since we are overwriting it anyway - self.stream = self._open("w") - return - try: - # Determine if we can rename the log file or not. Windows refuses to - # rename an open file, Unix is inode base so it doesn't care. - - # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable - tmpname = None - while not tmpname or os.path.exists(tmpname): - tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0,99999999)) - try: - # Do a rename test to determine if we can successfully rename the log file - os.rename(self.baseFilename, tmpname) - except (IOError, OSError): - exc_value = sys.exc_info()[1] - self._degrade(True, "rename failed. File in use? " - "exception=%s", exc_value) - return - - # Q: Is there some way to protect this code from a KeboardInterupt? - # This isn't necessarily a data loss issue, but it certainly does - # break the rotation process during stress testing. - - # There is currently no mechanism in place to handle the situation - # where one of these log files cannot be renamed. (Example, user - # opens "logfile.3" in notepad); we could test rename each file, but - # nobody's complained about this being an issue; so the additional - # code complexity isn't warranted. - for i in range(self.backupCount - 1, 0, -1): - sfn = "%s.%d" % (self.baseFilename, i) - dfn = "%s.%d" % (self.baseFilename, i + 1) - if os.path.exists(sfn): - #print "%s -> %s" % (sfn, dfn) - if os.path.exists(dfn): - os.remove(dfn) - os.rename(sfn, dfn) - dfn = self.baseFilename + ".1" - if os.path.exists(dfn): - os.remove(dfn) - os.rename(tmpname, dfn) - #print "%s -> %s" % (self.baseFilename, dfn) - self._degrade(False, "Rotation completed") - finally: - # Re-open the output stream, but if "delay" is enabled then wait - # until the next emit() call. This could reduce rename contention in - # some usage patterns. - if not self.delay: - self.stream = self._open() - - def shouldRollover(self, record): - """ - Determine if rollover should occur. - - For those that are keeping track. This differs from the standard - library's RotatingLogHandler class. Because there is no promise to keep - the file size under maxBytes we ignore the length of the current record. - """ - del record # avoid pychecker warnings - # Is stream is not yet open, skip rollover check. (Check will occur on - # next message, after emit() calls _open()) - if self.stream is None: - return False - if self._shouldRollover(): - # If some other process already did the rollover (which is possible - # on Unix) the file our stream may now be named "log.1", thus - # triggering another rollover. Avoid this by closing and opening - # "log" again. - self._close() - self.stream = self._open() - return self._shouldRollover() - return False - - def _shouldRollover(self): - if self.maxBytes > 0: # are we rolling over? - self.stream.seek(0, 2) #due to non-posix-compliant Windows feature - if self.stream.tell() >= self.maxBytes: - return True - else: - self._degrade(False, "Rotation done or not needed at this time") - return False - - -# Publish this class to the "logging.handlers" module so that it can be use -# from a logging config file via logging.config.fileConfig(). 
-import logging.handlers -logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler diff --git a/lib/ConcurrentLogHandler/portalocker.py b/lib/ConcurrentLogHandler/portalocker.py deleted file mode 100755 index 5c0694ec..00000000 --- a/lib/ConcurrentLogHandler/portalocker.py +++ /dev/null @@ -1,141 +0,0 @@ -# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. -# Requires python 1.5.2 or better. -"""Cross-platform (posix/nt) API for flock-style file locking. - -Synopsis: - - import portalocker - file = open("somefile", "r+") - portalocker.lock(file, portalocker.LOCK_EX) - file.seek(12) - file.write("foo") - file.close() - -If you know what you're doing, you may choose to - - portalocker.unlock(file) - -before closing the file, but why? - -Methods: - - lock( file, flags ) - unlock( file ) - -Constants: - - LOCK_EX - LOCK_SH - LOCK_NB - -Exceptions: - - LockException - -Notes: - -For the 'nt' platform, this module requires the Python Extensions for Windows. -Be aware that this may not work as expected on Windows 95/98/ME. - -History: - -I learned the win32 technique for locking files from sample code -provided by John Nielsen in the documentation -that accompanies the win32 modules. - -Author: Jonathan Feinberg , - Lowell Alleman , - Rick van Hattem -Version: 0.3 -URL: https://github.com/WoLpH/portalocker -""" - - -__all__ = [ - "lock", - "unlock", - "LOCK_EX", - "LOCK_SH", - "LOCK_NB", - "LockException", -] - -import os - -class LockException(Exception): - # Error codes: - LOCK_FAILED = 1 - -if os.name == 'nt': - import win32con - import win32file - import pywintypes - LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK - LOCK_SH = 0 # the default - LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY - # is there any reason not to reuse the following structure? - __overlapped = pywintypes.OVERLAPPED() -elif os.name == 'posix': - import fcntl - LOCK_EX = fcntl.LOCK_EX - LOCK_SH = fcntl.LOCK_SH - LOCK_NB = fcntl.LOCK_NB -else: - raise RuntimeError("PortaLocker only defined for nt and posix platforms") - -if os.name == 'nt': - def lock(file, flags): - hfile = win32file._get_osfhandle(file.fileno()) - try: - win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) - except pywintypes.error, exc_value: - # error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.') - if exc_value[0] == 33: - raise LockException(LockException.LOCK_FAILED, exc_value[2]) - else: - # Q: Are there exceptions/codes we should be dealing with here? - raise - - def unlock(file): - hfile = win32file._get_osfhandle(file.fileno()) - try: - win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) - except pywintypes.error, exc_value: - if exc_value[0] == 158: - # error: (158, 'UnlockFileEx', 'The segment is already unlocked.') - # To match the 'posix' implementation, silently ignore this error - pass - else: - # Q: Are there exceptions/codes we should be dealing with here? 
- raise - -elif os.name == 'posix': - def lock(file, flags): - try: - fcntl.flock(file.fileno(), flags) - except IOError, exc_value: - # The exception code varies on different systems so we'll catch - # every IO error - raise LockException(*exc_value) - - def unlock(file): - fcntl.flock(file.fileno(), fcntl.LOCK_UN) - - - -if __name__ == '__main__': - from time import time, strftime, localtime - import sys - import portalocker - - log = open('log.txt', "a+") - portalocker.lock(log, portalocker.LOCK_EX) - - timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time())) - log.write( timestamp ) - - print "Wrote lines. Hit enter to release lock." - dummy = sys.stdin.readline() - - log.close() - diff --git a/lib/ConcurrentLogHandler/stresstest.py b/lib/ConcurrentLogHandler/stresstest.py deleted file mode 100755 index dde5531e..00000000 --- a/lib/ConcurrentLogHandler/stresstest.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python -""" stresstest.py: A stress-tester for ConcurrentRotatingFileHandler - -This utility spawns a bunch of processes that all try to concurrently write to -the same file. This is pretty much the worst-case scenario for my log handler. -Once all of the processes have completed writing to the log file, the output is -compared to see if any log messages have been lost. - -In the future, I may also add in support for testing with each process having -multiple threads. - - -""" - -__version__ = '$Id$' -__author__ = 'Lowell Alleman' - - -import os -import sys -from subprocess import call, Popen, STDOUT -from time import sleep - -ROTATE_COUNT = 5000 - -# local lib; for testing -from cloghandler import ConcurrentRotatingFileHandler - -class RotateLogStressTester: - def __init__(self, sharedfile, uniquefile, name="LogStressTester", logger_delay=0): - self.sharedfile = sharedfile - self.uniquefile = uniquefile - self.name = name - self.writeLoops = 100000 - self.rotateSize = 128 * 1024 - self.rotateCount = ROTATE_COUNT - self.random_sleep_mode = False - self.debug = False - self.logger_delay = logger_delay - - def getLogHandler(self, fn): - """ Override this method if you want to test a different logging handler - class. """ - return ConcurrentRotatingFileHandler(fn, 'a', self.rotateSize, - self.rotateCount, delay=self.logger_delay, - debug=self.debug) - # To run the test with the standard library's RotatingFileHandler: - # from logging.handlers import RotatingFileHandler - # return RotatingFileHandler(fn, 'a', self.rotateSize, self.rotateCount) - - def start(self): - from logging import getLogger, FileHandler, Formatter, DEBUG - self.log = getLogger(self.name) - self.log.setLevel(DEBUG) - - formatter = Formatter('%(asctime)s [%(process)d:%(threadName)s] %(levelname)-8s %(name)s: %(message)s') - # Unique log handler (single file) - handler = FileHandler(self.uniquefile, "w") - handler.setLevel(DEBUG) - handler.setFormatter(formatter) - self.log.addHandler(handler) - - # If you suspect that the diff stuff isn't working, un comment the next - # line. You should see this show up once per-process. - # self.log.info("Here is a line that should only be in the first output.") - - # Setup output used for testing - handler = self.getLogHandler(self.sharedfile) - handler.setLevel(DEBUG) - handler.setFormatter(formatter) - self.log.addHandler(handler) - - # If this ever becomes a real "Thread", then remove this line: - self.run() - - def run(self): - c = 0 - from random import choice, randint - # Use a bunch of random quotes, numbers, and severity levels to mix it up a bit! 
- msgs = ["I found %d puppies", "There are %d cats in your hatz", - "my favorite number is %d", "I am %d years old.", "1 + 1 = %d", - "%d/0 = DivideByZero", "blah! %d thingies!", "8 15 16 23 48 %d", - "the worlds largest prime number: %d", "%d happy meals!"] - logfuncts = [self.log.debug, self.log.info, self.log.warn, self.log.error] - - self.log.info("Starting to write random log message. Loop=%d", self.writeLoops) - while c <= self.writeLoops: - c += 1 - msg = choice(msgs) - logfunc = choice(logfuncts) - logfunc(msg, randint(0,99999999)) - - if self.random_sleep_mode and c % 1000 == 0: - # Sleep from 0-15 seconds - s = randint(1,15) - print("PID %d sleeping for %d seconds" % (os.getpid(), s)) - sleep(s) - # break - self.log.info("Done witting random log messages.") - -def iter_lognames(logfile, count): - """ Generator for log file names based on a rotation scheme """ - for i in range(count -1, 0, -1): - yield "%s.%d" % (logfile, i) - yield logfile - -def iter_logs(iterable, missing_ok=False): - """ Generator to extract log entries from shared log file. """ - for fn in iterable: - if os.path.exists(fn): - for line in open(fn): - yield line - elif not missing_ok: - raise ValueError("Missing log file %s" % fn) - -def combine_logs(combinedlog, iterable, mode="w"): - """ write all lines (iterable) into a single log file. """ - fp = open(combinedlog, mode) - for chunk in iterable: - fp.write(chunk) - fp.close() - - - -from optparse import OptionParser -parser = OptionParser(usage="usage: %prog", - version=__version__, - description="Stress test the cloghandler module.") -parser.add_option("--log-calls", metavar="NUM", - action="store", type="int", default=50000, - help="Number of logging entries to write to each log file. " - "Default is %default") -parser.add_option("--random-sleep-mode", - action="store_true", default=False) -parser.add_option("--debug", - action="store_true", default=False) -parser.add_option("--logger-delay", - action="store_true", default=False, - help="Enable the 'delay' mode in the logger class. " - "This means that the log file will be opened on demand.") - - -def main_client(args): - (options, args) = parser.parse_args(args) - if len(args) != 2: - raise ValueError("Require 2 arguments. We have %d args" % len(args)) - (shared, client) = args - - if os.path.isfile(client): - sys.stderr.write("Already a client using output file %s\n" % client) - sys.exit(1) - tester = RotateLogStressTester(shared, client, logger_delay=options.logger_delay) - tester.random_sleep_mode = options.random_sleep_mode - tester.debug = options.debug - tester.writeLoops = options.log_calls - tester.start() - print("We are done pid=%d" % os.getpid()) - - - -class TestManager: - class ChildProc(object): - """ Very simple child container class.""" - __slots__ = [ "popen", "sharedfile", "clientfile" ] - def __init__(self, **kwargs): - self.update(**kwargs) - def update(self, **kwargs): - for key, val in kwargs.items(): - setattr(self, key, val) - - def __init__(self): - self.tests = [] - - def launchPopen(self, *args, **kwargs): - proc = Popen(*args, **kwargs) - cp = self.ChildProc(popen=proc) - self.tests.append(cp) - return cp - - def wait(self, check_interval=3): - """ Wait for all child test processes to complete. 
""" - print("Waiting while children are out running and playing!") - while True: - sleep(check_interval) - waiting = [] - for cp in self.tests: - if cp.popen.poll() is None: - waiting.append(cp.popen.pid) - if not waiting: - break - print("Waiting on %r " % waiting) - print("All children have stopped.") - - def checkExitCodes(self): - for cp in self.tests: - if cp.popen.poll() != 0: - return False - return True - - - -def unified_diff(a,b, out=sys.stdout): - import difflib - ai = open(a).readlines() - bi = open(b).readlines() - for line in difflib.unified_diff(ai, bi, a, b): - out.write(line) - - - -def main_runner(args): - parser.add_option("--processes", metavar="NUM", - action="store", type="int", default=3, - help="Number of processes to spawn. Default: %default") - parser.add_option("--delay", metavar="secs", - action="store", type="float", default=2.5, - help="Wait SECS before spawning next processes. " - "Default: %default") - parser.add_option("-p", "--path", metavar="DIR", - action="store", default="test", - help="Path to a temporary directory. Default: '%default'") - - - this_script = args[0] - (options, args) = parser.parse_args(args) - options.path = os.path.abspath(options.path) - if not os.path.isdir(options.path): - os.makedirs(options.path) - - manager = TestManager() - shared = os.path.join(options.path, "shared.log") - for client_id in range(options.processes): - client = os.path.join(options.path, "client.log_client%s.log" % client_id) - cmdline = [ sys.executable, this_script, "client", shared, client, - "--log-calls=%d" % options.log_calls ] - if options.random_sleep_mode: - cmdline.append("--random-sleep-mode") - if options.debug: - cmdline.append("--debug") - if options.logger_delay: - cmdline.append("--logger-delay") - - child = manager.launchPopen(cmdline) - child.update(sharedfile=shared, clientfile=client) - sleep(options.delay) - - # Wait for all of the subprocesses to exit - manager.wait() - # Check children exit codes - if not manager.checkExitCodes(): - sys.stderr.write("One or more of the child process has failed.\n" - "Aborting test.\n") - sys.exit(2) - - client_combo = os.path.join(options.path, "client.log.combo") - shared_combo = os.path.join(options.path, "shared.log.combo") - - # Combine all of the log files... 
- client_files = [ child.clientfile for child in manager.tests ] - - if False: - def sort_em(iterable): - return iterable - else: - sort_em = sorted - - print("Writing out combined client logs...") - combine_logs(client_combo, sort_em(iter_logs(client_files))) - print("done.") - - print("Writing out combined shared logs...") - shared_log_files = iter_lognames(shared, ROTATE_COUNT) - log_lines = iter_logs(shared_log_files, missing_ok=True) - combine_logs(shared_combo, sort_em(log_lines)) - print("done.") - - print("Running internal diff: (If the next line is 'end of diff', then the stress test passed!)") - unified_diff(client_combo, shared_combo) - print(" --- end of diff ----") - - - -if __name__ == '__main__': - if len(sys.argv) > 1 and sys.argv[1].lower() == "client": - main_client(sys.argv[2:]) - else: - main_runner(sys.argv) diff --git a/lib/MultipartPostHandler.py b/lib/MultipartPostHandler.py deleted file mode 100755 index 82fa59c6..00000000 --- a/lib/MultipartPostHandler.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/python - -#### -# 06/2010 Nic Wolfe -# 02/2006 Will Holcomb -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# - -import urllib -import urllib2 -import mimetools, mimetypes -import os, sys - -# Controls how sequences are uncoded. If true, elements may be given multiple values by -# assigning a sequence. 
-doseq = 1 - -class MultipartPostHandler(urllib2.BaseHandler): - handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first - - def http_request(self, request): - data = request.get_data() - if data is not None and type(data) != str: - v_files = [] - v_vars = [] - try: - for(key, value) in data.items(): - if type(value) in (file, list, tuple): - v_files.append((key, value)) - else: - v_vars.append((key, value)) - except TypeError: - systype, value, traceback = sys.exc_info() - raise TypeError, "not a valid non-string sequence or mapping object", traceback - - if len(v_files) == 0: - data = urllib.urlencode(v_vars, doseq) - else: - boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files) - contenttype = 'multipart/form-data; boundary=%s' % boundary - if(request.has_header('Content-Type') - and request.get_header('Content-Type').find('multipart/form-data') != 0): - print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data') - request.add_unredirected_header('Content-Type', contenttype) - - request.add_data(data) - return request - - @staticmethod - def multipart_encode(vars, files, boundary = None, buffer = None): - if boundary is None: - boundary = mimetools.choose_boundary() - if buffer is None: - buffer = '' - for(key, value) in vars: - buffer += '--%s\r\n' % boundary - buffer += 'Content-Disposition: form-data; name="%s"' % key - buffer += '\r\n\r\n' + value + '\r\n' - for(key, fd) in files: - - # allow them to pass in a file or a tuple with name & data - if type(fd) == file: - name_in = fd.name - fd.seek(0) - data_in = fd.read() - elif type(fd) in (tuple, list): - name_in, data_in = fd - - filename = os.path.basename(name_in) - contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' - buffer += '--%s\r\n' % boundary - buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename) - buffer += 'Content-Type: %s\r\n' % contenttype - # buffer += 'Content-Length: %s\r\n' % file_size - buffer += '\r\n' + data_in + '\r\n' - buffer += '--%s--\r\n\r\n' % boundary - return boundary, buffer - - https_request = http_request \ No newline at end of file diff --git a/lib/apscheduler/__init__.py b/lib/apscheduler/__init__.py deleted file mode 100644 index 89965478..00000000 --- a/lib/apscheduler/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# These will be removed in APScheduler 4.0. 
-#release = __import__('pkg_resources').get_distribution('APScheduler').version.split('-')[0] -#version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.')) -#version = __version__ = '.'.join(str(x) for x in version_info[:3]) - -version_info = (3, 3, 1) -release = '3.3.1' -version = __version__ = '3.3.1' diff --git a/lib/apscheduler/events.py b/lib/apscheduler/events.py deleted file mode 100644 index 890763eb..00000000 --- a/lib/apscheduler/events.py +++ /dev/null @@ -1,94 +0,0 @@ -__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED', - 'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED', - 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', - 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', - 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES', - 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent') - - -EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0 -EVENT_SCHEDULER_SHUTDOWN = 2 ** 1 -EVENT_SCHEDULER_PAUSED = 2 ** 2 -EVENT_SCHEDULER_RESUMED = 2 ** 3 -EVENT_EXECUTOR_ADDED = 2 ** 4 -EVENT_EXECUTOR_REMOVED = 2 ** 5 -EVENT_JOBSTORE_ADDED = 2 ** 6 -EVENT_JOBSTORE_REMOVED = 2 ** 7 -EVENT_ALL_JOBS_REMOVED = 2 ** 8 -EVENT_JOB_ADDED = 2 ** 9 -EVENT_JOB_REMOVED = 2 ** 10 -EVENT_JOB_MODIFIED = 2 ** 11 -EVENT_JOB_EXECUTED = 2 ** 12 -EVENT_JOB_ERROR = 2 ** 13 -EVENT_JOB_MISSED = 2 ** 14 -EVENT_JOB_SUBMITTED = 2 ** 15 -EVENT_JOB_MAX_INSTANCES = 2 ** 16 -EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED | - EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED | - EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED | - EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED | - EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) - - -class SchedulerEvent(object): - """ - An event that concerns the scheduler itself. - - :ivar code: the type code of this event - :ivar alias: alias of the job store or executor that was added or removed (if applicable) - """ - - def __init__(self, code, alias=None): - super(SchedulerEvent, self).__init__() - self.code = code - self.alias = alias - - def __repr__(self): - return '<%s (code=%d)>' % (self.__class__.__name__, self.code) - - -class JobEvent(SchedulerEvent): - """ - An event that concerns a job. - - :ivar code: the type code of this event - :ivar job_id: identifier of the job in question - :ivar jobstore: alias of the job store containing the job in question - """ - - def __init__(self, code, job_id, jobstore): - super(JobEvent, self).__init__(code) - self.code = code - self.job_id = job_id - self.jobstore = jobstore - - -class JobSubmissionEvent(JobEvent): - """ - An event that concerns the submission of a job to its executor. - - :ivar scheduled_run_times: a list of datetimes when the job was intended to run - """ - - def __init__(self, code, job_id, jobstore, scheduled_run_times): - super(JobSubmissionEvent, self).__init__(code, job_id, jobstore) - self.scheduled_run_times = scheduled_run_times - - -class JobExecutionEvent(JobEvent): - """ - An event that concerns the running of a job within its executor. 
- - :ivar scheduled_run_time: the time when the job was scheduled to be run - :ivar retval: the return value of the successfully executed job - :ivar exception: the exception raised by the job - :ivar traceback: a formatted traceback for the exception - """ - - def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None, - traceback=None): - super(JobExecutionEvent, self).__init__(code, job_id, jobstore) - self.scheduled_run_time = scheduled_run_time - self.retval = retval - self.exception = exception - self.traceback = traceback diff --git a/lib/apscheduler/executors/__init__.py b/lib/apscheduler/executors/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/apscheduler/executors/asyncio.py b/lib/apscheduler/executors/asyncio.py deleted file mode 100644 index 44794295..00000000 --- a/lib/apscheduler/executors/asyncio.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import absolute_import - -import sys - -from apscheduler.executors.base import BaseExecutor, run_job - -try: - from asyncio import iscoroutinefunction - from apscheduler.executors.base_py3 import run_coroutine_job -except ImportError: - from trollius import iscoroutinefunction - run_coroutine_job = None - - -class AsyncIOExecutor(BaseExecutor): - """ - Runs jobs in the default executor of the event loop. - - If the job function is a native coroutine function, it is scheduled to be run directly in the - event loop as soon as possible. All other functions are run in the event loop's default - executor which is usually a thread pool. - - Plugin alias: ``asyncio`` - """ - - def start(self, scheduler, alias): - super(AsyncIOExecutor, self).start(scheduler, alias) - self._eventloop = scheduler._eventloop - - def _do_submit_job(self, job, run_times): - def callback(f): - try: - events = f.result() - except: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - if iscoroutinefunction(job.func): - if run_coroutine_job is not None: - coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) - f = self._eventloop.create_task(coro) - else: - raise Exception('Executing coroutine based jobs is not supported with Trollius') - else: - f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, - self._logger.name) - - f.add_done_callback(callback) diff --git a/lib/apscheduler/executors/base.py b/lib/apscheduler/executors/base.py deleted file mode 100644 index b36a86fc..00000000 --- a/lib/apscheduler/executors/base.py +++ /dev/null @@ -1,137 +0,0 @@ -from abc import ABCMeta, abstractmethod -from collections import defaultdict -from datetime import datetime, timedelta -from traceback import format_tb -import logging -import sys - -from pytz import utc -import six - -from apscheduler.events import ( - JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) - - -class MaxInstancesReachedError(Exception): - def __init__(self, job): - super(MaxInstancesReachedError, self).__init__( - 'Job "%s" has already reached its maximum number of instances (%d)' % - (job.id, job.max_instances)) - - -class BaseExecutor(six.with_metaclass(ABCMeta, object)): - """Abstract base class that defines the interface that every executor must implement.""" - - _scheduler = None - _lock = None - _logger = logging.getLogger('apscheduler.executors') - - def __init__(self): - super(BaseExecutor, self).__init__() - self._instances = defaultdict(lambda: 0) - - def start(self, scheduler, alias): - """ - 
Called by the scheduler when the scheduler is being started or when the executor is being - added to an already running scheduler. - - :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting - this executor - :param str|unicode alias: alias of this executor as it was assigned to the scheduler - - """ - self._scheduler = scheduler - self._lock = scheduler._create_lock() - self._logger = logging.getLogger('apscheduler.executors.%s' % alias) - - def shutdown(self, wait=True): - """ - Shuts down this executor. - - :param bool wait: ``True`` to wait until all submitted jobs - have been executed - """ - - def submit_job(self, job, run_times): - """ - Submits job for execution. - - :param Job job: job to execute - :param list[datetime] run_times: list of datetimes specifying - when the job should have been run - :raises MaxInstancesReachedError: if the maximum number of - allowed instances for this job has been reached - - """ - assert self._lock is not None, 'This executor has not been started yet' - with self._lock: - if self._instances[job.id] >= job.max_instances: - raise MaxInstancesReachedError(job) - - self._do_submit_job(job, run_times) - self._instances[job.id] += 1 - - @abstractmethod - def _do_submit_job(self, job, run_times): - """Performs the actual task of scheduling `run_job` to be called.""" - - def _run_job_success(self, job_id, events): - """ - Called by the executor with the list of generated events when :func:`run_job` has been - successfully called. - - """ - with self._lock: - self._instances[job_id] -= 1 - if self._instances[job_id] == 0: - del self._instances[job_id] - - for event in events: - self._scheduler._dispatch_event(event) - - def _run_job_error(self, job_id, exc, traceback=None): - """Called by the executor with the exception if there is an error calling `run_job`.""" - with self._lock: - self._instances[job_id] -= 1 - if self._instances[job_id] == 0: - del self._instances[job_id] - - exc_info = (exc.__class__, exc, traceback) - self._logger.error('Error running job %s', job_id, exc_info=exc_info) - - -def run_job(job, jobstore_alias, run_times, logger_name): - """ - Called by executors to run the job. Returns a list of scheduler events to be dispatched by the - scheduler. 
- - """ - events = [] - logger = logging.getLogger(logger_name) - for run_time in run_times: - # See if the job missed its run time window, and handle - # possible misfires accordingly - if job.misfire_grace_time is not None: - difference = datetime.now(utc) - run_time - grace_time = timedelta(seconds=job.misfire_grace_time) - if difference > grace_time: - events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, - run_time)) - logger.warning('Run time of job "%s" was missed by %s', job, difference) - continue - - logger.info('Running job "%s" (scheduled at %s)', job, run_time) - try: - retval = job.func(*job.args, **job.kwargs) - except: - exc, tb = sys.exc_info()[1:] - formatted_tb = ''.join(format_tb(tb)) - events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, - exception=exc, traceback=formatted_tb)) - logger.exception('Job "%s" raised an exception', job) - else: - events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, - retval=retval)) - logger.info('Job "%s" executed successfully', job) - - return events diff --git a/lib/apscheduler/executors/base_py3.py b/lib/apscheduler/executors/base_py3.py deleted file mode 100644 index 47124258..00000000 --- a/lib/apscheduler/executors/base_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -import logging -import sys -from datetime import datetime, timedelta -from traceback import format_tb - -from pytz import utc - -from apscheduler.events import ( - JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) - - -async def run_coroutine_job(job, jobstore_alias, run_times, logger_name): - """Coroutine version of run_job().""" - events = [] - logger = logging.getLogger(logger_name) - for run_time in run_times: - # See if the job missed its run time window, and handle possible misfires accordingly - if job.misfire_grace_time is not None: - difference = datetime.now(utc) - run_time - grace_time = timedelta(seconds=job.misfire_grace_time) - if difference > grace_time: - events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, - run_time)) - logger.warning('Run time of job "%s" was missed by %s', job, difference) - continue - - logger.info('Running job "%s" (scheduled at %s)', job, run_time) - try: - retval = await job.func(*job.args, **job.kwargs) - except: - exc, tb = sys.exc_info()[1:] - formatted_tb = ''.join(format_tb(tb)) - events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, - exception=exc, traceback=formatted_tb)) - logger.exception('Job "%s" raised an exception', job) - else: - events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, - retval=retval)) - logger.info('Job "%s" executed successfully', job) - - return events diff --git a/lib/apscheduler/executors/debug.py b/lib/apscheduler/executors/debug.py deleted file mode 100644 index f6454d52..00000000 --- a/lib/apscheduler/executors/debug.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys - -from apscheduler.executors.base import BaseExecutor, run_job - - -class DebugExecutor(BaseExecutor): - """ - A special executor that executes the target callable directly instead of deferring it to a - thread or process. 
- - Plugin alias: ``debug`` - """ - - def _do_submit_job(self, job, run_times): - try: - events = run_job(job, job._jobstore_alias, run_times, self._logger.name) - except: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) diff --git a/lib/apscheduler/executors/gevent.py b/lib/apscheduler/executors/gevent.py deleted file mode 100644 index a12b806a..00000000 --- a/lib/apscheduler/executors/gevent.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import absolute_import -import sys - -from apscheduler.executors.base import BaseExecutor, run_job - - -try: - import gevent -except ImportError: # pragma: nocover - raise ImportError('GeventExecutor requires gevent installed') - - -class GeventExecutor(BaseExecutor): - """ - Runs jobs as greenlets. - - Plugin alias: ``gevent`` - """ - - def _do_submit_job(self, job, run_times): - def callback(greenlet): - try: - events = greenlet.get() - except: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\ - link(callback) diff --git a/lib/apscheduler/executors/pool.py b/lib/apscheduler/executors/pool.py deleted file mode 100644 index 2f4ef455..00000000 --- a/lib/apscheduler/executors/pool.py +++ /dev/null @@ -1,54 +0,0 @@ -from abc import abstractmethod -import concurrent.futures - -from apscheduler.executors.base import BaseExecutor, run_job - - -class BasePoolExecutor(BaseExecutor): - @abstractmethod - def __init__(self, pool): - super(BasePoolExecutor, self).__init__() - self._pool = pool - - def _do_submit_job(self, job, run_times): - def callback(f): - exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else - (f.exception(), getattr(f.exception(), '__traceback__', None))) - if exc: - self._run_job_error(job.id, exc, tb) - else: - self._run_job_success(job.id, f.result()) - - f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) - f.add_done_callback(callback) - - def shutdown(self, wait=True): - self._pool.shutdown(wait) - - -class ThreadPoolExecutor(BasePoolExecutor): - """ - An executor that runs jobs in a concurrent.futures thread pool. - - Plugin alias: ``threadpool`` - - :param max_workers: the maximum number of spawned threads. - """ - - def __init__(self, max_workers=10): - pool = concurrent.futures.ThreadPoolExecutor(int(max_workers)) - super(ThreadPoolExecutor, self).__init__(pool) - - -class ProcessPoolExecutor(BasePoolExecutor): - """ - An executor that runs jobs in a concurrent.futures process pool. - - Plugin alias: ``processpool`` - - :param max_workers: the maximum number of spawned processes. 
- """ - - def __init__(self, max_workers=10): - pool = concurrent.futures.ProcessPoolExecutor(int(max_workers)) - super(ProcessPoolExecutor, self).__init__(pool) diff --git a/lib/apscheduler/executors/tornado.py b/lib/apscheduler/executors/tornado.py deleted file mode 100644 index 6519c3e8..00000000 --- a/lib/apscheduler/executors/tornado.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import - -import sys -from concurrent.futures import ThreadPoolExecutor - -from tornado.gen import convert_yielded - -from apscheduler.executors.base import BaseExecutor, run_job - -try: - from inspect import iscoroutinefunction - from apscheduler.executors.base_py3 import run_coroutine_job -except ImportError: - def iscoroutinefunction(func): - return False - - -class TornadoExecutor(BaseExecutor): - """ - Runs jobs either in a thread pool or directly on the I/O loop. - - If the job function is a native coroutine function, it is scheduled to be run directly in the - I/O loop as soon as possible. All other functions are run in a thread pool. - - Plugin alias: ``tornado`` - - :param int max_workers: maximum number of worker threads in the thread pool - """ - - def __init__(self, max_workers=10): - super(TornadoExecutor, self).__init__() - self.executor = ThreadPoolExecutor(max_workers) - - def start(self, scheduler, alias): - super(TornadoExecutor, self).start(scheduler, alias) - self._ioloop = scheduler._ioloop - - def _do_submit_job(self, job, run_times): - def callback(f): - try: - events = f.result() - except: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - if iscoroutinefunction(job.func): - f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) - else: - f = self.executor.submit(run_job, job, job._jobstore_alias, run_times, - self._logger.name) - - f = convert_yielded(f) - f.add_done_callback(callback) diff --git a/lib/apscheduler/executors/twisted.py b/lib/apscheduler/executors/twisted.py deleted file mode 100644 index c7bcf647..00000000 --- a/lib/apscheduler/executors/twisted.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.executors.base import BaseExecutor, run_job - - -class TwistedExecutor(BaseExecutor): - """ - Runs jobs in the reactor's thread pool. - - Plugin alias: ``twisted`` - """ - - def start(self, scheduler, alias): - super(TwistedExecutor, self).start(scheduler, alias) - self._reactor = scheduler._reactor - - def _do_submit_job(self, job, run_times): - def callback(success, result): - if success: - self._run_job_success(job.id, result) - else: - self._run_job_error(job.id, result.value, result.tb) - - self._reactor.getThreadPool().callInThreadWithCallback( - callback, run_job, job, job._jobstore_alias, run_times, self._logger.name) diff --git a/lib/apscheduler/job.py b/lib/apscheduler/job.py deleted file mode 100644 index b9c305db..00000000 --- a/lib/apscheduler/job.py +++ /dev/null @@ -1,289 +0,0 @@ -from collections import Iterable, Mapping -from uuid import uuid4 - -import six - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import ( - ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, - convert_to_datetime) - - -class Job(object): - """ - Contains the options given when scheduling callables and its current schedule and other state. - This class should never be instantiated by the user. 
- - :var str id: the unique identifier of this job - :var str name: the description of this job - :var func: the callable to execute - :var tuple|list args: positional arguments to the callable - :var dict kwargs: keyword arguments to the callable - :var bool coalesce: whether to only run the job once when several run times are due - :var trigger: the trigger object that controls the schedule of this job - :var str executor: the name of the executor that will run this job - :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to - be late - :var int max_instances: the maximum number of concurrently executing instances allowed for this - job - :var datetime.datetime next_run_time: the next scheduled run time of this job - - .. note:: - The ``misfire_grace_time`` has some non-obvious effects on job execution. See the - :ref:`missed-job-executions` section in the documentation for an in-depth explanation. - """ - - __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', - 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances', - 'next_run_time') - - def __init__(self, scheduler, id=None, **kwargs): - super(Job, self).__init__() - self._scheduler = scheduler - self._jobstore_alias = None - self._modify(id=id or uuid4().hex, **kwargs) - - def modify(self, **changes): - """ - Makes the given changes to this job and saves it in the associated job store. - - Accepted keyword arguments are the same as the variables on this class. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job` - - :return Job: this job instance - - """ - self._scheduler.modify_job(self.id, self._jobstore_alias, **changes) - return self - - def reschedule(self, trigger, **trigger_args): - """ - Shortcut for switching the trigger on this job. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job` - - :return Job: this job instance - - """ - self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args) - return self - - def pause(self): - """ - Temporarily suspend the execution of this job. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job` - - :return Job: this job instance - - """ - self._scheduler.pause_job(self.id, self._jobstore_alias) - return self - - def resume(self): - """ - Resume the schedule of this job if previously paused. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job` - - :return Job: this job instance - - """ - self._scheduler.resume_job(self.id, self._jobstore_alias) - return self - - def remove(self): - """ - Unschedules this job and removes it from its associated job store. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job` - - """ - self._scheduler.remove_job(self.id, self._jobstore_alias) - - @property - def pending(self): - """ - Returns ``True`` if the referenced job is still waiting to be added to its designated job - store. - - """ - return self._jobstore_alias is None - - # - # Private API - # - - def _get_run_times(self, now): - """ - Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive). 
- - :type now: datetime.datetime - :rtype: list[datetime.datetime] - - """ - run_times = [] - next_run_time = self.next_run_time - while next_run_time and next_run_time <= now: - run_times.append(next_run_time) - next_run_time = self.trigger.get_next_fire_time(next_run_time, now) - - return run_times - - def _modify(self, **changes): - """ - Validates the changes to the Job and makes the modifications if and only if all of them - validate. - - """ - approved = {} - - if 'id' in changes: - value = changes.pop('id') - if not isinstance(value, six.string_types): - raise TypeError("id must be a nonempty string") - if hasattr(self, 'id'): - raise ValueError('The job ID may not be changed') - approved['id'] = value - - if 'func' in changes or 'args' in changes or 'kwargs' in changes: - func = changes.pop('func') if 'func' in changes else self.func - args = changes.pop('args') if 'args' in changes else self.args - kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs - - if isinstance(func, six.string_types): - func_ref = func - func = ref_to_obj(func) - elif callable(func): - try: - func_ref = obj_to_ref(func) - except ValueError: - # If this happens, this Job won't be serializable - func_ref = None - else: - raise TypeError('func must be a callable or a textual reference to one') - - if not hasattr(self, 'name') and changes.get('name', None) is None: - changes['name'] = get_callable_name(func) - - if isinstance(args, six.string_types) or not isinstance(args, Iterable): - raise TypeError('args must be a non-string iterable') - if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping): - raise TypeError('kwargs must be a dict-like object') - - check_callable_args(func, args, kwargs) - - approved['func'] = func - approved['func_ref'] = func_ref - approved['args'] = args - approved['kwargs'] = kwargs - - if 'name' in changes: - value = changes.pop('name') - if not value or not isinstance(value, six.string_types): - raise TypeError("name must be a nonempty string") - approved['name'] = value - - if 'misfire_grace_time' in changes: - value = changes.pop('misfire_grace_time') - if value is not None and (not isinstance(value, six.integer_types) or value <= 0): - raise TypeError('misfire_grace_time must be either None or a positive integer') - approved['misfire_grace_time'] = value - - if 'coalesce' in changes: - value = bool(changes.pop('coalesce')) - approved['coalesce'] = value - - if 'max_instances' in changes: - value = changes.pop('max_instances') - if not isinstance(value, six.integer_types) or value <= 0: - raise TypeError('max_instances must be a positive integer') - approved['max_instances'] = value - - if 'trigger' in changes: - trigger = changes.pop('trigger') - if not isinstance(trigger, BaseTrigger): - raise TypeError('Expected a trigger instance, got %s instead' % - trigger.__class__.__name__) - - approved['trigger'] = trigger - - if 'executor' in changes: - value = changes.pop('executor') - if not isinstance(value, six.string_types): - raise TypeError('executor must be a string') - approved['executor'] = value - - if 'next_run_time' in changes: - value = changes.pop('next_run_time') - approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, - 'next_run_time') - - if changes: - raise AttributeError('The following are not modifiable attributes of Job: %s' % - ', '.join(changes)) - - for key, value in six.iteritems(approved): - setattr(self, key, value) - - def __getstate__(self): - # Don't allow this Job to be serialized if the 
function reference could not be determined
-        if not self.func_ref:
-            raise ValueError(
-                'This Job cannot be serialized since the reference to its callable (%r) could not '
-                'be determined. Consider giving a textual reference (module:function name) '
-                'instead.' % (self.func,))
-
-        return {
-            'version': 1,
-            'id': self.id,
-            'func': self.func_ref,
-            'trigger': self.trigger,
-            'executor': self.executor,
-            'args': self.args,
-            'kwargs': self.kwargs,
-            'name': self.name,
-            'misfire_grace_time': self.misfire_grace_time,
-            'coalesce': self.coalesce,
-            'max_instances': self.max_instances,
-            'next_run_time': self.next_run_time
-        }
-
-    def __setstate__(self, state):
-        if state.get('version', 1) > 1:
-            raise ValueError('Job has version %s, but only version 1 can be handled' %
-                             state['version'])
-
-        self.id = state['id']
-        self.func_ref = state['func']
-        self.func = ref_to_obj(self.func_ref)
-        self.trigger = state['trigger']
-        self.executor = state['executor']
-        self.args = state['args']
-        self.kwargs = state['kwargs']
-        self.name = state['name']
-        self.misfire_grace_time = state['misfire_grace_time']
-        self.coalesce = state['coalesce']
-        self.max_instances = state['max_instances']
-        self.next_run_time = state['next_run_time']
-
-    def __eq__(self, other):
-        if isinstance(other, Job):
-            return self.id == other.id
-        return NotImplemented
-
-    def __repr__(self):
-        return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))
-
-    def __str__(self):
-        return repr_escape(self.__unicode__())
-
-    def __unicode__(self):
-        if hasattr(self, 'next_run_time'):
-            status = ('next run at: ' + datetime_repr(self.next_run_time) if
-                      self.next_run_time else 'paused')
-        else:
-            status = 'pending'
-
-        return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status)
diff --git a/lib/apscheduler/jobstores/__init__.py b/lib/apscheduler/jobstores/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/lib/apscheduler/jobstores/base.py b/lib/apscheduler/jobstores/base.py
deleted file mode 100644
index 9cff66c4..00000000
--- a/lib/apscheduler/jobstores/base.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import logging
-
-import six
-
-
-class JobLookupError(KeyError):
-    """Raised when the job store cannot find a job for update or removal."""
-
-    def __init__(self, job_id):
-        super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id)
-
-
-class ConflictingIdError(KeyError):
-    """Raised when the uniqueness of job IDs is being violated."""
-
-    def __init__(self, job_id):
-        super(ConflictingIdError, self).__init__(
-            u'Job identifier (%s) conflicts with an existing job' % job_id)
-
-
-class TransientJobError(ValueError):
-    """
-    Raised when an attempt to add a transient (with no func_ref) job to a persistent job store
-    is detected.
-    """
-
-    def __init__(self, job_id):
-        super(TransientJobError, self).__init__(
-            u'Job (%s) cannot be added to this job store because a reference to the callable '
-            u'could not be determined.' % job_id)
-
-
-class BaseJobStore(six.with_metaclass(ABCMeta)):
-    """Abstract base class that defines the interface that every job store must implement."""
-
-    _scheduler = None
-    _alias = None
-    _logger = logging.getLogger('apscheduler.jobstores')
-
-    def start(self, scheduler, alias):
-        """
-        Called by the scheduler when the scheduler is being started or when the job store is being
-        added to an already running scheduler.
- - :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting - this job store - :param str|unicode alias: alias of this job store as it was assigned to the scheduler - """ - - self._scheduler = scheduler - self._alias = alias - self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias) - - def shutdown(self): - """Frees any resources still bound to this job store.""" - - def _fix_paused_jobs_sorting(self, jobs): - for i, job in enumerate(jobs): - if job.next_run_time is not None: - if i > 0: - paused_jobs = jobs[:i] - del jobs[:i] - jobs.extend(paused_jobs) - break - - @abstractmethod - def lookup_job(self, job_id): - """ - Returns a specific job, or ``None`` if it isn't found.. - - The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of - the returned job to point to the scheduler and itself, respectively. - - :param str|unicode job_id: identifier of the job - :rtype: Job - """ - - @abstractmethod - def get_due_jobs(self, now): - """ - Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``. - The returned jobs must be sorted by next run time (ascending). - - :param datetime.datetime now: the current (timezone aware) datetime - :rtype: list[Job] - """ - - @abstractmethod - def get_next_run_time(self): - """ - Returns the earliest run time of all the jobs stored in this job store, or ``None`` if - there are no active jobs. - - :rtype: datetime.datetime - """ - - @abstractmethod - def get_all_jobs(self): - """ - Returns a list of all jobs in this job store. - The returned jobs should be sorted by next run time (ascending). - Paused jobs (next_run_time == None) should be sorted last. - - The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of - the returned jobs to point to the scheduler and itself, respectively. - - :rtype: list[Job] - """ - - @abstractmethod - def add_job(self, job): - """ - Adds the given job to this store. - - :param Job job: the job to add - :raises ConflictingIdError: if there is another job in this store with the same ID - """ - - @abstractmethod - def update_job(self, job): - """ - Replaces the job in the store with the given newer version. - - :param Job job: the job to update - :raises JobLookupError: if the job does not exist - """ - - @abstractmethod - def remove_job(self, job_id): - """ - Removes the given job from this store. - - :param str|unicode job_id: identifier of the job - :raises JobLookupError: if the job does not exist - """ - - @abstractmethod - def remove_all_jobs(self): - """Removes all jobs from this store.""" - - def __repr__(self): - return '<%s>' % self.__class__.__name__ diff --git a/lib/apscheduler/jobstores/memory.py b/lib/apscheduler/jobstores/memory.py deleted file mode 100644 index abfe7c6c..00000000 --- a/lib/apscheduler/jobstores/memory.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import datetime_to_utc_timestamp - - -class MemoryJobStore(BaseJobStore): - """ - Stores jobs in an array in RAM. Provides no persistence support. 
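The abstract methods above are the full contract a custom store must satisfy. A toy dict-backed sketch against the pip-installed package (DictJobStore is hypothetical, and it skips the scheduler/jobstore attribute bookkeeping a production store must perform when reconstituting jobs):

    from datetime import datetime
    from pytz import utc
    from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError

    class DictJobStore(BaseJobStore):
        """Toy in-memory store illustrating the abstract contract."""

        def __init__(self):
            super(DictJobStore, self).__init__()
            self._jobs = {}  # id -> Job

        def _sorted(self):
            paused = datetime(9999, 12, 31, tzinfo=utc)  # paused jobs sort last
            return sorted(self._jobs.values(), key=lambda j: j.next_run_time or paused)

        def lookup_job(self, job_id):
            return self._jobs.get(job_id)

        def get_due_jobs(self, now):
            return [j for j in self._sorted()
                    if j.next_run_time and j.next_run_time <= now]

        def get_next_run_time(self):
            due = [j for j in self._sorted() if j.next_run_time]
            return due[0].next_run_time if due else None

        def get_all_jobs(self):
            return self._sorted()

        def add_job(self, job):
            if job.id in self._jobs:
                raise ConflictingIdError(job.id)
            self._jobs[job.id] = job

        def update_job(self, job):
            if job.id not in self._jobs:
                raise JobLookupError(job.id)
            self._jobs[job.id] = job

        def remove_job(self, job_id):
            if self._jobs.pop(job_id, None) is None:
                raise JobLookupError(job_id)

        def remove_all_jobs(self):
            self._jobs.clear()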
- - Plugin alias: ``memory`` - """ - - def __init__(self): - super(MemoryJobStore, self).__init__() - # list of (job, timestamp), sorted by next_run_time and job id (ascending) - self._jobs = [] - self._jobs_index = {} # id -> (job, timestamp) lookup table - - def lookup_job(self, job_id): - return self._jobs_index.get(job_id, (None, None))[0] - - def get_due_jobs(self, now): - now_timestamp = datetime_to_utc_timestamp(now) - pending = [] - for job, timestamp in self._jobs: - if timestamp is None or timestamp > now_timestamp: - break - pending.append(job) - - return pending - - def get_next_run_time(self): - return self._jobs[0][0].next_run_time if self._jobs else None - - def get_all_jobs(self): - return [j[0] for j in self._jobs] - - def add_job(self, job): - if job.id in self._jobs_index: - raise ConflictingIdError(job.id) - - timestamp = datetime_to_utc_timestamp(job.next_run_time) - index = self._get_job_index(timestamp, job.id) - self._jobs.insert(index, (job, timestamp)) - self._jobs_index[job.id] = (job, timestamp) - - def update_job(self, job): - old_job, old_timestamp = self._jobs_index.get(job.id, (None, None)) - if old_job is None: - raise JobLookupError(job.id) - - # If the next run time has not changed, simply replace the job in its present index. - # Otherwise, reinsert the job to the list to preserve the ordering. - old_index = self._get_job_index(old_timestamp, old_job.id) - new_timestamp = datetime_to_utc_timestamp(job.next_run_time) - if old_timestamp == new_timestamp: - self._jobs[old_index] = (job, new_timestamp) - else: - del self._jobs[old_index] - new_index = self._get_job_index(new_timestamp, job.id) - self._jobs.insert(new_index, (job, new_timestamp)) - - self._jobs_index[old_job.id] = (job, new_timestamp) - - def remove_job(self, job_id): - job, timestamp = self._jobs_index.get(job_id, (None, None)) - if job is None: - raise JobLookupError(job_id) - - index = self._get_job_index(timestamp, job_id) - del self._jobs[index] - del self._jobs_index[job.id] - - def remove_all_jobs(self): - self._jobs = [] - self._jobs_index = {} - - def shutdown(self): - self.remove_all_jobs() - - def _get_job_index(self, timestamp, job_id): - """ - Returns the index of the given job, or if it's not found, the index where the job should be - inserted based on the given timestamp. 
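The hand-rolled binary search above keeps self._jobs sorted by a (timestamp, job id) key, mapping None timestamps to +inf so paused jobs sort last. An equivalent sketch of that ordering invariant using the stdlib, for illustration only:

    from bisect import bisect_left

    def job_index(entries, timestamp, job_id):
        """entries: list of (timestamp_or_None, job_id) tuples, kept sorted."""
        inf = float('inf')
        key = (inf if timestamp is None else timestamp, job_id)
        keys = [(inf if ts is None else ts, jid) for ts, jid in entries]
        return bisect_left(keys, key)  # insertion point, or the match's index

The linear key rebuild makes this O(n) where the original is O(log n); the point is only the sort key, not the performance.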
- - :type timestamp: int - :type job_id: str - - """ - lo, hi = 0, len(self._jobs) - timestamp = float('inf') if timestamp is None else timestamp - while lo < hi: - mid = (lo + hi) // 2 - mid_job, mid_timestamp = self._jobs[mid] - mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp - if mid_timestamp > timestamp: - hi = mid - elif mid_timestamp < timestamp: - lo = mid + 1 - elif mid_job.id > job_id: - hi = mid - elif mid_job.id < job_id: - lo = mid + 1 - else: - return mid - - return lo diff --git a/lib/apscheduler/jobstores/mongodb.py b/lib/apscheduler/jobstores/mongodb.py deleted file mode 100644 index fc88325f..00000000 --- a/lib/apscheduler/jobstores/mongodb.py +++ /dev/null @@ -1,141 +0,0 @@ -from __future__ import absolute_import -import warnings - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from bson.binary import Binary - from pymongo.errors import DuplicateKeyError - from pymongo import MongoClient, ASCENDING -except ImportError: # pragma: nocover - raise ImportError('MongoDBJobStore requires PyMongo installed') - - -class MongoDBJobStore(BaseJobStore): - """ - Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to - pymongo's `MongoClient - `_. - - Plugin alias: ``mongodb`` - - :param str database: database to store jobs in - :param str collection: collection to store jobs in - :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of - providing connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, database='apscheduler', collection='jobs', client=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(MongoDBJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - - if not database: - raise ValueError('The "database" parameter must not be empty') - if not collection: - raise ValueError('The "collection" parameter must not be empty') - - if client: - self.client = maybe_ref(client) - else: - connect_args.setdefault('w', 1) - self.client = MongoClient(**connect_args) - - self.collection = self.client[database][collection] - - def start(self, scheduler, alias): - super(MongoDBJobStore, self).start(scheduler, alias) - self.collection.ensure_index('next_run_time', sparse=True) - - @property - def connection(self): - warnings.warn('The "connection" member is deprecated -- use "client" instead', - DeprecationWarning) - return self.client - - def lookup_job(self, job_id): - document = self.collection.find_one(job_id, ['job_state']) - return self._reconstitute_job(document['job_state']) if document else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - return self._get_jobs({'next_run_time': {'$lte': timestamp}}) - - def get_next_run_time(self): - document = self.collection.find_one({'next_run_time': {'$ne': None}}, - projection=['next_run_time'], - sort=[('next_run_time', ASCENDING)]) - return utc_timestamp_to_datetime(document['next_run_time']) if document else None - - def get_all_jobs(self): - jobs = self._get_jobs({}) - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - try: - self.collection.insert({ - '_id': job.id, - 
'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - }) - except DuplicateKeyError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - result = self.collection.update({'_id': job.id}, {'$set': changes}) - if result and result['n'] == 0: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - result = self.collection.remove(job_id) - if result and result['n'] == 0: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - self.collection.remove() - - def shutdown(self): - self.client.close() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, conditions): - jobs = [] - failed_job_ids = [] - for document in self.collection.find(conditions, ['_id', 'job_state'], - sort=[('next_run_time', ASCENDING)]): - try: - jobs.append(self._reconstitute_job(document['job_state'])) - except: - self._logger.exception('Unable to restore job "%s" -- removing it', - document['_id']) - failed_job_ids.append(document['_id']) - - # Remove all the jobs we failed to restore - if failed_job_ids: - self.collection.remove({'_id': {'$in': failed_job_ids}}) - - return jobs - - def __repr__(self): - return '<%s (client=%s)>' % (self.__class__.__name__, self.client) diff --git a/lib/apscheduler/jobstores/redis.py b/lib/apscheduler/jobstores/redis.py deleted file mode 100644 index 4e092f7d..00000000 --- a/lib/apscheduler/jobstores/redis.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import absolute_import -from datetime import datetime - -from pytz import utc -import six - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from redis import StrictRedis -except ImportError: # pragma: nocover - raise ImportError('RedisJobStore requires redis installed') - - -class RedisJobStore(BaseJobStore): - """ - Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's - :class:`~redis.StrictRedis`. 
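To reproduce the MongoDB store wiring with the pip-installed packages (apscheduler plus pymongo), a dict-style jobstore config can be used; any leftover keys are forwarded to MongoClient, and the host/port values here are illustrative:

    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler(jobstores={
        'default': {'type': 'mongodb', 'database': 'apscheduler',
                    'collection': 'jobs', 'host': 'localhost', 'port': 27017}
    })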
- - Plugin alias: ``redis`` - - :param int db: the database number to store jobs in - :param str jobs_key: key to store jobs in - :param str run_times_key: key to store the jobs' run times in - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times', - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(RedisJobStore, self).__init__() - - if db is None: - raise ValueError('The "db" parameter must not be empty') - if not jobs_key: - raise ValueError('The "jobs_key" parameter must not be empty') - if not run_times_key: - raise ValueError('The "run_times_key" parameter must not be empty') - - self.pickle_protocol = pickle_protocol - self.jobs_key = jobs_key - self.run_times_key = run_times_key - self.redis = StrictRedis(db=int(db), **connect_args) - - def lookup_job(self, job_id): - job_state = self.redis.hget(self.jobs_key, job_id) - return self._reconstitute_job(job_state) if job_state else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp) - if job_ids: - job_states = self.redis.hmget(self.jobs_key, *job_ids) - return self._reconstitute_jobs(six.moves.zip(job_ids, job_states)) - return [] - - def get_next_run_time(self): - next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True) - if next_run_time: - return utc_timestamp_to_datetime(next_run_time[0][1]) - - def get_all_jobs(self): - job_states = self.redis.hgetall(self.jobs_key) - jobs = self._reconstitute_jobs(six.iteritems(job_states)) - paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) - return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key) - - def add_job(self, job): - if self.redis.hexists(self.jobs_key, job.id): - raise ConflictingIdError(job.id) - - with self.redis.pipeline() as pipe: - pipe.multi() - pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), - self.pickle_protocol)) - if job.next_run_time: - pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) - pipe.execute() - - def update_job(self, job): - if not self.redis.hexists(self.jobs_key, job.id): - raise JobLookupError(job.id) - - with self.redis.pipeline() as pipe: - pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), - self.pickle_protocol)) - if job.next_run_time: - pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) - else: - pipe.zrem(self.run_times_key, job.id) - pipe.execute() - - def remove_job(self, job_id): - if not self.redis.hexists(self.jobs_key, job_id): - raise JobLookupError(job_id) - - with self.redis.pipeline() as pipe: - pipe.hdel(self.jobs_key, job_id) - pipe.zrem(self.run_times_key, job_id) - pipe.execute() - - def remove_all_jobs(self): - with self.redis.pipeline() as pipe: - pipe.delete(self.jobs_key) - pipe.delete(self.run_times_key) - pipe.execute() - - def shutdown(self): - self.redis.connection_pool.disconnect() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _reconstitute_jobs(self, job_states): - jobs = [] - failed_job_ids = [] - for job_id, job_state in job_states: - try: - jobs.append(self._reconstitute_job(job_state)) - except: - self._logger.exception('Unable to 
restore job "%s" -- removing it', job_id) - failed_job_ids.append(job_id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - with self.redis.pipeline() as pipe: - pipe.hdel(self.jobs_key, *failed_job_ids) - pipe.zrem(self.run_times_key, *failed_job_ids) - pipe.execute() - - return jobs - - def __repr__(self): - return '<%s>' % self.__class__.__name__ diff --git a/lib/apscheduler/jobstores/rethinkdb.py b/lib/apscheduler/jobstores/rethinkdb.py deleted file mode 100644 index 2185c6cc..00000000 --- a/lib/apscheduler/jobstores/rethinkdb.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - import rethinkdb as r -except ImportError: # pragma: nocover - raise ImportError('RethinkDBJobStore requires rethinkdb installed') - - -class RethinkDBJobStore(BaseJobStore): - """ - Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to - rethinkdb's `RethinkdbClient `_. - - Plugin alias: ``rethinkdb`` - - :param str database: database to store jobs in - :param str collection: collection to store jobs in - :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing - connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, database='apscheduler', table='jobs', client=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(RethinkDBJobStore, self).__init__() - - if not database: - raise ValueError('The "database" parameter must not be empty') - if not table: - raise ValueError('The "table" parameter must not be empty') - - self.database = database - self.table = table - self.client = client - self.pickle_protocol = pickle_protocol - self.connect_args = connect_args - self.conn = None - - def start(self, scheduler, alias): - super(RethinkDBJobStore, self).start(scheduler, alias) - - if self.client: - self.conn = maybe_ref(self.client) - else: - self.conn = r.connect(db=self.database, **self.connect_args) - - if self.database not in r.db_list().run(self.conn): - r.db_create(self.database).run(self.conn) - - if self.table not in r.table_list().run(self.conn): - r.table_create(self.table).run(self.conn) - - if 'next_run_time' not in r.table(self.table).index_list().run(self.conn): - r.table(self.table).index_create('next_run_time').run(self.conn) - - self.table = r.db(self.database).table(self.table) - - def lookup_job(self, job_id): - results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn)) - return self._reconstitute_job(results[0]['job_state']) if results else None - - def get_due_jobs(self, now): - return self._get_jobs(r.row['next_run_time'] <= datetime_to_utc_timestamp(now)) - - def get_next_run_time(self): - results = list( - self.table - .filter(r.row['next_run_time'] != None) # flake8: noqa - .order_by(r.asc('next_run_time')) - .map(lambda x: x['next_run_time']) - .limit(1) - .run(self.conn) - ) - return utc_timestamp_to_datetime(results[0]) if results else None - - def get_all_jobs(self): - jobs = self._get_jobs() - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - job_dict = { - 'id': job.id, - 'next_run_time': 
datetime_to_utc_timestamp(job.next_run_time), - 'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - results = self.table.insert(job_dict).run(self.conn) - if results['errors'] > 0: - raise ConflictingIdError(job.id) - - def update_job(self, job): - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - results = self.table.get_all(job.id).update(changes).run(self.conn) - skipped = False in map(lambda x: results[x] == 0, results.keys()) - if results['skipped'] > 0 or results['errors'] > 0 or not skipped: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - results = self.table.get_all(job_id).delete().run(self.conn) - if results['deleted'] + results['skipped'] != 1: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - self.table.delete().run(self.conn) - - def shutdown(self): - self.conn.close() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, predicate=None): - jobs = [] - failed_job_ids = [] - query = (self.table.filter(r.row['next_run_time'] != None).filter(predicate) if - predicate else self.table) - query = query.order_by('next_run_time', 'id').pluck('id', 'job_state') - - for document in query.run(self.conn): - try: - jobs.append(self._reconstitute_job(document['job_state'])) - except: - self._logger.exception('Unable to restore job "%s" -- removing it', document['id']) - failed_job_ids.append(document['id']) - - # Remove all the jobs we failed to restore - if failed_job_ids: - r.expr(failed_job_ids).for_each( - lambda job_id: self.table.get_all(job_id).delete()).run(self.conn) - - return jobs - - def __repr__(self): - connection = self.conn - return '<%s (connection=%s)>' % (self.__class__.__name__, connection) diff --git a/lib/apscheduler/jobstores/sqlalchemy.py b/lib/apscheduler/jobstores/sqlalchemy.py deleted file mode 100644 index b82696e2..00000000 --- a/lib/apscheduler/jobstores/sqlalchemy.py +++ /dev/null @@ -1,148 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from sqlalchemy import ( - create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select) - from sqlalchemy.exc import IntegrityError - from sqlalchemy.sql.expression import null -except ImportError: # pragma: nocover - raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed') - - -class SQLAlchemyJobStore(BaseJobStore): - """ - Stores jobs in a database table using SQLAlchemy. - The table will be created if it doesn't exist in the database. 
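For the SQLAlchemy store, a minimal sketch using the pip-installed package and a local SQLite file; the table (apscheduler_jobs by default) is created when the scheduler starts:

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

    scheduler = BackgroundScheduler(
        jobstores={'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')})
    scheduler.start()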
- - Plugin alias: ``sqlalchemy`` - - :param str url: connection string (see `SQLAlchemy documentation - `_ - on this) - :param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url`` - :param str tablename: name of the table to store jobs in - :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL): - super(SQLAlchemyJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - metadata = maybe_ref(metadata) or MetaData() - - if engine: - self.engine = maybe_ref(engine) - elif url: - self.engine = create_engine(url) - else: - raise ValueError('Need either "engine" or "url" defined') - - # 191 = max key length in MySQL for InnoDB/utf8mb4 tables, - # 25 = precision that translates to an 8-byte float - self.jobs_t = Table( - tablename, metadata, - Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True), - Column('next_run_time', Float(25), index=True), - Column('job_state', LargeBinary, nullable=False) - ) - - def start(self, scheduler, alias): - super(SQLAlchemyJobStore, self).start(scheduler, alias) - self.jobs_t.create(self.engine, True) - - def lookup_job(self, job_id): - selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id) - job_state = self.engine.execute(selectable).scalar() - return self._reconstitute_job(job_state) if job_state else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp) - - def get_next_run_time(self): - selectable = select([self.jobs_t.c.next_run_time]).\ - where(self.jobs_t.c.next_run_time != null()).\ - order_by(self.jobs_t.c.next_run_time).limit(1) - next_run_time = self.engine.execute(selectable).scalar() - return utc_timestamp_to_datetime(next_run_time) - - def get_all_jobs(self): - jobs = self._get_jobs() - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - insert = self.jobs_t.insert().values(**{ - 'id': job.id, - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) - }) - try: - self.engine.execute(insert) - except IntegrityError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - update = self.jobs_t.update().values(**{ - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) - }).where(self.jobs_t.c.id == job.id) - result = self.engine.execute(update) - if result.rowcount == 0: - raise JobLookupError(id) - - def remove_job(self, job_id): - delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id) - result = self.engine.execute(delete) - if result.rowcount == 0: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - delete = self.jobs_t.delete() - self.engine.execute(delete) - - def shutdown(self): - self.engine.dispose() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job_state['jobstore'] = self - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, *conditions): - jobs = [] - selectable = select([self.jobs_t.c.id, 
self.jobs_t.c.job_state]).\ - order_by(self.jobs_t.c.next_run_time) - selectable = selectable.where(*conditions) if conditions else selectable - failed_job_ids = set() - for row in self.engine.execute(selectable): - try: - jobs.append(self._reconstitute_job(row.job_state)) - except: - self._logger.exception('Unable to restore job "%s" -- removing it', row.id) - failed_job_ids.add(row.id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids)) - self.engine.execute(delete) - - return jobs - - def __repr__(self): - return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url) diff --git a/lib/apscheduler/jobstores/zookeeper.py b/lib/apscheduler/jobstores/zookeeper.py deleted file mode 100644 index 44e2e61f..00000000 --- a/lib/apscheduler/jobstores/zookeeper.py +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import absolute_import - -import os -from datetime import datetime - -from pytz import utc -from kazoo.exceptions import NoNodeError, NodeExistsError - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from kazoo.client import KazooClient -except ImportError: # pragma: nocover - raise ImportError('ZooKeeperJobStore requires Kazoo installed') - - -class ZooKeeperJobStore(BaseJobStore): - """ - Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to - kazoo's `KazooClient - `_. - - Plugin alias: ``zookeeper`` - - :param str path: path to store jobs in - :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of - providing connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(ZooKeeperJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - self.close_connection_on_exit = close_connection_on_exit - - if not path: - raise ValueError('The "path" parameter must not be empty') - - self.path = path - - if client: - self.client = maybe_ref(client) - else: - self.client = KazooClient(**connect_args) - self._ensured_path = False - - def _ensure_paths(self): - if not self._ensured_path: - self.client.ensure_path(self.path) - self._ensured_path = True - - def start(self, scheduler, alias): - super(ZooKeeperJobStore, self).start(scheduler, alias) - if not self.client.connected: - self.client.start() - - def lookup_job(self, job_id): - self._ensure_paths() - node_path = os.path.join(self.path, job_id) - try: - content, _ = self.client.get(node_path) - doc = pickle.loads(content) - job = self._reconstitute_job(doc['job_state']) - return job - except: - return None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - jobs = [job_def['job'] for job_def in self._get_jobs() - if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp] - return jobs - - def get_next_run_time(self): - next_runs = [job_def['next_run_time'] for job_def in self._get_jobs() - if job_def['next_run_time'] is not None] - return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None - - def 
get_all_jobs(self): - jobs = [job_def['job'] for job_def in self._get_jobs()] - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) - value = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': job.__getstate__() - } - data = pickle.dumps(value, self.pickle_protocol) - try: - self.client.create(node_path, value=data) - except NodeExistsError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': job.__getstate__() - } - data = pickle.dumps(changes, self.pickle_protocol) - try: - self.client.set(node_path, value=data) - except NoNodeError: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - self._ensure_paths() - node_path = os.path.join(self.path, str(job_id)) - try: - self.client.delete(node_path) - except NoNodeError: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - try: - self.client.delete(self.path, recursive=True) - except NoNodeError: - pass - self._ensured_path = False - - def shutdown(self): - if self.close_connection_on_exit: - self.client.stop() - self.client.close() - - def _reconstitute_job(self, job_state): - job_state = job_state - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self): - self._ensure_paths() - jobs = [] - failed_job_ids = [] - all_ids = self.client.get_children(self.path) - for node_name in all_ids: - try: - node_path = os.path.join(self.path, node_name) - content, _ = self.client.get(node_path) - doc = pickle.loads(content) - job_def = { - 'job_id': node_name, - 'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None, - 'job_state': doc['job_state'], - 'job': self._reconstitute_job(doc['job_state']), - 'creation_time': _.ctime - } - jobs.append(job_def) - except: - self._logger.exception('Unable to restore job "%s" -- removing it' % node_name) - failed_job_ids.append(node_name) - - # Remove all the jobs we failed to restore - if failed_job_ids: - for failed_id in failed_job_ids: - self.remove_job(failed_id) - paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) - return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key, - job_def['creation_time'])) - - def __repr__(self): - self._logger.exception('<%s (client=%s)>' % (self.__class__.__name__, self.client)) - return '<%s (client=%s)>' % (self.__class__.__name__, self.client) diff --git a/lib/apscheduler/schedulers/__init__.py b/lib/apscheduler/schedulers/__init__.py deleted file mode 100644 index bd8a7900..00000000 --- a/lib/apscheduler/schedulers/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -class SchedulerAlreadyRunningError(Exception): - """Raised when attempting to start or configure the scheduler when it's already running.""" - - def __str__(self): - return 'Scheduler is already running' - - -class SchedulerNotRunningError(Exception): - """Raised when attempting to shutdown the scheduler when it's not running.""" - - def __str__(self): - return 'Scheduler is not running' diff --git a/lib/apscheduler/schedulers/asyncio.py b/lib/apscheduler/schedulers/asyncio.py deleted file mode 100644 index a272b1a2..00000000 --- a/lib/apscheduler/schedulers/asyncio.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import 
absolute_import -from functools import wraps - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - import asyncio -except ImportError: # pragma: nocover - try: - import trollius as asyncio - except ImportError: - raise ImportError( - 'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed') - - -def run_in_event_loop(func): - @wraps(func) - def wrapper(self, *args): - self._eventloop.call_soon_threadsafe(func, self, *args) - return wrapper - - -class AsyncIOScheduler(BaseScheduler): - """ - A scheduler that runs on an asyncio (:pep:`3156`) event loop. - - The default executor can run jobs based on native coroutines (``async def``). - - Extra options: - - ============== ============================================================= - ``event_loop`` AsyncIO event loop to use (defaults to the global event loop) - ============== ============================================================= - """ - - _eventloop = None - _timeout = None - - @run_in_event_loop - def shutdown(self, wait=True): - super(AsyncIOScheduler, self).shutdown(wait) - self._stop_timer() - - def _configure(self, config): - self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop() - super(AsyncIOScheduler, self)._configure(config) - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup) - - def _stop_timer(self): - if self._timeout: - self._timeout.cancel() - del self._timeout - - @run_in_event_loop - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) - - def _create_default_executor(self): - from apscheduler.executors.asyncio import AsyncIOExecutor - return AsyncIOExecutor() diff --git a/lib/apscheduler/schedulers/background.py b/lib/apscheduler/schedulers/background.py deleted file mode 100644 index 03f29822..00000000 --- a/lib/apscheduler/schedulers/background.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import absolute_import - -from threading import Thread, Event - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.schedulers.blocking import BlockingScheduler -from apscheduler.util import asbool - - -class BackgroundScheduler(BlockingScheduler): - """ - A scheduler that runs in the background using a separate thread - (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately). 
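Because BackgroundScheduler runs its main loop in a (by default daemonic) thread, start() returns immediately and the caller must keep the main thread alive itself. A minimal sketch against the pip-installed package:

    import time
    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler()   # daemon thread by default
    scheduler.start()                   # returns immediately
    try:
        while True:
            time.sleep(1)               # keep the main thread alive
    except KeyboardInterrupt:
        scheduler.shutdown()            # waits for running jobs, joins the thread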
- - Extra options: - - ========== ============================================================================= - ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see - `the documentation - `_ - for further details) - ========== ============================================================================= - """ - - _thread = None - - def _configure(self, config): - self._daemon = asbool(config.pop('daemon', True)) - super(BackgroundScheduler, self)._configure(config) - - def start(self, *args, **kwargs): - self._event = Event() - BaseScheduler.start(self, *args, **kwargs) - self._thread = Thread(target=self._main_loop, name='APScheduler') - self._thread.daemon = self._daemon - self._thread.start() - - def shutdown(self, *args, **kwargs): - super(BackgroundScheduler, self).shutdown(*args, **kwargs) - self._thread.join() - del self._thread diff --git a/lib/apscheduler/schedulers/base.py b/lib/apscheduler/schedulers/base.py deleted file mode 100644 index 93269092..00000000 --- a/lib/apscheduler/schedulers/base.py +++ /dev/null @@ -1,1006 +0,0 @@ -from __future__ import print_function - -from abc import ABCMeta, abstractmethod -from collections import MutableMapping -from threading import RLock -from datetime import datetime, timedelta -from logging import getLogger -import warnings -import sys - -from pkg_resources import iter_entry_points -from tzlocal import get_localzone -import six - -from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError -from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor -from apscheduler.executors.pool import ThreadPoolExecutor -from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore -from apscheduler.jobstores.memory import MemoryJobStore -from apscheduler.job import Job -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined -from apscheduler.events import ( - SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, - EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, - EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED, - EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED) - -#: constant indicating a scheduler's stopped state -STATE_STOPPED = 0 -#: constant indicating a scheduler's running state (started and processing jobs) -STATE_RUNNING = 1 -#: constant indicating a scheduler's paused state (started but not processing jobs) -STATE_PAUSED = 2 - - -class BaseScheduler(six.with_metaclass(ABCMeta)): - """ - Abstract base class for all schedulers. 
- - Takes the following keyword arguments: - - :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to - apscheduler.scheduler) - :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone) - :param int|float jobstore_retry_interval: the minimum number of seconds to wait between - retries in the scheduler's main loop if the job store raises an exception when getting - the list of due jobs - :param dict job_defaults: default values for newly added jobs - :param dict jobstores: a dictionary of job store alias -> job store instance or configuration - dict - :param dict executors: a dictionary of executor alias -> executor instance or configuration - dict - - :ivar int state: current running state of the scheduler (one of the following constants from - ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``) - - .. seealso:: :ref:`scheduler-config` - """ - - _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers')) - _trigger_classes = {} - _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors')) - _executor_classes = {} - _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores')) - _jobstore_classes = {} - - # - # Public API - # - - def __init__(self, gconfig={}, **options): - super(BaseScheduler, self).__init__() - self._executors = {} - self._executors_lock = self._create_lock() - self._jobstores = {} - self._jobstores_lock = self._create_lock() - self._listeners = [] - self._listeners_lock = self._create_lock() - self._pending_jobs = [] - self.state = STATE_STOPPED - self.configure(gconfig, **options) - - def configure(self, gconfig={}, prefix='apscheduler.', **options): - """ - Reconfigures the scheduler with the given options. - - Can only be done when the scheduler isn't running. - - :param dict gconfig: a "global" configuration dictionary whose values can be overridden by - keyword arguments to this method - :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with - this string (pass an empty string or ``None`` to use all keys) - :raises SchedulerAlreadyRunningError: if the scheduler is already running - - """ - if self.state != STATE_STOPPED: - raise SchedulerAlreadyRunningError - - # If a non-empty prefix was given, strip it from the keys in the - # global configuration dict - if prefix: - prefixlen = len(prefix) - gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig) - if key.startswith(prefix)) - - # Create a structure from the dotted options - # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}}) - config = {} - for key, value in six.iteritems(gconfig): - parts = key.split('.') - parent = config - key = parts.pop(0) - while parts: - parent = parent.setdefault(key, {}) - key = parts.pop(0) - parent[key] = value - - # Override any options with explicit keyword arguments - config.update(options) - self._configure(config) - - def start(self, paused=False): - """ - Start the configured executors and job stores and begin processing scheduled jobs. 
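configure() strips the prefix from matching gconfig keys and expands dotted keys into nested option dicts, so the two calls in this sketch are equivalent; the values are illustrative:

    scheduler.configure({
        'apscheduler.timezone': 'UTC',
        'apscheduler.job_defaults.coalesce': 'false',
        'apscheduler.job_defaults.max_instances': '3',
    })
    # ...is equivalent to:
    scheduler.configure(timezone='UTC',
                        job_defaults={'coalesce': False, 'max_instances': 3})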
- - :param bool paused: if ``True``, don't start job processing until :meth:`resume` is called - :raises SchedulerAlreadyRunningError: if the scheduler is already running - - """ - if self.state != STATE_STOPPED: - raise SchedulerAlreadyRunningError - - with self._executors_lock: - # Create a default executor if nothing else is configured - if 'default' not in self._executors: - self.add_executor(self._create_default_executor(), 'default') - - # Start all the executors - for alias, executor in six.iteritems(self._executors): - executor.start(self, alias) - - with self._jobstores_lock: - # Create a default job store if nothing else is configured - if 'default' not in self._jobstores: - self.add_jobstore(self._create_default_jobstore(), 'default') - - # Start all the job stores - for alias, store in six.iteritems(self._jobstores): - store.start(self, alias) - - # Schedule all pending jobs - for job, jobstore_alias, replace_existing in self._pending_jobs: - self._real_add_job(job, jobstore_alias, replace_existing) - del self._pending_jobs[:] - - self.state = STATE_PAUSED if paused else STATE_RUNNING - self._logger.info('Scheduler started') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START)) - - if not paused: - self.wakeup() - - @abstractmethod - def shutdown(self, wait=True): - """ - Shuts down the scheduler, along with its executors and job stores. - - Does not interrupt any currently running jobs. - - :param bool wait: ``True`` to wait until all currently executing jobs have finished - :raises SchedulerNotRunningError: if the scheduler has not been started yet - - """ - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - - self.state = STATE_STOPPED - - with self._jobstores_lock, self._executors_lock: - # Shut down all executors - for executor in six.itervalues(self._executors): - executor.shutdown(wait) - - # Shut down all job stores - for jobstore in six.itervalues(self._jobstores): - jobstore.shutdown() - - self._logger.info('Scheduler has been shut down') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) - - def pause(self): - """ - Pause job processing in the scheduler. - - This will prevent the scheduler from waking up to do job processing until :meth:`resume` - is called. It will not however stop any already running job processing. - - """ - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - elif self.state == STATE_RUNNING: - self.state = STATE_PAUSED - self._logger.info('Paused scheduler job processing') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED)) - - def resume(self): - """Resume job processing in the scheduler.""" - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - elif self.state == STATE_PAUSED: - self.state = STATE_RUNNING - self._logger.info('Resumed scheduler job processing') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED)) - self.wakeup() - - @property - def running(self): - """ - Return ``True`` if the scheduler has been started. - - This is a shortcut for ``scheduler.state != STATE_STOPPED``. - - """ - return self.state != STATE_STOPPED - - def add_executor(self, executor, alias='default', **executor_opts): - """ - Adds an executor to this scheduler. - - Any extra keyword arguments will be passed to the executor plugin's constructor, assuming - that the first argument is the name of an executor plugin. 
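Executors can be registered either by plugin alias (extra keyword arguments are passed to the plugin constructor) or as ready-made instances; heavy_task is a hypothetical callable:

    from apscheduler.executors.pool import ProcessPoolExecutor

    # Before start(), so the 'default' alias is still free:
    scheduler.add_executor('threadpool', alias='default', max_workers=20)
    scheduler.add_executor(ProcessPoolExecutor(5), alias='processpool')
    scheduler.add_job(heavy_task, 'interval', minutes=10, executor='processpool')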
- - :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor - instance or the name of an executor plugin - :param str|unicode alias: alias for the scheduler - :raises ValueError: if there is already an executor by the given alias - - """ - with self._executors_lock: - if alias in self._executors: - raise ValueError('This scheduler already has an executor by the alias of "%s"' % - alias) - - if isinstance(executor, BaseExecutor): - self._executors[alias] = executor - elif isinstance(executor, six.string_types): - self._executors[alias] = executor = self._create_plugin_instance( - 'executor', executor, executor_opts) - else: - raise TypeError('Expected an executor instance or a string, got %s instead' % - executor.__class__.__name__) - - # Start the executor right away if the scheduler is running - if self.state != STATE_STOPPED: - executor.start(self, alias) - - self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias)) - - def remove_executor(self, alias, shutdown=True): - """ - Removes the executor by the given alias from this scheduler. - - :param str|unicode alias: alias of the executor - :param bool shutdown: ``True`` to shut down the executor after - removing it - - """ - with self._executors_lock: - executor = self._lookup_executor(alias) - del self._executors[alias] - - if shutdown: - executor.shutdown() - - self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias)) - - def add_jobstore(self, jobstore, alias='default', **jobstore_opts): - """ - Adds a job store to this scheduler. - - Any extra keyword arguments will be passed to the job store plugin's constructor, assuming - that the first argument is the name of a job store plugin. - - :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added - :param str|unicode alias: alias for the job store - :raises ValueError: if there is already a job store by the given alias - - """ - with self._jobstores_lock: - if alias in self._jobstores: - raise ValueError('This scheduler already has a job store by the alias of "%s"' % - alias) - - if isinstance(jobstore, BaseJobStore): - self._jobstores[alias] = jobstore - elif isinstance(jobstore, six.string_types): - self._jobstores[alias] = jobstore = self._create_plugin_instance( - 'jobstore', jobstore, jobstore_opts) - else: - raise TypeError('Expected a job store instance or a string, got %s instead' % - jobstore.__class__.__name__) - - # Start the job store right away if the scheduler isn't stopped - if self.state != STATE_STOPPED: - jobstore.start(self, alias) - - # Notify listeners that a new job store has been added - self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias)) - - # Notify the scheduler so it can scan the new job store for jobs - if self.state != STATE_STOPPED: - self.wakeup() - - def remove_jobstore(self, alias, shutdown=True): - """ - Removes the job store by the given alias from this scheduler. - - :param str|unicode alias: alias of the job store - :param bool shutdown: ``True`` to shut down the job store after removing it - - """ - with self._jobstores_lock: - jobstore = self._lookup_jobstore(alias) - del self._jobstores[alias] - - if shutdown: - jobstore.shutdown() - - self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias)) - - def add_listener(self, callback, mask=EVENT_ALL): - """ - add_listener(callback, mask=EVENT_ALL) - - Adds a listener for scheduler events. - - When a matching event occurs, ``callback`` is executed with the event object as its - sole argument. 
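A listener sketch: the mask is a bitwise OR of the constants in apscheduler.events, and job execution events carry either a retval or an exception:

    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

    def on_job_done(event):
        if event.exception:
            print('job %s raised %r' % (event.job_id, event.exception))
        else:
            print('job %s returned %r' % (event.job_id, event.retval))

    scheduler.add_listener(on_job_done, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)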
If the ``mask`` parameter is not provided, the callback will receive events - of all types. - - :param callback: any callable that takes one argument - :param int mask: bitmask that indicates which events should be - listened to - - .. seealso:: :mod:`apscheduler.events` - .. seealso:: :ref:`scheduler-events` - - """ - with self._listeners_lock: - self._listeners.append((callback, mask)) - - def remove_listener(self, callback): - """Removes a previously added event listener.""" - - with self._listeners_lock: - for i, (cb, _) in enumerate(self._listeners): - if callback == cb: - del self._listeners[i] - - def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, - misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, - next_run_time=undefined, jobstore='default', executor='default', - replace_existing=False, **trigger_args): - """ - add_job(func, trigger=None, args=None, kwargs=None, id=None, \ - name=None, misfire_grace_time=undefined, coalesce=undefined, \ - max_instances=undefined, next_run_time=undefined, \ - jobstore='default', executor='default', \ - replace_existing=False, **trigger_args) - - Adds the given job to the job list and wakes up the scheduler if it's already running. - - Any option that defaults to ``undefined`` will be replaced with the corresponding default - value when the job is scheduled (which happens when the scheduler is started, or - immediately if the scheduler is already running). - - The ``func`` argument can be given either as a callable object or a textual reference in - the ``package.module:some.object`` format, where the first half (separated by ``:``) is an - importable module and the second half is a reference to the callable object, relative to - the module. - - The ``trigger`` argument can either be: - #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case - any extra keyword arguments to this method are passed on to the trigger's constructor - #. 
an instance of a trigger class - - :param func: callable (or a textual reference to one) to run at the given time - :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when - ``func`` is called - :param list|tuple args: list of positional arguments to call func with - :param dict kwargs: dict of keyword arguments to call func with - :param str|unicode id: explicit identifier for the job (for modifying it later) - :param str|unicode name: textual description of the job - :param int misfire_grace_time: seconds after the designated runtime that the job is still - allowed to be run - :param bool coalesce: run once instead of many times if the scheduler determines that the - job should be run more than once in succession - :param int max_instances: maximum number of concurrently running instances allowed for this - job - :param datetime next_run_time: when to first run the job, regardless of the trigger (pass - ``None`` to add the job as paused) - :param str|unicode jobstore: alias of the job store to store the job in - :param str|unicode executor: alias of the executor to run the job with - :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` - (but retain the number of runs from the existing one) - :rtype: Job - - """ - job_kwargs = { - 'trigger': self._create_trigger(trigger, trigger_args), - 'executor': executor, - 'func': func, - 'args': tuple(args) if args is not None else (), - 'kwargs': dict(kwargs) if kwargs is not None else {}, - 'id': id, - 'name': name, - 'misfire_grace_time': misfire_grace_time, - 'coalesce': coalesce, - 'max_instances': max_instances, - 'next_run_time': next_run_time - } - job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if - value is not undefined) - job = Job(self, **job_kwargs) - - # Don't really add jobs to job stores before the scheduler is up and running - with self._jobstores_lock: - if self.state == STATE_STOPPED: - self._pending_jobs.append((job, jobstore, replace_existing)) - self._logger.info('Adding job tentatively -- it will be properly scheduled when ' - 'the scheduler starts') - else: - self._real_add_job(job, jobstore, replace_existing) - - return job - - def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None, - misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, - next_run_time=undefined, jobstore='default', executor='default', - **trigger_args): - """ - scheduled_job(trigger, args=None, kwargs=None, id=None, \ - name=None, misfire_grace_time=undefined, \ - coalesce=undefined, max_instances=undefined, \ - next_run_time=undefined, jobstore='default', \ - executor='default',**trigger_args) - - A decorator version of :meth:`add_job`, except that ``replace_existing`` is always - ``True``. - - .. important:: The ``id`` argument must be given if scheduling a job in a persistent job - store. The scheduler cannot, however, enforce this requirement. - - """ - def inner(func): - self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce, - max_instances, next_run_time, jobstore, executor, True, **trigger_args) - return func - return inner - - def modify_job(self, job_id, jobstore=None, **changes): - """ - Modifies the properties of a single job. - - Modifications are passed to this method as extra keyword arguments. 
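Both trigger forms accepted by add_job(), sketched with a hypothetical my_task callable:

    from apscheduler.triggers.interval import IntervalTrigger

    # 1. Trigger alias plus trigger kwargs:
    scheduler.add_job(my_task, 'cron', day_of_week='mon-fri', hour=5, minute=30,
                      id='nightly', replace_existing=True)

    # 2. A trigger instance:
    scheduler.add_job(my_task, IntervalTrigger(hours=2), args=('one',),
                      misfire_grace_time=300, coalesce=True)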
- - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job: the relevant job instance - - """ - with self._jobstores_lock: - job, jobstore = self._lookup_job(job_id, jobstore) - job._modify(**changes) - if jobstore: - self._lookup_jobstore(jobstore).update_job(job) - - self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore)) - - # Wake up the scheduler since the job's next run time may have been changed - if self.state == STATE_RUNNING: - self.wakeup() - - return job - - def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args): - """ - Constructs a new trigger for a job and updates its next run time. - - Extra keyword arguments are passed directly to the trigger's constructor. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :param trigger: alias of the trigger type or a trigger instance - :return Job: the relevant job instance - - """ - trigger = self._create_trigger(trigger, trigger_args) - now = datetime.now(self.timezone) - next_run_time = trigger.get_next_fire_time(None, now) - return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time) - - def pause_job(self, job_id, jobstore=None): - """ - Causes the given job not to be executed until it is explicitly resumed. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job: the relevant job instance - - """ - return self.modify_job(job_id, jobstore, next_run_time=None) - - def resume_job(self, job_id, jobstore=None): - """ - Resumes the schedule of the given job, or removes the job if its schedule is finished. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no - next run time could be calculated and the job was removed - - """ - with self._jobstores_lock: - job, jobstore = self._lookup_job(job_id, jobstore) - now = datetime.now(self.timezone) - next_run_time = job.trigger.get_next_fire_time(None, now) - if next_run_time: - return self.modify_job(job_id, jobstore, next_run_time=next_run_time) - else: - self.remove_job(job.id, jobstore) - - def get_jobs(self, jobstore=None, pending=None): - """ - Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled - jobs, either from a specific job store or from all of them. - - If the scheduler has not been started yet, only pending jobs can be returned because the - job stores haven't been started yet either. 
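Continuing the hypothetical 'nightly' job from the sketch above, the scheduler-level equivalents of the Job shortcuts:

    scheduler.reschedule_job('nightly', trigger='cron', hour=6)  # new trigger + next_run_time
    scheduler.pause_job('nightly')    # same as modify_job(next_run_time=None)
    scheduler.resume_job('nightly')   # removed instead if the trigger is exhausted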
- - :param str|unicode jobstore: alias of the job store - :param bool pending: **DEPRECATED** - :rtype: list[Job] - - """ - if pending is not None: - warnings.warn('The "pending" option is deprecated -- get_jobs() always returns ' - 'scheduled jobs if the scheduler has been started and pending jobs ' - 'otherwise', DeprecationWarning) - - with self._jobstores_lock: - jobs = [] - if self.state == STATE_STOPPED: - for job, alias, replace_existing in self._pending_jobs: - if jobstore is None or alias == jobstore: - jobs.append(job) - else: - for alias, store in six.iteritems(self._jobstores): - if jobstore is None or alias == jobstore: - jobs.extend(store.get_all_jobs()) - - return jobs - - def get_job(self, job_id, jobstore=None): - """ - Returns the Job that matches the given ``job_id``. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that most likely contains the job - :return: the Job by the given ID, or ``None`` if it wasn't found - :rtype: Job - - """ - with self._jobstores_lock: - try: - return self._lookup_job(job_id, jobstore)[0] - except JobLookupError: - return - - def remove_job(self, job_id, jobstore=None): - """ - Removes a job, preventing it from being run any more. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :raises JobLookupError: if the job was not found - - """ - jobstore_alias = None - with self._jobstores_lock: - # Check if the job is among the pending jobs - if self.state == STATE_STOPPED: - for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): - if job.id == job_id and jobstore in (None, alias): - del self._pending_jobs[i] - jobstore_alias = alias - break - else: - # Otherwise, try to remove it from each store until it succeeds or we run out of - # stores to check - for alias, store in six.iteritems(self._jobstores): - if jobstore in (None, alias): - try: - store.remove_job(job_id) - jobstore_alias = alias - break - except JobLookupError: - continue - - if jobstore_alias is None: - raise JobLookupError(job_id) - - # Notify listeners that a job has been removed - event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias) - self._dispatch_event(event) - - self._logger.info('Removed job %s', job_id) - - def remove_all_jobs(self, jobstore=None): - """ - Removes all jobs from the specified job store, or all job stores if none is given. - - :param str|unicode jobstore: alias of the job store - - """ - with self._jobstores_lock: - if self.state == STATE_STOPPED: - if jobstore: - self._pending_jobs = [pending for pending in self._pending_jobs if - pending[1] != jobstore] - else: - self._pending_jobs = [] - else: - for alias, store in six.iteritems(self._jobstores): - if jobstore in (None, alias): - store.remove_all_jobs() - - self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore)) - - def print_jobs(self, jobstore=None, out=None): - """ - print_jobs(jobstore=None, out=sys.stdout) - - Prints out a textual listing of all jobs currently scheduled on either all job stores or - just a specific one.
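The inspection and removal methods above, again continuing the illustrative sched/tick setup:

    # get_jobs() returns pending jobs before start() and stored jobs afterwards.
    for job in sched.get_jobs():
        print(job.id, job.next_run_time)

    # get_job() returns None for an unknown id, whereas remove_job()
    # raises JobLookupError in that case.
    if sched.get_job('tick') is not None:
        sched.remove_job('tick')

    sched.print_jobs()       # human-readable listing, grouped by job store
    sched.remove_all_jobs()  # clear every job store at once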
- - :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores - :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is - given) - - """ - out = out or sys.stdout - with self._jobstores_lock: - if self.state == STATE_STOPPED: - print(u'Pending jobs:', file=out) - if self._pending_jobs: - for job, jobstore_alias, replace_existing in self._pending_jobs: - if jobstore in (None, jobstore_alias): - print(u' %s' % job, file=out) - else: - print(u' No pending jobs', file=out) - else: - for alias, store in sorted(six.iteritems(self._jobstores)): - if jobstore in (None, alias): - print(u'Jobstore %s:' % alias, file=out) - jobs = store.get_all_jobs() - if jobs: - for job in jobs: - print(u' %s' % job, file=out) - else: - print(u' No scheduled jobs', file=out) - - @abstractmethod - def wakeup(self): - """ - Notifies the scheduler that there may be jobs due for execution. - Triggers :meth:`_process_jobs` to be run in an implementation specific manner. - """ - - # - # Private API - # - - def _configure(self, config): - # Set general options - self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler') - self.timezone = astimezone(config.pop('timezone', None)) or get_localzone() - self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10)) - - # Set the job defaults - job_defaults = config.get('job_defaults', {}) - self._job_defaults = { - 'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)), - 'coalesce': asbool(job_defaults.get('coalesce', True)), - 'max_instances': asint(job_defaults.get('max_instances', 1)) - } - - # Configure executors - self._executors.clear() - for alias, value in six.iteritems(config.get('executors', {})): - if isinstance(value, BaseExecutor): - self.add_executor(value, alias) - elif isinstance(value, MutableMapping): - executor_class = value.pop('class', None) - plugin = value.pop('type', None) - if plugin: - executor = self._create_plugin_instance('executor', plugin, value) - elif executor_class: - cls = maybe_ref(executor_class) - executor = cls(**value) - else: - raise ValueError( - 'Cannot create executor "%s" -- either "type" or "class" must be defined' % - alias) - - self.add_executor(executor, alias) - else: - raise TypeError( - "Expected executor instance or dict for executors['%s'], got %s instead" % - (alias, value.__class__.__name__)) - - # Configure job stores - self._jobstores.clear() - for alias, value in six.iteritems(config.get('jobstores', {})): - if isinstance(value, BaseJobStore): - self.add_jobstore(value, alias) - elif isinstance(value, MutableMapping): - jobstore_class = value.pop('class', None) - plugin = value.pop('type', None) - if plugin: - jobstore = self._create_plugin_instance('jobstore', plugin, value) - elif jobstore_class: - cls = maybe_ref(jobstore_class) - jobstore = cls(**value) - else: - raise ValueError( - 'Cannot create job store "%s" -- either "type" or "class" must be ' - 'defined' % alias) - - self.add_jobstore(jobstore, alias) - else: - raise TypeError( - "Expected job store instance or dict for jobstores['%s'], got %s instead" % - (alias, value.__class__.__name__)) - - def _create_default_executor(self): - """Creates a default executor, specific to the particular scheduler type.""" - return ThreadPoolExecutor() - - def _create_default_jobstore(self): - """Creates a default job store, specific to the particular scheduler type.""" - return MemoryJobStore() - - def _lookup_executor(self, alias): -
""" - Returns the executor instance by the given name from the list of executors that were added - to this scheduler. - - :type alias: str - :raises KeyError: if no executor by the given alias is not found - - """ - try: - return self._executors[alias] - except KeyError: - raise KeyError('No such executor: %s' % alias) - - def _lookup_jobstore(self, alias): - """ - Returns the job store instance by the given name from the list of job stores that were - added to this scheduler. - - :type alias: str - :raises KeyError: if no job store by the given alias is not found - - """ - try: - return self._jobstores[alias] - except KeyError: - raise KeyError('No such job store: %s' % alias) - - def _lookup_job(self, job_id, jobstore_alias): - """ - Finds a job by its ID. - - :type job_id: str - :param str jobstore_alias: alias of a job store to look in - :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of - a pending job) - :raises JobLookupError: if no job by the given ID is found. - - """ - if self.state == STATE_STOPPED: - # Check if the job is among the pending jobs - for job, alias, replace_existing in self._pending_jobs: - if job.id == job_id: - return job, None - else: - # Look in all job stores - for alias, store in six.iteritems(self._jobstores): - if jobstore_alias in (None, alias): - job = store.lookup_job(job_id) - if job is not None: - return job, alias - - raise JobLookupError(job_id) - - def _dispatch_event(self, event): - """ - Dispatches the given event to interested listeners. - - :param SchedulerEvent event: the event to send - - """ - with self._listeners_lock: - listeners = tuple(self._listeners) - - for cb, mask in listeners: - if event.code & mask: - try: - cb(event) - except: - self._logger.exception('Error notifying listener') - - def _real_add_job(self, job, jobstore_alias, replace_existing): - """ - :param Job job: the job to add - :param bool replace_existing: ``True`` to use update_job() in case the job already exists - in the store - - """ - # Fill in undefined values with defaults - replacements = {} - for key, value in six.iteritems(self._job_defaults): - if not hasattr(job, key): - replacements[key] = value - - # Calculate the next run time if there is none defined - if not hasattr(job, 'next_run_time'): - now = datetime.now(self.timezone) - replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now) - - # Apply any replacements - job._modify(**replacements) - - # Add the job to the given job store - store = self._lookup_jobstore(jobstore_alias) - try: - store.add_job(job) - except ConflictingIdError: - if replace_existing: - store.update_job(job) - else: - raise - - # Mark the job as no longer pending - job._jobstore_alias = jobstore_alias - - # Notify listeners that a new job has been added - event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias) - self._dispatch_event(event) - - self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias) - - # Notify the scheduler about the new job - if self.state == STATE_RUNNING: - self.wakeup() - - def _create_plugin_instance(self, type_, alias, constructor_kwargs): - """Creates an instance of the given plugin type, loading the plugin first if necessary.""" - plugin_container, class_container, base_class = { - 'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger), - 'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore), - 'executor': (self._executor_plugins, self._executor_classes, BaseExecutor) - }[type_] - - try: - 
plugin_cls = class_container[alias] - except KeyError: - if alias in plugin_container: - plugin_cls = class_container[alias] = plugin_container[alias].load() - if not issubclass(plugin_cls, base_class): - raise TypeError('The {0} entry point does not point to a {0} class'. - format(type_)) - else: - raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias)) - - return plugin_cls(**constructor_kwargs) - - def _create_trigger(self, trigger, trigger_args): - if isinstance(trigger, BaseTrigger): - return trigger - elif trigger is None: - trigger = 'date' - elif not isinstance(trigger, six.string_types): - raise TypeError('Expected a trigger instance or string, got %s instead' % - trigger.__class__.__name__) - - # Use the scheduler's time zone if nothing else is specified - trigger_args.setdefault('timezone', self.timezone) - - # Instantiate the trigger class - return self._create_plugin_instance('trigger', trigger, trigger_args) - - def _create_lock(self): - """Creates a reentrant lock object.""" - return RLock() - - def _process_jobs(self): - """ - Iterates through jobs in every jobstore, starts jobs that are due and figures out how long - to wait for the next round. - - If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least - ``jobstore_retry_interval`` seconds. - - """ - if self.state == STATE_PAUSED: - self._logger.debug('Scheduler is paused -- not processing jobs') - return None - - self._logger.debug('Looking for jobs to run') - now = datetime.now(self.timezone) - next_wakeup_time = None - events = [] - - with self._jobstores_lock: - for jobstore_alias, jobstore in six.iteritems(self._jobstores): - try: - due_jobs = jobstore.get_due_jobs(now) - except Exception as e: - # Schedule a wakeup at least in jobstore_retry_interval seconds - self._logger.warning('Error getting due jobs from job store %r: %s', - jobstore_alias, e) - retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval) - if not next_wakeup_time or next_wakeup_time > retry_wakeup_time: - next_wakeup_time = retry_wakeup_time - - continue - - for job in due_jobs: - # Look up the job's executor - try: - executor = self._lookup_executor(job.executor) - except: - self._logger.error( - 'Executor lookup ("%s") failed for job "%s" -- removing it from the ' - 'job store', job.executor, job) - self.remove_job(job.id, jobstore_alias) - continue - - run_times = job._get_run_times(now) - run_times = run_times[-1:] if run_times and job.coalesce else run_times - if run_times: - try: - executor.submit_job(job, run_times) - except MaxInstancesReachedError: - self._logger.warning( - 'Execution of job "%s" skipped: maximum number of running ' - 'instances reached (%d)', job, job.max_instances) - event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id, - jobstore_alias, run_times) - events.append(event) - except: - self._logger.exception('Error submitting job "%s" to executor "%s"', - job, job.executor) - else: - event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias, - run_times) - events.append(event) - - # Update the job if it has a next execution time. - # Otherwise remove it from the job store. 
- job_next_run = job.trigger.get_next_fire_time(run_times[-1], now) - if job_next_run: - job._modify(next_run_time=job_next_run) - jobstore.update_job(job) - else: - self.remove_job(job.id, jobstore_alias) - - # Set a new next wakeup time if there isn't one yet or - # the jobstore has an even earlier one - jobstore_next_run_time = jobstore.get_next_run_time() - if jobstore_next_run_time and (next_wakeup_time is None or - jobstore_next_run_time < next_wakeup_time): - next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone) - - # Dispatch collected events - for event in events: - self._dispatch_event(event) - - # Determine the delay until this method should be called again - if self.state == STATE_PAUSED: - wait_seconds = None - self._logger.debug('Scheduler is paused; waiting until resume() is called') - elif next_wakeup_time is None: - wait_seconds = None - self._logger.debug('No jobs; waiting until a job is added') - else: - wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0) - self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, - wait_seconds) - - return wait_seconds diff --git a/lib/apscheduler/schedulers/blocking.py b/lib/apscheduler/schedulers/blocking.py deleted file mode 100644 index e6171575..00000000 --- a/lib/apscheduler/schedulers/blocking.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import absolute_import - -from threading import Event - -from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED -from apscheduler.util import TIMEOUT_MAX - - -class BlockingScheduler(BaseScheduler): - """ - A scheduler that runs in the foreground - (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block). - """ - _event = None - - def start(self, *args, **kwargs): - self._event = Event() - super(BlockingScheduler, self).start(*args, **kwargs) - self._main_loop() - - def shutdown(self, wait=True): - super(BlockingScheduler, self).shutdown(wait) - self._event.set() - - def _main_loop(self): - wait_seconds = TIMEOUT_MAX - while self.state != STATE_STOPPED: - self._event.wait(wait_seconds) - self._event.clear() - wait_seconds = self._process_jobs() - - def wakeup(self): - self._event.set() diff --git a/lib/apscheduler/schedulers/gevent.py b/lib/apscheduler/schedulers/gevent.py deleted file mode 100644 index d48ed74a..00000000 --- a/lib/apscheduler/schedulers/gevent.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.schedulers.blocking import BlockingScheduler -from apscheduler.schedulers.base import BaseScheduler - -try: - from gevent.event import Event - from gevent.lock import RLock - import gevent -except ImportError: # pragma: nocover - raise ImportError('GeventScheduler requires gevent installed') - - -class GeventScheduler(BlockingScheduler): - """A scheduler that runs as a Gevent greenlet.""" - - _greenlet = None - - def start(self, *args, **kwargs): - self._event = Event() - BaseScheduler.start(self, *args, **kwargs) - self._greenlet = gevent.spawn(self._main_loop) - return self._greenlet - - def shutdown(self, *args, **kwargs): - super(GeventScheduler, self).shutdown(*args, **kwargs) - self._greenlet.join() - del self._greenlet - - def _create_lock(self): - return RLock() - - def _create_default_executor(self): - from apscheduler.executors.gevent import GeventExecutor - return GeventExecutor() diff --git a/lib/apscheduler/schedulers/qt.py b/lib/apscheduler/schedulers/qt.py deleted file mode 100644 index 092533e9..00000000 --- a/lib/apscheduler/schedulers/qt.py +++ 
/dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.schedulers.base import BaseScheduler - -try: - from PyQt5.QtCore import QObject, QTimer -except ImportError: # pragma: nocover - try: - from PyQt4.QtCore import QObject, QTimer - except ImportError: - try: - from PySide.QtCore import QObject, QTimer # flake8: noqa - except ImportError: - raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed') - - -class QtScheduler(BaseScheduler): - """A scheduler that runs in a Qt event loop.""" - - _timer = None - - def shutdown(self, *args, **kwargs): - super(QtScheduler, self).shutdown(*args, **kwargs) - self._stop_timer() - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._timer = QTimer.singleShot(wait_seconds * 1000, self._process_jobs) - - def _stop_timer(self): - if self._timer: - if self._timer.isActive(): - self._timer.stop() - del self._timer - - def wakeup(self): - self._start_timer(0) - - def _process_jobs(self): - wait_seconds = super(QtScheduler, self)._process_jobs() - self._start_timer(wait_seconds) diff --git a/lib/apscheduler/schedulers/tornado.py b/lib/apscheduler/schedulers/tornado.py deleted file mode 100644 index 0a9171f2..00000000 --- a/lib/apscheduler/schedulers/tornado.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import absolute_import - -from datetime import timedelta -from functools import wraps - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - from tornado.ioloop import IOLoop -except ImportError: # pragma: nocover - raise ImportError('TornadoScheduler requires tornado installed') - - -def run_in_ioloop(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - self._ioloop.add_callback(func, self, *args, **kwargs) - return wrapper - - -class TornadoScheduler(BaseScheduler): - """ - A scheduler that runs on a Tornado IOLoop. - - The default executor can run jobs based on native coroutines (``async def``). 
- - =========== =============================================================== - ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop) - =========== =============================================================== - """ - - _ioloop = None - _timeout = None - - @run_in_ioloop - def shutdown(self, wait=True): - super(TornadoScheduler, self).shutdown(wait) - self._stop_timer() - - def _configure(self, config): - self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current() - super(TornadoScheduler, self)._configure(config) - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup) - - def _stop_timer(self): - if self._timeout: - self._ioloop.remove_timeout(self._timeout) - del self._timeout - - def _create_default_executor(self): - from apscheduler.executors.tornado import TornadoExecutor - return TornadoExecutor() - - @run_in_ioloop - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) diff --git a/lib/apscheduler/schedulers/twisted.py b/lib/apscheduler/schedulers/twisted.py deleted file mode 100644 index 6b43a84b..00000000 --- a/lib/apscheduler/schedulers/twisted.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import absolute_import - -from functools import wraps - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - from twisted.internet import reactor as default_reactor -except ImportError: # pragma: nocover - raise ImportError('TwistedScheduler requires Twisted installed') - - -def run_in_reactor(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - self._reactor.callFromThread(func, self, *args, **kwargs) - return wrapper - - -class TwistedScheduler(BaseScheduler): - """ - A scheduler that runs on a Twisted reactor. 
- - Extra options: - - =========== ======================================================== - ``reactor`` Reactor instance to use (defaults to the global reactor) - =========== ======================================================== - """ - - _reactor = None - _delayedcall = None - - def _configure(self, config): - self._reactor = maybe_ref(config.pop('reactor', default_reactor)) - super(TwistedScheduler, self)._configure(config) - - @run_in_reactor - def shutdown(self, wait=True): - super(TwistedScheduler, self).shutdown(wait) - self._stop_timer() - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup) - - def _stop_timer(self): - if self._delayedcall and self._delayedcall.active(): - self._delayedcall.cancel() - del self._delayedcall - - @run_in_reactor - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) - - def _create_default_executor(self): - from apscheduler.executors.twisted import TwistedExecutor - return TwistedExecutor() diff --git a/lib/apscheduler/triggers/__init__.py b/lib/apscheduler/triggers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/apscheduler/triggers/base.py b/lib/apscheduler/triggers/base.py deleted file mode 100644 index ba98632e..00000000 --- a/lib/apscheduler/triggers/base.py +++ /dev/null @@ -1,19 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import six - - -class BaseTrigger(six.with_metaclass(ABCMeta)): - """Abstract base class that defines the interface that every trigger must implement.""" - - __slots__ = () - - @abstractmethod - def get_next_fire_time(self, previous_fire_time, now): - """ - Returns the next datetime to fire on. If no such datetime can be calculated, returns - ``None``. - - :param datetime.datetime previous_fire_time: the previous time the trigger was fired - :param datetime.datetime now: current datetime - """ diff --git a/lib/apscheduler/triggers/cron/__init__.py b/lib/apscheduler/triggers/cron/__init__.py deleted file mode 100644 index eccee0c0..00000000 --- a/lib/apscheduler/triggers/cron/__init__.py +++ /dev/null @@ -1,206 +0,0 @@ -from datetime import datetime, timedelta - -from tzlocal import get_localzone -import six - -from apscheduler.triggers.base import BaseTrigger - -from apscheduler.triggers.cron.fields import ( - BaseField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES) -from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone - - -class CronTrigger(BaseTrigger): - """ - Triggers when current time matches all specified time constraints, - similarly to how the UNIX cron scheduler works. - - :param int|str year: 4-digit year - :param int|str month: month (1-12) - :param int|str day: day of the month (1-31) - :param int|str week: ISO week (1-53) - :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun) - :param int|str hour: hour (0-23) - :param int|str minute: minute (0-59) - :param int|str second: second (0-59) - :param datetime|str start_date: earliest possible date/time to trigger on (inclusive) - :param datetime|str end_date: latest possible date/time to trigger on (inclusive) - :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults - to scheduler timezone) - - .. note:: The first weekday is always **monday**.
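A short CronTrigger sketch matching the field semantics above: unspecified fields coarser than the given ones default to '*', finer ones to their minimum values, so the trigger below fires at 07:30:00 on every weekday (sched and tick continue the earlier illustrative setup):

    from apscheduler.triggers.cron import CronTrigger

    trigger = CronTrigger(day_of_week='mon-fri', hour=7, minute=30)
    sched.add_job(tick, trigger)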
- """ - - FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second') - FIELDS_MAP = { - 'year': BaseField, - 'month': BaseField, - 'week': WeekField, - 'day': DayOfMonthField, - 'day_of_week': DayOfWeekField, - 'hour': BaseField, - 'minute': BaseField, - 'second': BaseField - } - - __slots__ = 'timezone', 'start_date', 'end_date', 'fields' - - def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, - minute=None, second=None, start_date=None, end_date=None, timezone=None): - if timezone: - self.timezone = astimezone(timezone) - elif isinstance(start_date, datetime) and start_date.tzinfo: - self.timezone = start_date.tzinfo - elif isinstance(end_date, datetime) and end_date.tzinfo: - self.timezone = end_date.tzinfo - else: - self.timezone = get_localzone() - - self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') - self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') - - values = dict((key, value) for (key, value) in six.iteritems(locals()) - if key in self.FIELD_NAMES and value is not None) - self.fields = [] - assign_defaults = False - for field_name in self.FIELD_NAMES: - if field_name in values: - exprs = values.pop(field_name) - is_default = False - assign_defaults = not values - elif assign_defaults: - exprs = DEFAULT_VALUES[field_name] - is_default = True - else: - exprs = '*' - is_default = True - - field_class = self.FIELDS_MAP[field_name] - field = field_class(field_name, exprs, is_default) - self.fields.append(field) - - def _increment_field_value(self, dateval, fieldnum): - """ - Increments the designated field and resets all less significant fields to their minimum - values. - - :type dateval: datetime - :type fieldnum: int - :return: a tuple containing the new date, and the number of the field that was actually - incremented - :rtype: tuple - """ - - values = {} - i = 0 - while i < len(self.fields): - field = self.fields[i] - if not field.REAL: - if i == fieldnum: - fieldnum -= 1 - i -= 1 - else: - i += 1 - continue - - if i < fieldnum: - values[field.name] = field.get_value(dateval) - i += 1 - elif i > fieldnum: - values[field.name] = field.get_min(dateval) - i += 1 - else: - value = field.get_value(dateval) - maxval = field.get_max(dateval) - if value == maxval: - fieldnum -= 1 - i -= 1 - else: - values[field.name] = value + 1 - i += 1 - - difference = datetime(**values) - dateval.replace(tzinfo=None) - return self.timezone.normalize(dateval + difference), fieldnum - - def _set_field_value(self, dateval, fieldnum, new_value): - values = {} - for i, field in enumerate(self.fields): - if field.REAL: - if i < fieldnum: - values[field.name] = field.get_value(dateval) - elif i > fieldnum: - values[field.name] = field.get_min(dateval) - else: - values[field.name] = new_value - - return self.timezone.localize(datetime(**values)) - - def get_next_fire_time(self, previous_fire_time, now): - if previous_fire_time: - start_date = min(now, previous_fire_time + timedelta(microseconds=1)) - if start_date == previous_fire_time: - start_date += timedelta(microseconds=1) - else: - start_date = max(now, self.start_date) if self.start_date else now - - fieldnum = 0 - next_date = datetime_ceil(start_date).astimezone(self.timezone) - while 0 <= fieldnum < len(self.fields): - field = self.fields[fieldnum] - curr_value = field.get_value(next_date) - next_value = field.get_next_value(next_date) - - if next_value is None: - # No valid value was found - next_date, fieldnum = 
self._increment_field_value(next_date, fieldnum - 1) - elif next_value > curr_value: - # A valid, but higher than the starting value, was found - if field.REAL: - next_date = self._set_field_value(next_date, fieldnum, next_value) - fieldnum += 1 - else: - next_date, fieldnum = self._increment_field_value(next_date, fieldnum) - else: - # A valid value was found, no changes necessary - fieldnum += 1 - - # Return if the date has rolled past the end date - if self.end_date and next_date > self.end_date: - return None - - if fieldnum >= 0: - return next_date - - def __getstate__(self): - return { - 'version': 1, - 'timezone': self.timezone, - 'start_date': self.start_date, - 'end_date': self.end_date, - 'fields': self.fields - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 1: - raise ValueError( - 'Got serialized data for version %s of %s, but only version 1 can be handled' % - (state['version'], self.__class__.__name__)) - - self.timezone = state['timezone'] - self.start_date = state['start_date'] - self.end_date = state['end_date'] - self.fields = state['fields'] - - def __str__(self): - options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] - return 'cron[%s]' % (', '.join(options)) - - def __repr__(self): - options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] - if self.start_date: - options.append("start_date='%s'" % datetime_repr(self.start_date)) - return "<%s (%s, timezone='%s')>" % ( - self.__class__.__name__, ', '.join(options), self.timezone) diff --git a/lib/apscheduler/triggers/cron/expressions.py b/lib/apscheduler/triggers/cron/expressions.py deleted file mode 100644 index 21493d54..00000000 --- a/lib/apscheduler/triggers/cron/expressions.py +++ /dev/null @@ -1,195 +0,0 @@ -"""This module contains the expressions applicable for CronTrigger's fields.""" - -from calendar import monthrange -import re - -from apscheduler.util import asint - -__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', - 'WeekdayPositionExpression', 'LastDayOfMonthExpression') - - -WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] - - -class AllExpression(object): - value_re = re.compile(r'\*(?:/(?P<step>\d+))?$') - - def __init__(self, step=None): - self.step = asint(step) - if self.step == 0: - raise ValueError('Increment must be higher than 0') - - def get_next_value(self, date, field): - start = field.get_value(date) - minval = field.get_min(date) - maxval = field.get_max(date) - start = max(start, minval) - - if not self.step: - next = start - else: - distance_to_next = (self.step - (start - minval)) % self.step - next = start + distance_to_next - - if next <= maxval: - return next - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.step == other.step - - def __str__(self): - if self.step: - return '*/%d' % self.step - return '*' - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self.step) - - -class RangeExpression(AllExpression): - value_re = re.compile( - r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$') - - def __init__(self, first, last=None, step=None): - AllExpression.__init__(self, step) - first = asint(first) - last = asint(last) - if last is None and step is None: - last = first - if last is not None and first > last: - raise ValueError('The minimum value in a range must not be higher than the maximum') - self.first = first - self.last = last - - def get_next_value(self, date, field): - startval = field.get_value(date) - minval = field.get_min(date) - maxval = field.get_max(date) - - # Apply range limits - minval = max(minval, self.first) - maxval = min(maxval, self.last) if self.last is not None else maxval - nextval = max(minval, startval) - - # Apply the step if defined - if self.step: - distance_to_next = (self.step - (nextval - minval)) % self.step - nextval += distance_to_next - - return nextval if nextval <= maxval else None - - def __eq__(self, other): - return (isinstance(other, self.__class__) and self.first == other.first and - self.last == other.last) - - def __str__(self): - if self.last != self.first and self.last is not None: - range = '%d-%d' % (self.first, self.last) - else: - range = str(self.first) - - if self.step: - return '%s/%d' % (range, self.step) - return range - - def __repr__(self): - args = [str(self.first)] - if self.last != self.first and self.last is not None or self.step: - args.append(str(self.last)) - if self.step: - args.append(str(self.step)) - return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) - - -class WeekdayRangeExpression(RangeExpression): - value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) - - def __init__(self, first, last=None): - try: - first_num = WEEKDAYS.index(first.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % first) - - if last: - try: - last_num = WEEKDAYS.index(last.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % last) - else: - last_num = None - - RangeExpression.__init__(self, first_num, last_num) - - def __str__(self): - if self.last != self.first and self.last is not None: - return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) - return WEEKDAYS[self.first] - - def __repr__(self): - args = ["'%s'" % WEEKDAYS[self.first]] - if self.last != self.first and self.last is not None: - args.append("'%s'" % WEEKDAYS[self.last]) - return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) - - -class WeekdayPositionExpression(AllExpression): - options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] - value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % - '|'.join(options), re.IGNORECASE) - - def __init__(self, option_name, weekday_name): - try: - self.option_num = self.options.index(option_name.lower()) - except ValueError: - raise ValueError('Invalid weekday position "%s"' % option_name) - - try: - self.weekday = WEEKDAYS.index(weekday_name.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % weekday_name) - - def get_next_value(self, date, field): - # Figure out the weekday of the month's first day and the number of days in that month - first_day_wday, last_day = monthrange(date.year, date.month) - - # Calculate which day of the month is the first of the target weekdays - first_hit_day = self.weekday - first_day_wday + 1 - if first_hit_day <= 0: - first_hit_day += 7 - - # Calculate what day of the month the target weekday would be - if self.option_num < 5: - target_day = first_hit_day + self.option_num * 7 - else: - target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7 - - if target_day <= last_day and target_day >= date.day: - return target_day - - def __eq__(self, other): - return (super(WeekdayPositionExpression, self).__eq__(other) and - self.option_num == other.option_num and self.weekday == other.weekday) - - def __str__(self): - return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday]) - - def __repr__(self): - return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], - WEEKDAYS[self.weekday]) - - -class LastDayOfMonthExpression(AllExpression): - value_re = re.compile(r'last', re.IGNORECASE) - - def __init__(self): - pass - - def get_next_value(self, date, field): - return monthrange(date.year, date.month)[1] - - def __str__(self): - return 'last' - - def __repr__(self): - return "%s()" % self.__class__.__name__ diff --git a/lib/apscheduler/triggers/cron/fields.py b/lib/apscheduler/triggers/cron/fields.py deleted file mode 100644 index 892bc13f..00000000 --- a/lib/apscheduler/triggers/cron/fields.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields.""" - -from calendar import monthrange - -from apscheduler.triggers.cron.expressions import ( - AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, - WeekdayRangeExpression) - - -__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', - 'DayOfMonthField', 'DayOfWeekField') - - -MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, - 'minute': 0, 'second': 0} -MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, - 'minute': 59, 'second': 59} -DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, - 'minute': 0, 'second': 0} - - -class BaseField(object): - REAL = True - COMPILERS = [AllExpression, RangeExpression] - - def __init__(self, name, exprs, is_default=False): - self.name = name - self.is_default = is_default - self.compile_expressions(exprs) - - def get_min(self, dateval): - return MIN_VALUES[self.name] - - def get_max(self, dateval): - return MAX_VALUES[self.name] - - def get_value(self, dateval): - return getattr(dateval, self.name) - - def get_next_value(self, dateval): - smallest = None - for expr in self.expressions: - value = expr.get_next_value(dateval, self) - if smallest is None or (value is not None and value < smallest): - smallest = value - - return smallest - - def compile_expressions(self, exprs): - self.expressions = [] - - # Split a comma-separated expression list, if any - exprs = str(exprs).strip() - if ',' in exprs: - for expr in exprs.split(','): - self.compile_expression(expr) - else: - self.compile_expression(exprs) - - def compile_expression(self, expr): - for compiler in self.COMPILERS: - match = compiler.value_re.match(expr) - if match: - compiled_expr = compiler(**match.groupdict()) - self.expressions.append(compiled_expr) - return - - raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.expressions == other.expressions - - def __str__(self): - expr_strings = (str(e) for e in self.expressions) - return ','.join(expr_strings) - - def __repr__(self): - return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self) - - -class WeekField(BaseField): - REAL = False - - def get_value(self, dateval): - return dateval.isocalendar()[1] - - -class DayOfMonthField(BaseField): - COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] - - def get_max(self, dateval): - return monthrange(dateval.year, dateval.month)[1] - - -class DayOfWeekField(BaseField): - REAL = False - COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] - - def get_value(self, dateval): - return dateval.weekday() diff --git
a/lib/apscheduler/triggers/date.py b/lib/apscheduler/triggers/date.py deleted file mode 100644 index 07681008..00000000 --- a/lib/apscheduler/triggers/date.py +++ /dev/null @@ -1,51 +0,0 @@ -from datetime import datetime - -from tzlocal import get_localzone - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import convert_to_datetime, datetime_repr, astimezone - - -class DateTrigger(BaseTrigger): - """ - Triggers once on the given datetime. If ``run_date`` is left empty, current time is used. - - :param datetime|str run_date: the date/time to run the job at - :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already - """ - - __slots__ = 'run_date' - - def __init__(self, run_date=None, timezone=None): - timezone = astimezone(timezone) or get_localzone() - if run_date is not None: - self.run_date = convert_to_datetime(run_date, timezone, 'run_date') - else: - self.run_date = datetime.now(timezone) - - def get_next_fire_time(self, previous_fire_time, now): - return self.run_date if previous_fire_time is None else None - - def __getstate__(self): - return { - 'version': 1, - 'run_date': self.run_date - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 1: - raise ValueError( - 'Got serialized data for version %s of %s, but only version 1 can be handled' % - (state['version'], self.__class__.__name__)) - - self.run_date = state['run_date'] - - def __str__(self): - return 'date[%s]' % datetime_repr(self.run_date) - - def __repr__(self): - return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date)) diff --git a/lib/apscheduler/triggers/interval.py b/lib/apscheduler/triggers/interval.py deleted file mode 100644 index fec912a2..00000000 --- a/lib/apscheduler/triggers/interval.py +++ /dev/null @@ -1,92 +0,0 @@ -from datetime import timedelta, datetime -from math import ceil - -from tzlocal import get_localzone - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone - - -class IntervalTrigger(BaseTrigger): - """ - Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` + - interval otherwise. 
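For contrast with the cron trigger, sketches of the two simpler trigger types defined here; the datetimes and variable names are illustrative:

    from datetime import datetime
    from apscheduler.triggers.date import DateTrigger
    from apscheduler.triggers.interval import IntervalTrigger

    # Fires exactly once; afterwards get_next_fire_time() returns None.
    once = DateTrigger(run_date=datetime(2020, 1, 8, 3, 0, 0))

    # First fire at start_date (here: now + interval), then every 6 hours.
    every_6h = IntervalTrigger(hours=6)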
- - :param int weeks: number of weeks to wait - :param int days: number of days to wait - :param int hours: number of hours to wait - :param int minutes: number of minutes to wait - :param int seconds: number of seconds to wait - :param datetime|str start_date: starting point for the interval calculation - :param datetime|str end_date: latest possible date/time to trigger on - :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations - """ - - __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length' - - def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, - end_date=None, timezone=None): - self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, - seconds=seconds) - self.interval_length = timedelta_seconds(self.interval) - if self.interval_length == 0: - self.interval = timedelta(seconds=1) - self.interval_length = 1 - - if timezone: - self.timezone = astimezone(timezone) - elif isinstance(start_date, datetime) and start_date.tzinfo: - self.timezone = start_date.tzinfo - elif isinstance(end_date, datetime) and end_date.tzinfo: - self.timezone = end_date.tzinfo - else: - self.timezone = get_localzone() - - start_date = start_date or (datetime.now(self.timezone) + self.interval) - self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') - self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') - - def get_next_fire_time(self, previous_fire_time, now): - if previous_fire_time: - next_fire_time = previous_fire_time + self.interval - elif self.start_date > now: - next_fire_time = self.start_date - else: - timediff_seconds = timedelta_seconds(now - self.start_date) - next_interval_num = int(ceil(timediff_seconds / self.interval_length)) - next_fire_time = self.start_date + self.interval * next_interval_num - - if not self.end_date or next_fire_time <= self.end_date: - return self.timezone.normalize(next_fire_time) - - def __getstate__(self): - return { - 'version': 1, - 'timezone': self.timezone, - 'start_date': self.start_date, - 'end_date': self.end_date, - 'interval': self.interval - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 1: - raise ValueError( - 'Got serialized data for version %s of %s, but only version 1 can be handled' % - (state['version'], self.__class__.__name__)) - - self.timezone = state['timezone'] - self.start_date = state['start_date'] - self.end_date = state['end_date'] - self.interval = state['interval'] - self.interval_length = timedelta_seconds(self.interval) - - def __str__(self): - return 'interval[%s]' % str(self.interval) - - def __repr__(self): - return "<%s (interval=%r, start_date='%s', timezone='%s')>" % ( - self.__class__.__name__, self.interval, datetime_repr(self.start_date), self.timezone) diff --git a/lib/apscheduler/util.py b/lib/apscheduler/util.py deleted file mode 100644 index 63ac8ac8..00000000 --- a/lib/apscheduler/util.py +++ /dev/null @@ -1,385 +0,0 @@ -"""This module contains several handy functions primarily meant for internal use.""" - -from __future__ import division -from datetime import date, datetime, time, timedelta, tzinfo -from calendar import timegm -import re -from functools import partial - -from pytz import timezone, utc -import six - -try: - from inspect import signature -except ImportError: # pragma: nocover - from funcsigs import signature - -try: - from threading 
import TIMEOUT_MAX -except ImportError: - TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows - -__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', - 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', - 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args') - - -class _Undefined(object): - def __nonzero__(self): - return False - - def __bool__(self): - return False - - def __repr__(self): - return '<undefined>' - - -undefined = _Undefined() #: a unique object that only signifies that no value is defined - - -def asint(text): - """ - Safely converts a string to an integer, returning ``None`` if the string is ``None``. - - :type text: str - :rtype: int - - """ - if text is not None: - return int(text) - - -def asbool(obj): - """ - Interprets an object as a boolean value. - - :rtype: bool - - """ - if isinstance(obj, str): - obj = obj.strip().lower() - if obj in ('true', 'yes', 'on', 'y', 't', '1'): - return True - if obj in ('false', 'no', 'off', 'n', 'f', '0'): - return False - raise ValueError('Unable to interpret value "%s" as boolean' % obj) - return bool(obj) - - -def astimezone(obj): - """ - Interprets an object as a timezone. - - :rtype: tzinfo - - """ - if isinstance(obj, six.string_types): - return timezone(obj) - if isinstance(obj, tzinfo): - if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'): - raise TypeError('Only timezones from the pytz library are supported') - if obj.zone == 'local': - raise ValueError( - 'Unable to determine the name of the local timezone -- you must explicitly ' - 'specify the name of the local timezone. Please refrain from using timezones like ' - 'EST to prevent problems with daylight saving time. Instead, use a locale based ' - 'timezone name (such as Europe/Helsinki).') - return obj - if obj is not None: - raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__) - - -_DATE_REGEX = re.compile( - r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' - r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})' - r'(?:\.(?P<microsecond>\d{1,6}))?)?') - - -def convert_to_datetime(input, tz, arg_name): - """ - Converts the given object to a timezone aware datetime object. - - If a timezone aware datetime object is passed, it is returned unmodified. - If a naive datetime object is passed, it is given the specified timezone. - If the input is a string, it is parsed as a datetime with the given timezone. - - Date strings are accepted in three different forms: date only (Y-m-d), date with time - (Y-m-d H:M:S) or with date+time with microseconds (Y-m-d H:M:S.micro).
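Illustrative calls covering the three accepted string forms (a sketch; the timezone choice is arbitrary):

    from pytz import timezone
    from apscheduler.util import convert_to_datetime

    tz = timezone('Europe/Helsinki')
    convert_to_datetime('2020-01-07', tz, 'run_date')                  # date only
    convert_to_datetime('2020-01-07 01:18:46', tz, 'run_date')         # date + time
    convert_to_datetime('2020-01-07 01:18:46.123456', tz, 'run_date')  # + microseconds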
- - :param str|datetime input: the datetime or string to convert to a timezone aware datetime - :param datetime.tzinfo tz: timezone to interpret ``input`` in - :param str arg_name: the name of the argument (used in an error message) - :rtype: datetime - - """ - if input is None: - return - elif isinstance(input, datetime): - datetime_ = input - elif isinstance(input, date): - datetime_ = datetime.combine(input, time()) - elif isinstance(input, six.string_types): - m = _DATE_REGEX.match(input) - if not m: - raise ValueError('Invalid date string') - values = [(k, int(v or 0)) for k, v in m.groupdict().items()] - values = dict(values) - datetime_ = datetime(**values) - else: - raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__)) - - if datetime_.tzinfo is not None: - return datetime_ - if tz is None: - raise ValueError( - 'The "tz" argument must be specified if %s has no timezone information' % arg_name) - if isinstance(tz, six.string_types): - tz = timezone(tz) - - try: - return tz.localize(datetime_, is_dst=None) - except AttributeError: - raise TypeError( - 'Only pytz timezones are supported (need the localize() and normalize() methods)') - - -def datetime_to_utc_timestamp(timeval): - """ - Converts a datetime instance to a timestamp. - - :type timeval: datetime - :rtype: float - - """ - if timeval is not None: - return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000 - - -def utc_timestamp_to_datetime(timestamp): - """ - Converts the given timestamp to a datetime instance. - - :type timestamp: float - :rtype: datetime - - """ - if timestamp is not None: - return datetime.fromtimestamp(timestamp, utc) - - -def timedelta_seconds(delta): - """ - Converts the given timedelta to seconds. - - :type delta: timedelta - :rtype: float - - """ - return delta.days * 24 * 60 * 60 + delta.seconds + \ - delta.microseconds / 1000000.0 - - -def datetime_ceil(dateval): - """ - Rounds the given datetime object upwards. - - :type dateval: datetime - - """ - if dateval.microsecond > 0: - return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond) - return dateval - - -def datetime_repr(dateval): - return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None' - - -def get_callable_name(func): - """ - Returns the best available display name for the given function/callable. - - :rtype: str - - """ - # the easy case (on Python 3.3+) - if hasattr(func, '__qualname__'): - return func.__qualname__ - - # class methods, bound and unbound methods - f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None) - if f_self and hasattr(func, '__name__'): - f_class = f_self if isinstance(f_self, type) else f_self.__class__ - else: - f_class = getattr(func, 'im_class', None) - - if f_class and hasattr(func, '__name__'): - return '%s.%s' % (f_class.__name__, func.__name__) - - # class or class instance - if hasattr(func, '__call__'): - # class - if hasattr(func, '__name__'): - return func.__name__ - - # instance of a class with a __call__ method - return func.__class__.__name__ - - raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func) - - -def obj_to_ref(obj): - """ - Returns the path to the given callable. 
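The reference format produced here is 'module:qualified_name'; a round-trip sketch using a function from this same module:

    from apscheduler.util import convert_to_datetime, obj_to_ref, ref_to_obj

    ref = obj_to_ref(convert_to_datetime)  # 'apscheduler.util:convert_to_datetime'
    func = ref_to_obj(ref)                 # imports the module, then walks attributes
    assert func is convert_to_datetime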
- - :rtype: str - :raises TypeError: if the given object is not callable - :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested - function - - """ - if isinstance(obj, partial): - raise ValueError('Cannot create a reference to a partial()') - - name = get_callable_name(obj) - if '<lambda>' in name: - raise ValueError('Cannot create a reference to a lambda') - if '<locals>' in name: - raise ValueError('Cannot create a reference to a nested function') - - return '%s:%s' % (obj.__module__, name) - - -def ref_to_obj(ref): - """ - Returns the object pointed to by ``ref``. - - :type ref: str - - """ - if not isinstance(ref, six.string_types): - raise TypeError('References must be strings') - if ':' not in ref: - raise ValueError('Invalid reference') - - modulename, rest = ref.split(':', 1) - try: - obj = __import__(modulename, fromlist=[rest]) - except ImportError: - raise LookupError('Error resolving reference %s: could not import module' % ref) - - try: - for name in rest.split('.'): - obj = getattr(obj, name) - return obj - except Exception: - raise LookupError('Error resolving reference %s: error looking up object' % ref) - - -def maybe_ref(ref): - """ - Returns the object that the given reference points to, if it is indeed a reference. - If it is not a reference, the object is returned as-is. - - """ - if not isinstance(ref, str): - return ref - return ref_to_obj(ref) - - -if six.PY2: - def repr_escape(string): - if isinstance(string, six.text_type): - return string.encode('ascii', 'backslashreplace') - return string -else: - def repr_escape(string): - return string - - -def check_callable_args(func, args, kwargs): - """ - Ensures that the given callable can be called with the given arguments. - - :type args: tuple - :type kwargs: dict - - """ - pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs - positional_only_kwargs = [] # positional-only parameters that have a match in kwargs - unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs - unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs - unmatched_args = list(args) # args that didn't match any of the parameters in the signature - # kwargs that didn't match any of the parameters in the signature - unmatched_kwargs = list(kwargs) - # indicates if the signature defines *args and **kwargs respectively - has_varargs = has_var_kwargs = False - - try: - sig = signature(func) - except ValueError: - # signature() doesn't work against every kind of callable - return - - for param in six.itervalues(sig.parameters): - if param.kind == param.POSITIONAL_OR_KEYWORD: - if param.name in unmatched_kwargs and unmatched_args: - pos_kwargs_conflicts.append(param.name) - elif unmatched_args: - del unmatched_args[0] - elif param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - elif param.default is param.empty: - unsatisfied_args.append(param.name) - elif param.kind == param.POSITIONAL_ONLY: - if unmatched_args: - del unmatched_args[0] - elif param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - positional_only_kwargs.append(param.name) - elif param.default is param.empty: - unsatisfied_args.append(param.name) - elif param.kind == param.KEYWORD_ONLY: - if param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - elif param.default is param.empty: - unsatisfied_kwargs.append(param.name) - elif param.kind == param.VAR_POSITIONAL: - has_varargs = True - elif param.kind == param.VAR_KEYWORD: -
has_var_kwargs = True - - # Make sure there are no conflicts between args and kwargs - if pos_kwargs_conflicts: - raise ValueError('The following arguments are supplied in both args and kwargs: %s' % - ', '.join(pos_kwargs_conflicts)) - - # Check if keyword arguments are being fed to positional-only parameters - if positional_only_kwargs: - raise ValueError('The following arguments cannot be given as keyword arguments: %s' % - ', '.join(positional_only_kwargs)) - - # Check that the number of positional arguments minus the number of matched kwargs matches the - # argspec - if unsatisfied_args: - raise ValueError('The following arguments have not been supplied: %s' % - ', '.join(unsatisfied_args)) - - # Check that all keyword-only arguments have been supplied - if unsatisfied_kwargs: - raise ValueError( - 'The following keyword-only arguments have not been supplied in kwargs: %s' % - ', '.join(unsatisfied_kwargs)) - - # Check that the callable can accept the given number of positional arguments - if not has_varargs and unmatched_args: - raise ValueError( - 'The list of positional arguments is longer than the target callable can handle ' - '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args))) - - # Check that the callable can accept the given keyword arguments - if not has_var_kwargs and unmatched_kwargs: - raise ValueError( - 'The target callable does not accept the following keyword arguments: %s' % - ', '.join(unmatched_kwargs)) diff --git a/lib/argparse.py b/lib/argparse.py deleted file mode 100755 index f0cfe27e..00000000 --- a/lib/argparse.py +++ /dev/null @@ -1,2386 +0,0 @@ -# Author: Steven J. Bethard <steven.bethard@gmail.com>. - -"""Command-line parsing library - -This module is an optparse-inspired command-line parsing library that: - - - handles both optional and positional arguments - - produces highly informative usage messages - - supports parsers that dispatch to sub-parsers - -The following is a simple usage example that sums integers from the -command-line and writes the result to a file:: - - parser = argparse.ArgumentParser( - description='sum the integers at the command line') - parser.add_argument( - 'integers', metavar='int', nargs='+', type=int, - help='an integer to be summed') - parser.add_argument( - '--log', default=sys.stdout, type=argparse.FileType('w'), - help='the file where the sum should be written') - args = parser.parse_args() - args.log.write('%s' % sum(args.integers)) - args.log.close() - -The module contains the following public classes: - - - ArgumentParser -- The main entry point for command-line parsing. As the - example above shows, the add_argument() method is used to populate - the parser with actions for optional and positional arguments. Then - the parse_args() method is invoked to convert the args at the - command-line into an object with attributes. - - - ArgumentError -- The exception raised by ArgumentParser objects when - there are errors with the parser's actions. Errors raised while - parsing the command-line are caught by ArgumentParser and emitted - as command-line messages. - - - FileType -- A factory for defining types of files to be created. As the - example above shows, instances of FileType are typically passed as - the type= argument of add_argument() calls. - - - Action -- The base class for parser actions. Typically actions are - selected by passing strings like 'store_true' or 'append_const' to - the action= argument of add_argument().
However, for greater - customization of ArgumentParser actions, subclasses of Action may - be defined and passed as the action= argument. - - - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, - ArgumentDefaultsHelpFormatter -- Formatter classes which - may be passed as the formatter_class= argument to the - ArgumentParser constructor. HelpFormatter is the default, - RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser - not to change the formatting for help text, and - ArgumentDefaultsHelpFormatter adds information about argument defaults - to the help. - -All other classes in this module are considered implementation details. -(Also note that HelpFormatter and RawDescriptionHelpFormatter are only -considered public as object names -- the API of the formatter objects is -still considered an implementation detail.) -""" - -__version__ = '1.1' -__all__ = [ - 'ArgumentParser', - 'ArgumentError', - 'ArgumentTypeError', - 'FileType', - 'HelpFormatter', - 'ArgumentDefaultsHelpFormatter', - 'RawDescriptionHelpFormatter', - 'RawTextHelpFormatter', - 'MetavarTypeHelpFormatter', - 'Namespace', - 'Action', - 'ONE_OR_MORE', - 'OPTIONAL', - 'PARSER', - 'REMAINDER', - 'SUPPRESS', - 'ZERO_OR_MORE', -] - - -import collections as _collections -import copy as _copy -import os as _os -import re as _re -import sys as _sys -import textwrap as _textwrap - -from gettext import gettext as _, ngettext - - -def _callable(obj): - return hasattr(obj, '__call__') or hasattr(obj, '__bases__') - - -SUPPRESS = '==SUPPRESS==' - -OPTIONAL = '?' -ZERO_OR_MORE = '*' -ONE_OR_MORE = '+' -PARSER = 'A...' -REMAINDER = '...' -_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' - -# ============================= -# Utility functions and classes -# ============================= - -class _AttributeHolder(object): - """Abstract base class that provides __repr__. - - The __repr__ method returns a string in the format:: - ClassName(attr=name, attr=name, ...) - The attributes are determined either by a class-level attribute, - '_kwarg_names', or by inspecting the instance __dict__. - """ - - def __repr__(self): - type_name = type(self).__name__ - arg_strings = [] - for arg in self._get_args(): - arg_strings.append(repr(arg)) - for name, value in self._get_kwargs(): - arg_strings.append('%s=%r' % (name, value)) - return '%s(%s)' % (type_name, ', '.join(arg_strings)) - - def _get_kwargs(self): - return sorted(self.__dict__.items()) - - def _get_args(self): - return [] - - -def _ensure_value(namespace, name, value): - if getattr(namespace, name, None) is None: - setattr(namespace, name, value) - return getattr(namespace, name) - - -# =============== -# Formatting Help -# =============== - -class HelpFormatter(object): - """Formatter for generating usage messages and argument help strings. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. 
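To illustrate how the formatter classes listed above are meant to be selected (a hedged sketch against the public API only; the class internals that follow are implementation detail), the formatter_class= argument is passed at parser construction::

    import argparse

    # RawDescriptionHelpFormatter preserves the description's own line
    # breaks, where the default HelpFormatter would re-wrap them
    parser = argparse.ArgumentParser(
        prog='demo',
        description='examples:\n  demo --verbose',
        formatter_class=argparse.RawDescriptionHelpFormatter)
    print(parser.format_help())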
- """ - - def __init__(self, - prog, - indent_increment=2, - max_help_position=24, - width=None): - - # default setting for width - if width is None: - try: - width = int(_os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - - self._prog = prog - self._indent_increment = indent_increment - self._max_help_position = max_help_position - self._width = width - - self._current_indent = 0 - self._level = 0 - self._action_max_length = 0 - - self._root_section = self._Section(self, None) - self._current_section = self._root_section - - self._whitespace_matcher = _re.compile(r'\s+') - self._long_break_matcher = _re.compile(r'\n\n\n+') - - # =============================== - # Section and indentation methods - # =============================== - def _indent(self): - self._current_indent += self._indent_increment - self._level += 1 - - def _dedent(self): - self._current_indent -= self._indent_increment - assert self._current_indent >= 0, 'Indent decreased below 0.' - self._level -= 1 - - class _Section(object): - - def __init__(self, formatter, parent, heading=None): - self.formatter = formatter - self.parent = parent - self.heading = heading - self.items = [] - - def format_help(self): - # format the indented section - if self.parent is not None: - self.formatter._indent() - join = self.formatter._join_parts - for func, args in self.items: - func(*args) - item_help = join([func(*args) for func, args in self.items]) - if self.parent is not None: - self.formatter._dedent() - - # return nothing if the section was empty - if not item_help: - return '' - - # add the heading if the section was non-empty - if self.heading is not SUPPRESS and self.heading is not None: - current_indent = self.formatter._current_indent - heading = '%*s%s:\n' % (current_indent, '', self.heading) - else: - heading = '' - - # join the section-initial newline, the heading and the help - return join(['\n', heading, item_help, '\n']) - - def _add_item(self, func, args): - self._current_section.items.append((func, args)) - - # ======================== - # Message building methods - # ======================== - def start_section(self, heading): - self._indent() - section = self._Section(self, self._current_section, heading) - self._add_item(section.format_help, []) - self._current_section = section - - def end_section(self): - self._current_section = self._current_section.parent - self._dedent() - - def add_text(self, text): - if text is not SUPPRESS and text is not None: - self._add_item(self._format_text, [text]) - - def add_usage(self, usage, actions, groups, prefix=None): - if usage is not SUPPRESS: - args = usage, actions, groups, prefix - self._add_item(self._format_usage, args) - - def add_argument(self, action): - if action.help is not SUPPRESS: - - # find all invocations - get_invocation = self._format_action_invocation - invocations = [get_invocation(action)] - for subaction in self._iter_indented_subactions(action): - invocations.append(get_invocation(subaction)) - - # update the maximum item length - invocation_length = max([len(s) for s in invocations]) - action_length = invocation_length + self._current_indent - self._action_max_length = max(self._action_max_length, - action_length) - - # add the item to the list - self._add_item(self._format_action, [action]) - - def add_arguments(self, actions): - for action in actions: - self.add_argument(action) - - # ======================= - # Help-formatting methods - # ======================= - def format_help(self): - help = 
self._root_section.format_help() - if help: - help = self._long_break_matcher.sub('\n\n', help) - help = help.strip('\n') + '\n' - return help - - def _join_parts(self, part_strings): - return ''.join([part - for part in part_strings - if part and part is not SUPPRESS]) - - def _format_usage(self, usage, actions, groups, prefix): - if prefix is None: - prefix = _('usage: ') - - # if usage is specified, use that - if usage is not None: - usage = usage % dict(prog=self._prog) - - # if no optionals or positionals are available, usage is just prog - elif usage is None and not actions: - usage = '%(prog)s' % dict(prog=self._prog) - - # if optionals and positionals are available, calculate usage - elif usage is None: - prog = '%(prog)s' % dict(prog=self._prog) - - # split optionals from positionals - optionals = [] - positionals = [] - for action in actions: - if action.option_strings: - optionals.append(action) - else: - positionals.append(action) - - # build full usage string - format = self._format_actions_usage - action_usage = format(optionals + positionals, groups) - usage = ' '.join([s for s in [prog, action_usage] if s]) - - # wrap the usage parts if it's too long - text_width = self._width - self._current_indent - if len(prefix) + len(usage) > text_width: - - # break usage into wrappable parts - part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' - opt_usage = format(optionals, groups) - pos_usage = format(positionals, groups) - opt_parts = _re.findall(part_regexp, opt_usage) - pos_parts = _re.findall(part_regexp, pos_usage) - assert ' '.join(opt_parts) == opt_usage - assert ' '.join(pos_parts) == pos_usage - - # helper for wrapping lines - def get_lines(parts, indent, prefix=None): - lines = [] - line = [] - if prefix is not None: - line_len = len(prefix) - 1 - else: - line_len = len(indent) - 1 - for part in parts: - if line_len + 1 + len(part) > text_width: - lines.append(indent + ' '.join(line)) - line = [] - line_len = len(indent) - 1 - line.append(part) - line_len += len(part) + 1 - if line: - lines.append(indent + ' '.join(line)) - if prefix is not None: - lines[0] = lines[0][len(indent):] - return lines - - # if prog is short, follow it with optionals or positionals - if len(prefix) + len(prog) <= 0.75 * text_width: - indent = ' ' * (len(prefix) + len(prog) + 1) - if opt_parts: - lines = get_lines([prog] + opt_parts, indent, prefix) - lines.extend(get_lines(pos_parts, indent)) - elif pos_parts: - lines = get_lines([prog] + pos_parts, indent, prefix) - else: - lines = [prog] - - # if prog is long, put it on its own line - else: - indent = ' ' * len(prefix) - parts = opt_parts + pos_parts - lines = get_lines(parts, indent) - if len(lines) > 1: - lines = [] - lines.extend(get_lines(opt_parts, indent)) - lines.extend(get_lines(pos_parts, indent)) - lines = [prog] + lines - - # join lines into usage - usage = '\n'.join(lines) - - # prefix with 'usage:' - return '%s%s\n\n' % (prefix, usage) - - def _format_actions_usage(self, actions, groups): - # find group indices and identify actions in groups - group_actions = set() - inserts = {} - for group in groups: - try: - start = actions.index(group._group_actions[0]) - except ValueError: - continue - else: - end = start + len(group._group_actions) - if actions[start:end] == group._group_actions: - for action in group._group_actions: - group_actions.add(action) - if not group.required: - if start in inserts: - inserts[start] += ' [' - else: - inserts[start] = '[' - inserts[end] = ']' - else: - if start in inserts: - inserts[start] += ' (' - else: - 
inserts[start] = '(' - inserts[end] = ')' - for i in range(start + 1, end): - inserts[i] = '|' - - # collect all actions format strings - parts = [] - for i, action in enumerate(actions): - - # suppressed arguments are marked with None - # remove | separators for suppressed arguments - if action.help is SUPPRESS: - parts.append(None) - if inserts.get(i) == '|': - inserts.pop(i) - elif inserts.get(i + 1) == '|': - inserts.pop(i + 1) - - # produce all arg strings - elif not action.option_strings: - default = self._get_default_metavar_for_positional(action) - part = self._format_args(action, default) - - # if it's in a group, strip the outer [] - if action in group_actions: - if part[0] == '[' and part[-1] == ']': - part = part[1:-1] - - # add the action string to the list - parts.append(part) - - # produce the first way to invoke the option in brackets - else: - option_string = action.option_strings[0] - - # if the Optional doesn't take a value, format is: - # -s or --long - if action.nargs == 0: - part = '%s' % option_string - - # if the Optional takes a value, format is: - # -s ARGS or --long ARGS - else: - default = self._get_default_metavar_for_optional(action) - args_string = self._format_args(action, default) - part = '%s %s' % (option_string, args_string) - - # make it look optional if it's not required or in a group - if not action.required and action not in group_actions: - part = '[%s]' % part - - # add the action string to the list - parts.append(part) - - # insert things at the necessary indices - for i in sorted(inserts, reverse=True): - parts[i:i] = [inserts[i]] - - # join all the action items with spaces - text = ' '.join([item for item in parts if item is not None]) - - # clean up separators for mutually exclusive groups - open = r'[\[(]' - close = r'[\])]' - text = _re.sub(r'(%s) ' % open, r'\1', text) - text = _re.sub(r' (%s)' % close, r'\1', text) - text = _re.sub(r'%s *%s' % (open, close), r'', text) - text = _re.sub(r'\(([^|]*)\)', r'\1', text) - text = text.strip() - - # return the text - return text - - def _format_text(self, text): - if '%(prog)' in text: - text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent - indent = ' ' * self._current_indent - return self._fill_text(text, text_width, indent) + '\n\n' - - def _format_action(self, action): - # determine the required width and the entry label - help_position = min(self._action_max_length + 2, - self._max_help_position) - help_width = self._width - help_position - action_width = help_position - self._current_indent - 2 - action_header = self._format_action_invocation(action) - - # no help; start on same line and add a final newline - if not action.help: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup - - # short action name; start on the same line and pad two spaces - elif len(action_header) <= action_width: - tup = self._current_indent, '', action_width, action_header - action_header = '%*s%-*s ' % tup - indent_first = 0 - - # long action name; start on the next line - else: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup - indent_first = help_position - - # collect the pieces of the action help - parts = [action_header] - - # if there was help for the action, add lines of help text - if action.help: - help_text = self._expand_help(action) - help_lines = self._split_lines(help_text, help_width) - parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) - for line in help_lines[1:]: - parts.append('%*s%s\n' %
(help_position, '', line)) - - # or add a newline if the description doesn't end with one - elif not action_header.endswith('\n'): - parts.append('\n') - - # if there are any sub-actions, add their help as well - for subaction in self._iter_indented_subactions(action): - parts.append(self._format_action(subaction)) - - # return a single string - return self._join_parts(parts) - - def _format_action_invocation(self, action): - if not action.option_strings: - default = self._get_default_metavar_for_positional(action) - metavar, = self._metavar_formatter(action, default)(1) - return metavar - - else: - parts = [] - - # if the Optional doesn't take a value, format is: - # -s, --long - if action.nargs == 0: - parts.extend(action.option_strings) - - # if the Optional takes a value, format is: - # -s ARGS, --long ARGS - else: - default = self._get_default_metavar_for_optional(action) - args_string = self._format_args(action, default) - for option_string in action.option_strings: - parts.append('%s %s' % (option_string, args_string)) - - return ', '.join(parts) - - def _metavar_formatter(self, action, default_metavar): - if action.metavar is not None: - result = action.metavar - elif action.choices is not None: - choice_strs = [str(choice) for choice in action.choices] - result = '{%s}' % ','.join(choice_strs) - else: - result = default_metavar - - def format(tuple_size): - if isinstance(result, tuple): - return result - else: - return (result, ) * tuple_size - return format - - def _format_args(self, action, default_metavar): - get_metavar = self._metavar_formatter(action, default_metavar) - if action.nargs is None: - result = '%s' % get_metavar(1) - elif action.nargs == OPTIONAL: - result = '[%s]' % get_metavar(1) - elif action.nargs == ZERO_OR_MORE: - result = '[%s [%s ...]]' % get_metavar(2) - elif action.nargs == ONE_OR_MORE: - result = '%s [%s ...]' % get_metavar(2) - elif action.nargs == REMAINDER: - result = '...' - elif action.nargs == PARSER: - result = '%s ...' % get_metavar(1) - else: - formats = ['%s' for _ in range(action.nargs)] - result = ' '.join(formats) % get_metavar(action.nargs) - return result - - def _expand_help(self, action): - params = dict(vars(action), prog=self._prog) - for name in list(params): - if params[name] is SUPPRESS: - del params[name] - for name in list(params): - if hasattr(params[name], '__name__'): - params[name] = params[name].__name__ - if params.get('choices') is not None: - choices_str = ', '.join([str(c) for c in params['choices']]) - params['choices'] = choices_str - return self._get_help_string(action) % params - - def _iter_indented_subactions(self, action): - try: - get_subactions = action._get_subactions - except AttributeError: - pass - else: - self._indent() - for subaction in get_subactions(): - yield subaction - self._dedent() - - def _split_lines(self, text, width): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.wrap(text, width) - - def _fill_text(self, text, width, indent): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.fill(text, width, initial_indent=indent, - subsequent_indent=indent) - - def _get_help_string(self, action): - return action.help - - def _get_default_metavar_for_optional(self, action): - return action.dest.upper() - - def _get_default_metavar_for_positional(self, action): - return action.dest - - -class RawDescriptionHelpFormatter(HelpFormatter): - """Help message formatter which retains any formatting in descriptions. 
- - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) - - -class RawTextHelpFormatter(RawDescriptionHelpFormatter): - """Help message formatter which retains formatting of all help text. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _split_lines(self, text, width): - return text.splitlines() - - -class ArgumentDefaultsHelpFormatter(HelpFormatter): - """Help message formatter which adds default values to argument help. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _get_help_string(self, action): - help = action.help - if '%(default)' not in action.help: - if action.default is not SUPPRESS: - defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] - if action.option_strings or action.nargs in defaulting_nargs: - help += ' (default: %(default)s)' - return help - - -class MetavarTypeHelpFormatter(HelpFormatter): - """Help message formatter which uses the argument 'type' as the default - metavar value (instead of the argument 'dest') - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _get_default_metavar_for_optional(self, action): - return action.type.__name__ - - def _get_default_metavar_for_positional(self, action): - return action.type.__name__ - - - -# ===================== -# Options and Arguments -# ===================== - -def _get_action_name(argument): - if argument is None: - return None - elif argument.option_strings: - return '/'.join(argument.option_strings) - elif argument.metavar not in (None, SUPPRESS): - return argument.metavar - elif argument.dest not in (None, SUPPRESS): - return argument.dest - else: - return None - - -class ArgumentError(Exception): - """An error from creating or using an argument (optional or positional). - - The string value of this exception is the message, augmented with - information about the argument that caused it. - """ - - def __init__(self, argument, message): - self.argument_name = _get_action_name(argument) - self.message = message - - def __str__(self): - if self.argument_name is None: - format = '%(message)s' - else: - format = 'argument %(argument_name)s: %(message)s' - return format % dict(message=self.message, - argument_name=self.argument_name) - - -class ArgumentTypeError(Exception): - """An error from trying to convert a command line string to a type.""" - pass - - -# ============== -# Action classes -# ============== - -class Action(_AttributeHolder): - """Information about how to convert command line strings to Python objects. - - Action objects are used by an ArgumentParser to represent the information - needed to parse a single argument from one or more strings from the - command line. The keyword arguments to the Action constructor are also - all attributes of Action instances. - - Keyword Arguments: - - - option_strings -- A list of command-line option strings which - should be associated with this action. - - - dest -- The name of the attribute to hold the created object(s) - - - nargs -- The number of command-line arguments that should be - consumed. 
By default, one argument will be consumed and a single - value will be produced. Other values include: - - N (an integer) consumes N arguments (and produces a list) - - '?' consumes zero or one arguments - - '*' consumes zero or more arguments (and produces a list) - - '+' consumes one or more arguments (and produces a list) - Note that the difference between the default and nargs=1 is that - with the default, a single value will be produced, while with - nargs=1, a list containing a single value will be produced. - - - const -- The value to be produced if the option is specified and the - option uses an action that takes no values. - - - default -- The value to be produced if the option is not specified. - - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. - - - choices -- A container of values that should be allowed. If not None, - after a command-line argument has been converted to the appropriate - type, an exception will be raised if it is not a member of this - collection. - - - required -- True if the action must always be specified at the - command line. This is only meaningful for optional command-line - arguments. - - - help -- The help string describing the argument. - - - metavar -- The name to be used for the option's argument with the - help string. If None, the 'dest' value will be used as the name. - """ - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - self.option_strings = option_strings - self.dest = dest - self.nargs = nargs - self.const = const - self.default = default - self.type = type - self.choices = choices - self.required = required - self.help = help - self.metavar = metavar - - def _get_kwargs(self): - names = [ - 'option_strings', - 'dest', - 'nargs', - 'const', - 'default', - 'type', - 'choices', - 'help', - 'metavar', - ] - return [(name, getattr(self, name)) for name in names] - - def __call__(self, parser, namespace, values, option_string=None): - raise NotImplementedError(_('.__call__() not defined')) - - -class _StoreAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for store actions must be > 0; if you ' - 'have nothing to store, actions such as store ' - 'true or store const may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_StoreAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, values) - - -class _StoreConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_StoreConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, self.const) - - -class 
_StoreTrueAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=False, - required=False, - help=None): - super(_StoreTrueAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=True, - default=default, - required=required, - help=help) - - -class _StoreFalseAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=True, - required=False, - help=None): - super(_StoreFalseAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=False, - default=default, - required=required, - help=help) - - -class _AppendAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for append actions must be > 0; if arg ' - 'strings are not supplying the value to append, ' - 'the append const action may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_AppendAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(values) - setattr(namespace, self.dest, items) - - -class _AppendConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_AppendConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(self.const) - setattr(namespace, self.dest, items) - - -class _CountAction(Action): - - def __init__(self, - option_strings, - dest, - default=None, - required=False, - help=None): - super(_CountAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - new_count = _ensure_value(namespace, self.dest, 0) + 1 - setattr(namespace, self.dest, new_count) - - -class _HelpAction(Action): - - def __init__(self, - option_strings, - dest=SUPPRESS, - default=SUPPRESS, - help=None): - super(_HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - parser.print_help() - parser.exit() - - -class _VersionAction(Action): - - def __init__(self, - option_strings, - version=None, - dest=SUPPRESS, - default=SUPPRESS, - help="show program's version number and exit"): - super(_VersionAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - self.version = version - - def __call__(self, parser, namespace, values, option_string=None): - version = self.version - if version is None: - version = parser.version - formatter = parser._get_formatter() - formatter.add_text(version) - parser.exit(message=formatter.format_help()) - - -class _SubParsersAction(Action): - - class _ChoicesPseudoAction(Action): - - def 
__init__(self, name, aliases, help): - metavar = dest = name - if aliases: - metavar += ' (%s)' % ', '.join(aliases) - sup = super(_SubParsersAction._ChoicesPseudoAction, self) - sup.__init__(option_strings=[], dest=dest, help=help, - metavar=metavar) - - def __init__(self, - option_strings, - prog, - parser_class, - dest=SUPPRESS, - help=None, - metavar=None): - - self._prog_prefix = prog - self._parser_class = parser_class - self._name_parser_map = _collections.OrderedDict() - self._choices_actions = [] - - super(_SubParsersAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=PARSER, - choices=self._name_parser_map, - help=help, - metavar=metavar) - - def add_parser(self, name, **kwargs): - # set prog from the existing prefix - if kwargs.get('prog') is None: - kwargs['prog'] = '%s %s' % (self._prog_prefix, name) - - aliases = kwargs.pop('aliases', ()) - - # create a pseudo-action to hold the choice help - if 'help' in kwargs: - help = kwargs.pop('help') - choice_action = self._ChoicesPseudoAction(name, aliases, help) - self._choices_actions.append(choice_action) - - # create the parser and add it to the map - parser = self._parser_class(**kwargs) - self._name_parser_map[name] = parser - - # make parser available under aliases also - for alias in aliases: - self._name_parser_map[alias] = parser - - return parser - - def _get_subactions(self): - return self._choices_actions - - def __call__(self, parser, namespace, values, option_string=None): - parser_name = values[0] - arg_strings = values[1:] - - # set the parser name if requested - if self.dest is not SUPPRESS: - setattr(namespace, self.dest, parser_name) - - # select the parser - try: - parser = self._name_parser_map[parser_name] - except KeyError: - args = {'parser_name': parser_name, - 'choices': ', '.join(self._name_parser_map)} - msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args - raise ArgumentError(self, msg) - - # parse all the remaining options into the namespace - # store any unrecognized options on the object, so that the top - # level parser can decide what to do with them - namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) - if arg_strings: - vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) - getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) - - -# ============== -# Type classes -# ============== - -class FileType(object): - """Factory for creating file object types - - Instances of FileType are typically passed as type= arguments to the - ArgumentParser add_argument() method. - - Keyword Arguments: - - mode -- A string indicating how the file is to be opened. Accepts the - same values as the builtin open() function. - - bufsize -- The file's desired buffer size. Accepts the same values as - the builtin open() function. 
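A minimal usage sketch for FileType as described above (the 'out.txt' path is only illustrative)::

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    # the converter opens the named file, so args.log is a file object
    parser.add_argument('--log', type=argparse.FileType('w'))
    args = parser.parse_args(['--log', 'out.txt'])
    args.log.write('hello\n')
    args.log.close()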
- """ - - def __init__(self, mode='r', bufsize=-1): - self._mode = mode - self._bufsize = bufsize - - def __call__(self, string): - # the special argument "-" means sys.std{in,out} - if string == '-': - if 'r' in self._mode: - return _sys.stdin - elif 'w' in self._mode: - return _sys.stdout - else: - msg = _('argument "-" with mode %r') % self._mode - raise ValueError(msg) - - # all other arguments are used as file names - try: - return open(string, self._mode, self._bufsize) - except IOError as e: - message = _("can't open '%s': %s") - raise ArgumentTypeError(message % (string, e)) - - def __repr__(self): - args = self._mode, self._bufsize - args_str = ', '.join(repr(arg) for arg in args if arg != -1) - return '%s(%s)' % (type(self).__name__, args_str) - -# =========================== -# Optional and Positional Parsing -# =========================== - -class Namespace(_AttributeHolder): - """Simple object for storing attributes. - - Implements equality by attribute names and values, and provides a simple - string representation. - """ - - def __init__(self, **kwargs): - for name in kwargs: - setattr(self, name, kwargs[name]) - - def __eq__(self, other): - return vars(self) == vars(other) - - def __ne__(self, other): - return not (self == other) - - def __contains__(self, key): - return key in self.__dict__ - - -class _ActionsContainer(object): - - def __init__(self, - description, - prefix_chars, - argument_default, - conflict_handler): - super(_ActionsContainer, self).__init__() - - self.description = description - self.argument_default = argument_default - self.prefix_chars = prefix_chars - self.conflict_handler = conflict_handler - - # set up registries - self._registries = {} - - # register actions - self.register('action', None, _StoreAction) - self.register('action', 'store', _StoreAction) - self.register('action', 'store_const', _StoreConstAction) - self.register('action', 'store_true', _StoreTrueAction) - self.register('action', 'store_false', _StoreFalseAction) - self.register('action', 'append', _AppendAction) - self.register('action', 'append_const', _AppendConstAction) - self.register('action', 'count', _CountAction) - self.register('action', 'help', _HelpAction) - self.register('action', 'version', _VersionAction) - self.register('action', 'parsers', _SubParsersAction) - - # raise an exception if the conflict handler is invalid - self._get_handler() - - # action storage - self._actions = [] - self._option_string_actions = {} - - # groups - self._action_groups = [] - self._mutually_exclusive_groups = [] - - # defaults storage - self._defaults = {} - - # determines whether an "option" looks like a negative number - self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') - - # whether or not there are any optionals that look like negative - # numbers -- uses a list so it can be shared and edited - self._has_negative_number_optionals = [] - - # ==================== - # Registration methods - # ==================== - def register(self, registry_name, value, object): - registry = self._registries.setdefault(registry_name, {}) - registry[value] = object - - def _registry_get(self, registry_name, value, default=None): - return self._registries[registry_name].get(value, default) - - # ================================== - # Namespace default accessor methods - # ================================== - def set_defaults(self, **kwargs): - self._defaults.update(kwargs) - - # if these defaults match any existing arguments, replace - # the previous default on the object with the new 
one - for action in self._actions: - if action.dest in kwargs: - action.default = kwargs[action.dest] - - def get_default(self, dest): - for action in self._actions: - if action.dest == dest and action.default is not None: - return action.default - return self._defaults.get(dest, None) - - - # ======================= - # Adding argument actions - # ======================= - def add_argument(self, *args, **kwargs): - """ - add_argument(dest, ..., name=value, ...) - add_argument(option_string, option_string, ..., name=value, ...) - """ - - # if no positional args are supplied or only one is supplied and - # it doesn't look like an option string, parse a positional - # argument - chars = self.prefix_chars - if not args or len(args) == 1 and args[0][0] not in chars: - if args and 'dest' in kwargs: - raise ValueError('dest supplied twice for positional argument') - kwargs = self._get_positional_kwargs(*args, **kwargs) - - # otherwise, we're adding an optional argument - else: - kwargs = self._get_optional_kwargs(*args, **kwargs) - - # if no default was supplied, use the parser-level default - if 'default' not in kwargs: - dest = kwargs['dest'] - if dest in self._defaults: - kwargs['default'] = self._defaults[dest] - elif self.argument_default is not None: - kwargs['default'] = self.argument_default - - # create the action object, and add it to the parser - action_class = self._pop_action_class(kwargs) - if not _callable(action_class): - raise ValueError('unknown action "%s"' % (action_class,)) - action = action_class(**kwargs) - - # raise an error if the action type is not callable - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - raise ValueError('%r is not callable' % (type_func,)) - - # raise an error if the metavar does not match the type - if hasattr(self, "_get_formatter"): - try: - self._get_formatter()._format_args(action, None) - except TypeError: - raise ValueError("length of metavar tuple does not match nargs") - - return self._add_action(action) - - def add_argument_group(self, *args, **kwargs): - group = _ArgumentGroup(self, *args, **kwargs) - self._action_groups.append(group) - return group - - def add_mutually_exclusive_group(self, **kwargs): - group = _MutuallyExclusiveGroup(self, **kwargs) - self._mutually_exclusive_groups.append(group) - return group - - def _add_action(self, action): - # resolve any conflicts - self._check_conflict(action) - - # add to actions list - self._actions.append(action) - action.container = self - - # index the action by any option strings it has - for option_string in action.option_strings: - self._option_string_actions[option_string] = action - - # set the flag if any option strings look like negative numbers - for option_string in action.option_strings: - if self._negative_number_matcher.match(option_string): - if not self._has_negative_number_optionals: - self._has_negative_number_optionals.append(True) - - # return the created action - return action - - def _remove_action(self, action): - self._actions.remove(action) - - def _add_container_actions(self, container): - # collect groups by titles - title_group_map = {} - for group in self._action_groups: - if group.title in title_group_map: - msg = _('cannot merge actions - two groups are named %r') - raise ValueError(msg % (group.title)) - title_group_map[group.title] = group - - # map each action to its group - group_map = {} - for group in container._action_groups: - - # if a group with the title exists, use that, otherwise - # create a new group 
matching the container's group - if group.title not in title_group_map: - title_group_map[group.title] = self.add_argument_group( - title=group.title, - description=group.description, - conflict_handler=group.conflict_handler) - - # map the actions to their new group - for action in group._group_actions: - group_map[action] = title_group_map[group.title] - - # add container's mutually exclusive groups - # NOTE: if add_mutually_exclusive_group ever gains title= and - # description= then this code will need to be expanded as above - for group in container._mutually_exclusive_groups: - mutex_group = self.add_mutually_exclusive_group( - required=group.required) - - # map the actions to their new mutex group - for action in group._group_actions: - group_map[action] = mutex_group - - # add all actions to this container or their group - for action in container._actions: - group_map.get(action, self)._add_action(action) - - def _get_positional_kwargs(self, dest, **kwargs): - # make sure required is not specified - if 'required' in kwargs: - msg = _("'required' is an invalid argument for positionals") - raise TypeError(msg) - - # mark positional arguments as required if at least one is - # always required - if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: - kwargs['required'] = True - if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: - kwargs['required'] = True - - # return the keyword arguments with no option strings - return dict(kwargs, dest=dest, option_strings=[]) - - def _get_optional_kwargs(self, *args, **kwargs): - # determine short and long option strings - option_strings = [] - long_option_strings = [] - for option_string in args: - # error on strings that don't start with an appropriate prefix - if not option_string[0] in self.prefix_chars: - args = {'option': option_string, - 'prefix_chars': self.prefix_chars} - msg = _('invalid option string %(option)r: ' - 'must start with a character %(prefix_chars)r') - raise ValueError(msg % args) - - # strings starting with two prefix characters are long options - option_strings.append(option_string) - if option_string[0] in self.prefix_chars: - if len(option_string) > 1: - if option_string[1] in self.prefix_chars: - long_option_strings.append(option_string) - - # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' - dest = kwargs.pop('dest', None) - if dest is None: - if long_option_strings: - dest_option_string = long_option_strings[0] - else: - dest_option_string = option_strings[0] - dest = dest_option_string.lstrip(self.prefix_chars) - if not dest: - msg = _('dest= is required for options like %r') - raise ValueError(msg % option_string) - dest = dest.replace('-', '_') - - # return the updated keyword arguments - return dict(kwargs, dest=dest, option_strings=option_strings) - - def _pop_action_class(self, kwargs, default=None): - action = kwargs.pop('action', default) - return self._registry_get('action', action, action) - - def _get_handler(self): - # determine function from conflict handler string - handler_func_name = '_handle_conflict_%s' % self.conflict_handler - try: - return getattr(self, handler_func_name) - except AttributeError: - msg = _('invalid conflict_resolution value: %r') - raise ValueError(msg % self.conflict_handler) - - def _check_conflict(self, action): - - # find all options that conflict with this option - confl_optionals = [] - for option_string in action.option_strings: - if option_string in self._option_string_actions: - confl_optional = self._option_string_actions[option_string] - 
confl_optionals.append((option_string, confl_optional)) - - # resolve any conflicts - if confl_optionals: - conflict_handler = self._get_handler() - conflict_handler(action, confl_optionals) - - def _handle_conflict_error(self, action, conflicting_actions): - message = ngettext('conflicting option string: %s', - 'conflicting option strings: %s', - len(conflicting_actions)) - conflict_string = ', '.join([option_string - for option_string, action - in conflicting_actions]) - raise ArgumentError(action, message % conflict_string) - - def _handle_conflict_resolve(self, action, conflicting_actions): - - # remove all conflicting options - for option_string, action in conflicting_actions: - - # remove the conflicting option - action.option_strings.remove(option_string) - self._option_string_actions.pop(option_string, None) - - # if the option now has no option string, remove it from the - # container holding it - if not action.option_strings: - action.container._remove_action(action) - - -class _ArgumentGroup(_ActionsContainer): - - def __init__(self, container, title=None, description=None, **kwargs): - # add any missing keyword arguments by checking the container - update = kwargs.setdefault - update('conflict_handler', container.conflict_handler) - update('prefix_chars', container.prefix_chars) - update('argument_default', container.argument_default) - super_init = super(_ArgumentGroup, self).__init__ - super_init(description=description, **kwargs) - - # group attributes - self.title = title - self._group_actions = [] - - # share most attributes with the container - self._registries = container._registries - self._actions = container._actions - self._option_string_actions = container._option_string_actions - self._defaults = container._defaults - self._has_negative_number_optionals = \ - container._has_negative_number_optionals - self._mutually_exclusive_groups = container._mutually_exclusive_groups - - def _add_action(self, action): - action = super(_ArgumentGroup, self)._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - super(_ArgumentGroup, self)._remove_action(action) - self._group_actions.remove(action) - - -class _MutuallyExclusiveGroup(_ArgumentGroup): - - def __init__(self, container, required=False): - super(_MutuallyExclusiveGroup, self).__init__(container) - self.required = required - self._container = container - - def _add_action(self, action): - if action.required: - msg = _('mutually exclusive arguments must be optional') - raise ValueError(msg) - action = self._container._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - self._container._remove_action(action) - self._group_actions.remove(action) - - -class ArgumentParser(_AttributeHolder, _ActionsContainer): - """Object for parsing command line strings into Python objects. 
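The _MutuallyExclusiveGroup container above is reached through the public add_mutually_exclusive_group() method; a minimal sketch::

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--quiet', action='store_true')
    group.add_argument('--verbose', action='store_true')
    # parsing ['--quiet', '--verbose'] would instead exit with
    # 'argument --verbose: not allowed with argument --quiet'
    args = parser.parse_args(['--quiet'])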
- - Keyword Arguments: - - prog -- The name of the program (default: sys.argv[0]) - - usage -- A usage message (default: auto-generated from arguments) - - description -- A description of what the program does - - epilog -- Text following the argument descriptions - - parents -- Parsers whose arguments should be copied into this one - - formatter_class -- HelpFormatter class for printing help messages - - prefix_chars -- Characters that prefix optional arguments - - fromfile_prefix_chars -- Characters that prefix files containing - additional arguments - - argument_default -- The default value for all arguments - - conflict_handler -- String indicating how to handle conflicts - - add_help -- Add a -h/-help option - """ - - def __init__(self, - prog=None, - usage=None, - description=None, - epilog=None, - version=None, - parents=[], - formatter_class=HelpFormatter, - prefix_chars='-', - fromfile_prefix_chars=None, - argument_default=None, - conflict_handler='error', - add_help=True): - - if version is not None: - import warnings - warnings.warn( - """The "version" argument to ArgumentParser is deprecated. """ - """Please use """ - """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) - - superinit = super(ArgumentParser, self).__init__ - superinit(description=description, - prefix_chars=prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler) - - # default setting for prog - if prog is None: - prog = _os.path.basename(_sys.argv[0]) - - self.prog = prog - self.usage = usage - self.epilog = epilog - self.version = version - self.formatter_class = formatter_class - self.fromfile_prefix_chars = fromfile_prefix_chars - self.add_help = add_help - - add_group = self.add_argument_group - self._positionals = add_group(_('positional arguments')) - self._optionals = add_group(_('optional arguments')) - self._subparsers = None - - # register types - def identity(string): - return string - self.register('type', None, identity) - - # add help and version arguments if necessary - # (using explicit default to override global argument_default) - default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] - if self.add_help: - self.add_argument( - default_prefix+'h', default_prefix*2+'help', - action='help', default=SUPPRESS, - help=_('show this help message and exit')) - if self.version: - self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, - version=self.version, - help=_("show program's version number and exit")) - - # add parent arguments and defaults - for parent in parents: - self._add_container_actions(parent) - try: - defaults = parent._defaults - except AttributeError: - pass - else: - self._defaults.update(defaults) - - # ======================= - # Pretty __repr__ methods - # ======================= - def _get_kwargs(self): - names = [ - 'prog', - 'usage', - 'description', - 'version', - 'formatter_class', - 'conflict_handler', - 'add_help', - ] - return [(name, getattr(self, name)) for name in names] - - # ================================== - # Optional/Positional adding methods - # ================================== - def add_subparsers(self, **kwargs): - if self._subparsers is not None: - self.error(_('cannot have multiple subparser arguments')) - - # add the parser class to the arguments if it's not present - kwargs.setdefault('parser_class', type(self)) - - if 'title' in kwargs or 'description' in kwargs: - title = _(kwargs.pop('title', 'subcommands')) - 
description = _(kwargs.pop('description', None)) - self._subparsers = self.add_argument_group(title, description) - else: - self._subparsers = self._positionals - - # prog defaults to the usage message of this parser, skipping - # optional arguments and with no "usage:" prefix - if kwargs.get('prog') is None: - formatter = self._get_formatter() - positionals = self._get_positional_actions() - groups = self._mutually_exclusive_groups - formatter.add_usage(self.usage, positionals, groups, '') - kwargs['prog'] = formatter.format_help().strip() - - # create the parsers action and add it to the positionals list - parsers_class = self._pop_action_class(kwargs, 'parsers') - action = parsers_class(option_strings=[], **kwargs) - self._subparsers._add_action(action) - - # return the created parsers action - return action - - def _add_action(self, action): - if action.option_strings: - self._optionals._add_action(action) - else: - self._positionals._add_action(action) - return action - - def _get_optional_actions(self): - return [action - for action in self._actions - if action.option_strings] - - def _get_positional_actions(self): - return [action - for action in self._actions - if not action.option_strings] - - # ===================================== - # Command line argument parsing methods - # ===================================== - def parse_args(self, args=None, namespace=None): - args, argv = self.parse_known_args(args, namespace) - if argv: - msg = _('unrecognized arguments: %s') - self.error(msg % ' '.join(argv)) - return args - - def parse_known_args(self, args=None, namespace=None): - # args default to the system args - if args is None: - args = _sys.argv[1:] - - # default Namespace built from parser defaults - if namespace is None: - namespace = Namespace() - - # add any action defaults that aren't present - for action in self._actions: - if action.dest is not SUPPRESS: - if not hasattr(namespace, action.dest): - if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, str): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) - - # add any parser defaults that aren't present - for dest in self._defaults: - if not hasattr(namespace, dest): - setattr(namespace, dest, self._defaults[dest]) - - # parse the arguments and exit if there are any errors - try: - namespace, args = self._parse_known_args(args, namespace) - if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): - args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) - delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) - return namespace, args - except ArgumentError: - err = _sys.exc_info()[1] - self.error(str(err)) - - def _parse_known_args(self, arg_strings, namespace): - # replace arg strings that are file references - if self.fromfile_prefix_chars is not None: - arg_strings = self._read_args_from_files(arg_strings) - - # map all mutually exclusive arguments to the other arguments - # they can't occur with - action_conflicts = {} - for mutex_group in self._mutually_exclusive_groups: - group_actions = mutex_group._group_actions - for i, mutex_action in enumerate(mutex_group._group_actions): - conflicts = action_conflicts.setdefault(mutex_action, []) - conflicts.extend(group_actions[:i]) - conflicts.extend(group_actions[i + 1:]) - - # find all option indices, and determine the arg_string_pattern - # which has an 'O' if there is an option at an index, - # an 'A' if there is an argument, or a '-' if there is a '--' - option_string_indices = {} - arg_string_pattern_parts = [] 
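To make the 'O'/'A'/'-' encoding just described concrete (a sketch against the public API; the -x option and 'pos' positional are illustrative): for a parser defining -x and one optional positional, the strings ['-x', '1', '--', 'a'] encode as 'OA-A', and everything after the '--' is treated as positional::

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('-x')
    parser.add_argument('pos', nargs='?')
    # '-x' -> 'O', '1' -> 'A', '--' -> '-', 'a' -> 'A'  =>  'OA-A'
    args = parser.parse_args(['-x', '1', '--', 'a'])
    assert (args.x, args.pos) == ('1', 'a')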
- arg_strings_iter = iter(arg_strings) - for i, arg_string in enumerate(arg_strings_iter): - - # all args after -- are non-options - if arg_string == '--': - arg_string_pattern_parts.append('-') - for arg_string in arg_strings_iter: - arg_string_pattern_parts.append('A') - - # otherwise, add the arg to the arg strings - # and note the index if it was an option - else: - option_tuple = self._parse_optional(arg_string) - if option_tuple is None: - pattern = 'A' - else: - option_string_indices[i] = option_tuple - pattern = 'O' - arg_string_pattern_parts.append(pattern) - - # join the pieces together to form the pattern - arg_strings_pattern = ''.join(arg_string_pattern_parts) - - # converts arg strings to the appropriate and then takes the action - seen_actions = set() - seen_non_default_actions = set() - - def take_action(action, argument_strings, option_string=None): - seen_actions.add(action) - argument_values = self._get_values(action, argument_strings) - - # error if this argument is not allowed with other previously - # seen arguments, assuming that actions that use the default - # value don't really count as "present" - if argument_values is not action.default: - seen_non_default_actions.add(action) - for conflict_action in action_conflicts.get(action, []): - if conflict_action in seen_non_default_actions: - msg = _('not allowed with argument %s') - action_name = _get_action_name(conflict_action) - raise ArgumentError(action, msg % action_name) - - # take the action if we didn't receive a SUPPRESS value - # (e.g. from a default) - if argument_values is not SUPPRESS: - action(self, namespace, argument_values, option_string) - - # function to convert arg_strings into an optional action - def consume_optional(start_index): - - # get the optional identified at this index - option_tuple = option_string_indices[start_index] - action, option_string, explicit_arg = option_tuple - - # identify additional optionals in the same arg string - # (e.g. 
-xyz is the same as -x -y -z if no args are required) - match_argument = self._match_argument - action_tuples = [] - while True: - - # if we found no optional action, skip it - if action is None: - extras.append(arg_strings[start_index]) - return start_index + 1 - - # if there is an explicit argument, try to match the - # optional's string arguments to only this - if explicit_arg is not None: - arg_count = match_argument(action, 'A') - - # if the action is a single-dash option and takes no - # arguments, try to parse more single-dash options out - # of the tail of the option string - chars = self.prefix_chars - if arg_count == 0 and option_string[1] not in chars: - action_tuples.append((action, [], option_string)) - char = option_string[0] - option_string = char + explicit_arg[0] - new_explicit_arg = explicit_arg[1:] or None - optionals_map = self._option_string_actions - if option_string in optionals_map: - action = optionals_map[option_string] - explicit_arg = new_explicit_arg - else: - msg = _('ignored explicit argument %r') - raise ArgumentError(action, msg % explicit_arg) - - # if the action expect exactly one argument, we've - # successfully matched the option; exit the loop - elif arg_count == 1: - stop = start_index + 1 - args = [explicit_arg] - action_tuples.append((action, args, option_string)) - break - - # error if a double-dash option did not use the - # explicit argument - else: - msg = _('ignored explicit argument %r') - raise ArgumentError(action, msg % explicit_arg) - - # if there is no explicit argument, try to match the - # optional's string arguments with the following strings - # if successful, exit the loop - else: - start = start_index + 1 - selected_patterns = arg_strings_pattern[start:] - arg_count = match_argument(action, selected_patterns) - stop = start + arg_count - args = arg_strings[start:stop] - action_tuples.append((action, args, option_string)) - break - - # add the Optional to the list and return the index at which - # the Optional's string args stopped - assert action_tuples - for action, args, option_string in action_tuples: - take_action(action, args, option_string) - return stop - - # the list of Positionals left to be parsed; this is modified - # by consume_positionals() - positionals = self._get_positional_actions() - - # function to convert arg_strings into positional actions - def consume_positionals(start_index): - # match as many Positionals as possible - match_partial = self._match_arguments_partial - selected_pattern = arg_strings_pattern[start_index:] - arg_counts = match_partial(positionals, selected_pattern) - - # slice off the appropriate arg strings for each Positional - # and add the Positional and its args to the list - for action, arg_count in zip(positionals, arg_counts): - args = arg_strings[start_index: start_index + arg_count] - start_index += arg_count - take_action(action, args) - - # slice off the Positionals that we just parsed and return the - # index at which the Positionals' string args stopped - positionals[:] = positionals[len(arg_counts):] - return start_index - - # consume Positionals and Optionals alternately, until we have - # passed the last option string - extras = [] - start_index = 0 - if option_string_indices: - max_option_string_index = max(option_string_indices) - else: - max_option_string_index = -1 - while start_index <= max_option_string_index: - - # consume any Positionals preceding the next option - next_option_string_index = min([ - index - for index in option_string_indices - if index >= start_index]) - 
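The single-dash handling in consume_optional() above is what allows clustered short flags; a minimal sketch with three zero-argument options (the flag names are illustrative)::

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    for flag in ('-x', '-y', '-z'):
        parser.add_argument(flag, action='store_true')
    # '-xyz' unpacks to -x -y -z because none of the flags takes a value
    args = parser.parse_args(['-xyz'])
    assert args.x and args.y and args.z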
if start_index != next_option_string_index: - positionals_end_index = consume_positionals(start_index) - - # only try to parse the next optional if we didn't consume - # the option string during the positionals parsing - if positionals_end_index > start_index: - start_index = positionals_end_index - continue - else: - start_index = positionals_end_index - - # if we consumed all the positionals we could and we're not - # at the index of an option string, there were extra arguments - if start_index not in option_string_indices: - strings = arg_strings[start_index:next_option_string_index] - extras.extend(strings) - start_index = next_option_string_index - - # consume the next optional and any arguments for it - start_index = consume_optional(start_index) - - # consume any positionals following the last Optional - stop_index = consume_positionals(start_index) - - # if we didn't consume all the argument strings, there were extras - extras.extend(arg_strings[stop_index:]) - - # make sure all required actions were present - required_actions = [_get_action_name(action) for action in self._actions - if action.required and action not in seen_actions] - if required_actions: - self.error(_('the following arguments are required: %s') % - ', '.join(required_actions)) - - # make sure all required groups had one option present - for group in self._mutually_exclusive_groups: - if group.required: - for action in group._group_actions: - if action in seen_non_default_actions: - break - - # if no actions were used, report the error - else: - names = [_get_action_name(action) - for action in group._group_actions - if action.help is not SUPPRESS] - msg = _('one of the arguments %s is required') - self.error(msg % ' '.join(names)) - - # return the updated namespace and the extra arguments - return namespace, extras - - def _read_args_from_files(self, arg_strings): - # expand arguments referencing files - new_arg_strings = [] - for arg_string in arg_strings: - - # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: - new_arg_strings.append(arg_string) - - # replace arguments referencing files with the file content - else: - try: - args_file = open(arg_string[1:]) - try: - arg_strings = [] - for arg_line in args_file.read().splitlines(): - for arg in self.convert_arg_line_to_args(arg_line): - arg_strings.append(arg) - arg_strings = self._read_args_from_files(arg_strings) - new_arg_strings.extend(arg_strings) - finally: - args_file.close() - except IOError: - err = _sys.exc_info()[1] - self.error(str(err)) - - # return the modified argument list - return new_arg_strings - - def convert_arg_line_to_args(self, arg_line): - return [arg_line] - - def _match_argument(self, action, arg_strings_pattern): - # match the pattern for this action to the arg strings - nargs_pattern = self._get_nargs_pattern(action) - match = _re.match(nargs_pattern, arg_strings_pattern) - - # raise an exception if we weren't able to find a match - if match is None: - nargs_errors = { - None: _('expected one argument'), - OPTIONAL: _('expected at most one argument'), - ONE_OR_MORE: _('expected at least one argument'), - } - default = ngettext('expected %s argument', - 'expected %s arguments', - action.nargs) % action.nargs - msg = nargs_errors.get(action.nargs, default) - raise ArgumentError(action, msg) - - # return the number of arguments matched - return len(match.group(1)) - - def _match_arguments_partial(self, actions, arg_strings_pattern): - # progressively shorten the actions list 
by slicing off the - # final actions until we find a match - result = [] - for i in range(len(actions), 0, -1): - actions_slice = actions[:i] - pattern = ''.join([self._get_nargs_pattern(action) - for action in actions_slice]) - match = _re.match(pattern, arg_strings_pattern) - if match is not None: - result.extend([len(string) for string in match.groups()]) - break - - # return the list of arg string counts - return result - - def _parse_optional(self, arg_string): - # if it's an empty string, it was meant to be a positional - if not arg_string: - return None - - # if it doesn't start with a prefix, it was meant to be positional - if not arg_string[0] in self.prefix_chars: - return None - - # if the option string is present in the parser, return the action - if arg_string in self._option_string_actions: - action = self._option_string_actions[arg_string] - return action, arg_string, None - - # if it's just a single character, it was meant to be positional - if len(arg_string) == 1: - return None - - # if the option string before the "=" is present, return the action - if '=' in arg_string: - option_string, explicit_arg = arg_string.split('=', 1) - if option_string in self._option_string_actions: - action = self._option_string_actions[option_string] - return action, option_string, explicit_arg - - # search through all possible prefixes of the option string - # and all actions in the parser for possible interpretations - option_tuples = self._get_option_tuples(arg_string) - - # if multiple actions match, the option string was ambiguous - if len(option_tuples) > 1: - options = ', '.join([option_string - for action, option_string, explicit_arg in option_tuples]) - args = {'option': arg_string, 'matches': options} - msg = _('ambiguous option: %(option)s could match %(matches)s') - self.error(msg % args) - - # if exactly one action matched, this segmentation is good, - # so return the parsed action - elif len(option_tuples) == 1: - option_tuple, = option_tuples - return option_tuple - - # if it was not found as an option, but it looks like a negative - # number, it was meant to be positional - # unless there are negative-number-like options - if self._negative_number_matcher.match(arg_string): - if not self._has_negative_number_optionals: - return None - - # if it contains a space, it was meant to be a positional - if ' ' in arg_string: - return None - - # it was meant to be an optional but there is no such option - # in this parser (though it might be a valid option in a subparser) - return None, arg_string, None - - def _get_option_tuples(self, option_string): - result = [] - - # option strings starting with two prefix characters are only - # split at the '=' - chars = self.prefix_chars - if option_string[0] in chars and option_string[1] in chars: - if '=' in option_string: - option_prefix, explicit_arg = option_string.split('=', 1) - else: - option_prefix = option_string - explicit_arg = None - for option_string in self._option_string_actions: - if option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # single character options can be concatenated with their arguments - # but multiple character options always have to have their argument - # separate - elif option_string[0] in chars and option_string[1] not in chars: - option_prefix = option_string - explicit_arg = None - short_option_prefix = option_string[:2] - short_explicit_arg = option_string[2:] - - for option_string in 
self._option_string_actions: - if option_string == short_option_prefix: - action = self._option_string_actions[option_string] - tup = action, option_string, short_explicit_arg - result.append(tup) - elif option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # shouldn't ever get here - else: - self.error(_('unexpected option string: %s') % option_string) - - # return the collected option tuples - return result - - def _get_nargs_pattern(self, action): - # in all examples below, we have to allow for '--' args - # which are represented as '-' in the pattern - nargs = action.nargs - - # the default (None) is assumed to be a single argument - if nargs is None: - nargs_pattern = '(-*A-*)' - - # allow zero or one arguments - elif nargs == OPTIONAL: - nargs_pattern = '(-*A?-*)' - - # allow zero or more arguments - elif nargs == ZERO_OR_MORE: - nargs_pattern = '(-*[A-]*)' - - # allow one or more arguments - elif nargs == ONE_OR_MORE: - nargs_pattern = '(-*A[A-]*)' - - # allow any number of options or arguments - elif nargs == REMAINDER: - nargs_pattern = '([-AO]*)' - - # allow one argument followed by any number of options or arguments - elif nargs == PARSER: - nargs_pattern = '(-*A[-AO]*)' - - # all others should be integers - else: - nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) - - # if this is an optional action, -- is not allowed - if action.option_strings: - nargs_pattern = nargs_pattern.replace('-*', '') - nargs_pattern = nargs_pattern.replace('-', '') - - # return the pattern - return nargs_pattern - - # ======================== - # Value conversion methods - # ======================== - def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' - if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] - - # optional argument produces a default when not present - if not arg_strings and action.nargs == OPTIONAL: - if action.option_strings: - value = action.const - else: - value = action.default - if isinstance(value, str): - value = self._get_value(action, value) - self._check_value(action, value) - - # when nargs='*' on a positional, if there were no command-line - # args, use the default if it is anything other than None - elif (not arg_strings and action.nargs == ZERO_OR_MORE and - not action.option_strings): - if action.default is not None: - value = action.default - else: - value = arg_strings - self._check_value(action, value) - - # single argument or optional argument produces a single value - elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: - arg_string, = arg_strings - value = self._get_value(action, arg_string) - self._check_value(action, value) - - # REMAINDER arguments convert all values, checking none - elif action.nargs == REMAINDER: - value = [self._get_value(action, v) for v in arg_strings] - - # PARSER arguments convert all values, but check only the first - elif action.nargs == PARSER: - value = [self._get_value(action, v) for v in arg_strings] - self._check_value(action, value[0]) - - # all other types of nargs produce a list - else: - value = [self._get_value(action, v) for v in arg_strings] - for v in value: - self._check_value(action, v) - - # return the converted value - return value - - def _get_value(self, action, arg_string): - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - msg = _('%r is not callable') - 
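# Aside: the registry lookup above resolves the converter; ArgumentParser
# registers the identity function under the key None, so an action created
# without type= gets its raw string back unchanged.  Concretely, for
# add_argument('--n', type=int), parsing '--n 5' calls int('5') here, and
# int('five') raising ValueError is what surfaces below as
# "invalid int value: 'five'".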
raise ArgumentError(action, msg % type_func) - - # convert the value to the appropriate type - try: - result = type_func(arg_string) - - # ArgumentTypeErrors indicate errors - except ArgumentTypeError: - name = getattr(action.type, '__name__', repr(action.type)) - msg = str(_sys.exc_info()[1]) - raise ArgumentError(action, msg) - - # TypeErrors or ValueErrors also indicate errors - except (TypeError, ValueError): - name = getattr(action.type, '__name__', repr(action.type)) - args = {'type': name, 'value': arg_string} - msg = _('invalid %(type)s value: %(value)r') - raise ArgumentError(action, msg % args) - - # return the converted value - return result - - def _check_value(self, action, value): - # converted value must be one of the choices (if specified) - if action.choices is not None and value not in action.choices: - args = {'value': value, - 'choices': ', '.join(map(repr, action.choices))} - msg = _('invalid choice: %(value)r (choose from %(choices)s)') - raise ArgumentError(action, msg % args) - - # ======================= - # Help-formatting methods - # ======================= - def format_usage(self): - formatter = self._get_formatter() - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - return formatter.format_help() - - def format_help(self): - formatter = self._get_formatter() - - # usage - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - - # description - formatter.add_text(self.description) - - # positionals, optionals and user-defined groups - for action_group in self._action_groups: - formatter.start_section(action_group.title) - formatter.add_text(action_group.description) - formatter.add_arguments(action_group._group_actions) - formatter.end_section() - - # epilog - formatter.add_text(self.epilog) - - # determine help from format above - return formatter.format_help() - - def format_version(self): - import warnings - warnings.warn( - 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - formatter = self._get_formatter() - formatter.add_text(self.version) - return formatter.format_help() - - def _get_formatter(self): - return self.formatter_class(prog=self.prog) - - # ===================== - # Help-printing methods - # ===================== - def print_usage(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_usage(), file) - - def print_help(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_help(), file) - - def print_version(self, file=None): - import warnings - warnings.warn( - 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - self._print_message(self.format_version(), file) - - def _print_message(self, message, file=None): - if message: - if file is None: - file = _sys.stderr - file.write(message) - - # =============== - # Exiting methods - # =============== - def exit(self, status=0, message=None): - if message: - self._print_message(message, _sys.stderr) - _sys.exit(status) - - def error(self, message): - """error(message: string) - - Prints a usage message incorporating the message to stderr and - exits. - - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. 
- """ - self.print_usage(_sys.stderr) - args = {'prog': self.prog, 'message': message} - self.exit(2, _('%(prog)s: error: %(message)s\n') % args) diff --git a/lib/bs4/__init__.py b/lib/bs4/__init__.py deleted file mode 100644 index 7a80452f..00000000 --- a/lib/bs4/__init__.py +++ /dev/null @@ -1,529 +0,0 @@ -"""Beautiful Soup -Elixir and Tonic -"The Screen-Scraper's Friend" -http://www.crummy.com/software/BeautifulSoup/ - -Beautiful Soup uses a pluggable XML or HTML parser to parse a -(possibly invalid) document into a tree representation. Beautiful Soup -provides methods and Pythonic idioms that make it easy to navigate, -search, and modify the parse tree. - -Beautiful Soup works with Python 2.7 and up. It works better if lxml -and/or html5lib is installed. - -For more than you ever wanted to know about Beautiful Soup, see the -documentation: -http://www.crummy.com/software/BeautifulSoup/bs4/doc/ - -""" - -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -__author__ = "Leonard Richardson (leonardr@segfault.org)" -__version__ = "4.6.0" -__copyright__ = "Copyright (c) 2004-2017 Leonard Richardson" -__license__ = "MIT" - -__all__ = ['BeautifulSoup'] - -import os -import re -import traceback -import warnings - -from .builder import builder_registry, ParserRejectedMarkup -from .dammit import UnicodeDammit -from .element import ( - CData, - Comment, - DEFAULT_OUTPUT_ENCODING, - Declaration, - Doctype, - NavigableString, - PageElement, - ProcessingInstruction, - ResultSet, - SoupStrainer, - Tag, - ) - -# The very first thing we do is give a useful error if someone is -# running this code under Python 3 without converting it. -'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' - -class BeautifulSoup(Tag): - """ - This class defines the basic interface called by the tree builders. - - These methods will be called by the parser: - reset() - feed(markup) - - The tree builder may call these methods from its feed() implementation: - handle_starttag(name, attrs) # See note about return value - handle_endtag(name) - handle_data(data) # Appends to the current data node - endData(containerClass=NavigableString) # Ends the current data node - - No matter how complicated the underlying parser is, you should be - able to build a tree using 'start tag' events, 'end tag' events, - 'data' events, and "done with data" events. - - If you encounter an empty-element tag (aka a self-closing tag, - like HTML's
tag), call handle_starttag and then - handle_endtag. - """ - ROOT_TAG_NAME = u'[document]' - - # If the end-user gives no indication which tree builder they - # want, look for one with these features. - DEFAULT_BUILDER_FEATURES = ['html', 'fast'] - - ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' - - NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup(YOUR_MARKUP})\n\nto this:\n\n BeautifulSoup(YOUR_MARKUP, \"%(parser)s\")\n" - - def __init__(self, markup="", features=None, builder=None, - parse_only=None, from_encoding=None, exclude_encodings=None, - **kwargs): - """The Soup object is initialized as the 'root tag', and the - provided markup (which can be a string or a file-like object) - is fed into the underlying parser.""" - - if 'convertEntities' in kwargs: - warnings.warn( - "BS4 does not respect the convertEntities argument to the " - "BeautifulSoup constructor. Entities are always converted " - "to Unicode characters.") - - if 'markupMassage' in kwargs: - del kwargs['markupMassage'] - warnings.warn( - "BS4 does not respect the markupMassage argument to the " - "BeautifulSoup constructor. The tree builder is responsible " - "for any necessary markup massage.") - - if 'smartQuotesTo' in kwargs: - del kwargs['smartQuotesTo'] - warnings.warn( - "BS4 does not respect the smartQuotesTo argument to the " - "BeautifulSoup constructor. Smart quotes are always converted " - "to Unicode characters.") - - if 'selfClosingTags' in kwargs: - del kwargs['selfClosingTags'] - warnings.warn( - "BS4 does not respect the selfClosingTags argument to the " - "BeautifulSoup constructor. The tree builder is responsible " - "for understanding self-closing tags.") - - if 'isHTML' in kwargs: - del kwargs['isHTML'] - warnings.warn( - "BS4 does not respect the isHTML argument to the " - "BeautifulSoup constructor. Suggest you use " - "features='lxml' for HTML and features='lxml-xml' for " - "XML.") - - def deprecated_argument(old_name, new_name): - if old_name in kwargs: - warnings.warn( - 'The "%s" argument to the BeautifulSoup constructor ' - 'has been renamed to "%s."' % (old_name, new_name)) - value = kwargs[old_name] - del kwargs[old_name] - return value - return None - - parse_only = parse_only or deprecated_argument( - "parseOnlyThese", "parse_only") - - from_encoding = from_encoding or deprecated_argument( - "fromEncoding", "from_encoding") - - if from_encoding and isinstance(markup, unicode): - warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") - from_encoding = None - - if len(kwargs) > 0: - arg = kwargs.keys().pop() - raise TypeError( - "__init__() got an unexpected keyword argument '%s'" % arg) - - if builder is None: - original_features = features - if isinstance(features, basestring): - features = [features] - if features is None or len(features) == 0: - features = self.DEFAULT_BUILDER_FEATURES - builder_class = builder_registry.lookup(*features) - if builder_class is None: - raise FeatureNotFound( - "Couldn't find a tree builder with the features you " - "requested: %s. 
Do you need to install a parser library?" - % ",".join(features)) - builder = builder_class() - if not (original_features == builder.NAME or - original_features in builder.ALTERNATE_NAMES): - if builder.is_xml: - markup_type = "XML" - else: - markup_type = "HTML" - - caller = traceback.extract_stack()[0] - filename = caller[0] - line_number = caller[1] - warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict( - filename=filename, - line_number=line_number, - parser=builder.NAME, - markup_type=markup_type)) - - self.builder = builder - self.is_xml = builder.is_xml - self.known_xml = self.is_xml - self.builder.soup = self - - self.parse_only = parse_only - - if hasattr(markup, 'read'): # It's a file-type object. - markup = markup.read() - elif len(markup) <= 256 and ( - (isinstance(markup, bytes) and not b'<' in markup) - or (isinstance(markup, unicode) and not u'<' in markup) - ): - # Print out warnings for a couple beginner problems - # involving passing non-markup to Beautiful Soup. - # Beautiful Soup will still parse the input as markup, - # just in case that's what the user really wants. - if (isinstance(markup, unicode) - and not os.path.supports_unicode_filenames): - possible_filename = markup.encode("utf8") - else: - possible_filename = markup - is_file = False - try: - is_file = os.path.exists(possible_filename) - except Exception, e: - # This is almost certainly a problem involving - # characters not valid in filenames on this - # system. Just let it go. - pass - if is_file: - if isinstance(markup, unicode): - markup = markup.encode("utf8") - warnings.warn( - '"%s" looks like a filename, not markup. You should' - ' probably open this file and pass the filehandle into' - ' Beautiful Soup.' % markup) - self._check_markup_is_url(markup) - - for (self.markup, self.original_encoding, self.declared_html_encoding, - self.contains_replacement_characters) in ( - self.builder.prepare_markup( - markup, from_encoding, exclude_encodings=exclude_encodings)): - self.reset() - try: - self._feed() - break - except ParserRejectedMarkup: - pass - - # Clear out the markup and remove the builder's circular - # reference to this object. - self.markup = None - self.builder.soup = None - - def __copy__(self): - copy = type(self)( - self.encode('utf-8'), builder=self.builder, from_encoding='utf-8' - ) - - # Although we encoded the tree to UTF-8, that may not have - # been the encoding of the original markup. Set the copy's - # .original_encoding to reflect the original object's - # .original_encoding. - copy.original_encoding = self.original_encoding - return copy - - def __getstate__(self): - # Frequently a tree builder can't be pickled. - d = dict(self.__dict__) - if 'builder' in d and not self.builder.picklable: - d['builder'] = None - return d - - @staticmethod - def _check_markup_is_url(markup): - """ - Check if markup looks like it's actually a url and raise a warning - if so. Markup can be unicode or str (py2) / bytes (py3). - """ - if isinstance(markup, bytes): - space = b' ' - cant_start_with = (b"http:", b"https:") - elif isinstance(markup, unicode): - space = u' ' - cant_start_with = (u"http:", u"https:") - else: - return - - if any(markup.startswith(prefix) for prefix in cant_start_with): - if not space in markup: - if isinstance(markup, bytes): - decoded_markup = markup.decode('utf-8', 'replace') - else: - decoded_markup = markup - warnings.warn( - '"%s" looks like a URL. Beautiful Soup is not an' - ' HTTP client. 
You should probably use an HTTP client like' - ' requests to get the document behind the URL, and feed' - ' that document to Beautiful Soup.' % decoded_markup - ) - - def _feed(self): - # Convert the document to Unicode. - self.builder.reset() - - self.builder.feed(self.markup) - # Close out any unfinished strings and close all the open tags. - self.endData() - while self.currentTag.name != self.ROOT_TAG_NAME: - self.popTag() - - def reset(self): - Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) - self.hidden = 1 - self.builder.reset() - self.current_data = [] - self.currentTag = None - self.tagStack = [] - self.preserve_whitespace_tag_stack = [] - self.pushTag(self) - - def new_tag(self, name, namespace=None, nsprefix=None, **attrs): - """Create a new tag associated with this soup.""" - return Tag(None, self.builder, name, namespace, nsprefix, attrs) - - def new_string(self, s, subclass=NavigableString): - """Create a new NavigableString associated with this soup.""" - return subclass(s) - - def insert_before(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_before().") - - def insert_after(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_after().") - - def popTag(self): - tag = self.tagStack.pop() - if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: - self.preserve_whitespace_tag_stack.pop() - #print "Pop", tag.name - if self.tagStack: - self.currentTag = self.tagStack[-1] - return self.currentTag - - def pushTag(self, tag): - #print "Push", tag.name - if self.currentTag: - self.currentTag.contents.append(tag) - self.tagStack.append(tag) - self.currentTag = self.tagStack[-1] - if tag.name in self.builder.preserve_whitespace_tags: - self.preserve_whitespace_tag_stack.append(tag) - - def endData(self, containerClass=NavigableString): - if self.current_data: - current_data = u''.join(self.current_data) - # If whitespace is not preserved, and this string contains - # nothing but ASCII spaces, replace it with a single space - # or newline. - if not self.preserve_whitespace_tag_stack: - strippable = True - for i in current_data: - if i not in self.ASCII_SPACES: - strippable = False - break - if strippable: - if '\n' in current_data: - current_data = '\n' - else: - current_data = ' ' - - # Reset the data collector. - self.current_data = [] - - # Should we add this string to the tree at all? - if self.parse_only and len(self.tagStack) <= 1 and \ - (not self.parse_only.text or \ - not self.parse_only.search(current_data)): - return - - o = containerClass(current_data) - self.object_was_parsed(o) - - def object_was_parsed(self, o, parent=None, most_recent_element=None): - """Add an object to the parse tree.""" - parent = parent or self.currentTag - previous_element = most_recent_element or self._most_recent_element - - next_element = previous_sibling = next_sibling = None - if isinstance(o, Tag): - next_element = o.next_element - next_sibling = o.next_sibling - previous_sibling = o.previous_sibling - if not previous_element: - previous_element = o.previous_element - - o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) - - self._most_recent_element = o - parent.contents.append(o) - - if parent.next_sibling: - # This node is being inserted into an element that has - # already been parsed. Deal with any dangling references. 
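# Aside: the tree is threaded as two doubly-linked chains -- next_element/
# previous_element walk the whole document in parse order, while
# next_sibling/previous_sibling only link children of the same parent.
# The block below re-derives o's four neighbours from its position in
# parent.contents and patches those neighbours' back-pointers so both
# chains stay consistent.  For markup like "<p><b>one</b>two</p>", the
# <b> tag's next_sibling is the string "two" but its next_element is "one".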
- index = len(parent.contents)-1 - while index >= 0: - if parent.contents[index] is o: - break - index -= 1 - else: - raise ValueError( - "Error building tree: supposedly %r was inserted " - "into %r after the fact, but I don't see it!" % ( - o, parent - ) - ) - if index == 0: - previous_element = parent - previous_sibling = None - else: - previous_element = previous_sibling = parent.contents[index-1] - if index == len(parent.contents)-1: - next_element = parent.next_sibling - next_sibling = None - else: - next_element = next_sibling = parent.contents[index+1] - - o.previous_element = previous_element - if previous_element: - previous_element.next_element = o - o.next_element = next_element - if next_element: - next_element.previous_element = o - o.next_sibling = next_sibling - if next_sibling: - next_sibling.previous_sibling = o - o.previous_sibling = previous_sibling - if previous_sibling: - previous_sibling.next_sibling = o - - def _popToTag(self, name, nsprefix=None, inclusivePop=True): - """Pops the tag stack up to and including the most recent - instance of the given tag. If inclusivePop is false, pops the tag - stack up to but *not* including the most recent instance of - the given tag.""" - #print "Popping to %s" % name - if name == self.ROOT_TAG_NAME: - # The BeautifulSoup object itself can never be popped. - return - - most_recently_popped = None - - stack_size = len(self.tagStack) - for i in range(stack_size - 1, 0, -1): - t = self.tagStack[i] - if (name == t.name and nsprefix == t.prefix): - if inclusivePop: - most_recently_popped = self.popTag() - break - most_recently_popped = self.popTag() - - return most_recently_popped - - def handle_starttag(self, name, namespace, nsprefix, attrs): - """Push a start tag on to the stack. - - If this method returns None, the tag was rejected by the - SoupStrainer. You should proceed as if the tag had not occurred - in the document. For instance, if this was a self-closing tag, - don't call handle_endtag. - """ - - # print "Start tag %s: %s" % (name, attrs) - self.endData() - - if (self.parse_only and len(self.tagStack) <= 1 - and (self.parse_only.text - or not self.parse_only.search_tag(name, attrs))): - return None - - tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, - self.currentTag, self._most_recent_element) - if tag is None: - return tag - if self._most_recent_element: - self._most_recent_element.next_element = tag - self._most_recent_element = tag - self.pushTag(tag) - return tag - - def handle_endtag(self, name, nsprefix=None): - #print "End tag: " + name - self.endData() - self._popToTag(name, nsprefix) - - def handle_data(self, data): - self.current_data.append(data) - - def decode(self, pretty_print=False, - eventual_encoding=DEFAULT_OUTPUT_ENCODING, - formatter="minimal"): - """Returns a string or Unicode representation of this document.
- To get Unicode, pass None for encoding.""" - - if self.is_xml: - # Print the XML declaration - encoding_part = '' - if eventual_encoding != None: - encoding_part = ' encoding="%s"' % eventual_encoding - prefix = u'\n' % encoding_part - else: - prefix = u'' - if not pretty_print: - indent_level = None - else: - indent_level = 0 - return prefix + super(BeautifulSoup, self).decode( - indent_level, eventual_encoding, formatter) - -# Alias to make it easier to type import: 'from bs4 import _soup' -_s = BeautifulSoup -_soup = BeautifulSoup - -class BeautifulStoneSoup(BeautifulSoup): - """Deprecated interface to an XML parser.""" - - def __init__(self, *args, **kwargs): - kwargs['features'] = 'xml' - warnings.warn( - 'The BeautifulStoneSoup class is deprecated. Instead of using ' - 'it, pass features="xml" into the BeautifulSoup constructor.') - super(BeautifulStoneSoup, self).__init__(*args, **kwargs) - - -class StopParsing(Exception): - pass - -class FeatureNotFound(ValueError): - pass - - -#By default, act as an HTML pretty-printer. -if __name__ == '__main__': - import sys - soup = BeautifulSoup(sys.stdin) - print soup.prettify() diff --git a/lib/bs4/builder/__init__.py b/lib/bs4/builder/__init__.py deleted file mode 100644 index fdb3362f..00000000 --- a/lib/bs4/builder/__init__.py +++ /dev/null @@ -1,333 +0,0 @@ -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from collections import defaultdict -import itertools -import sys -from bs4.element import ( - CharsetMetaAttributeValue, - ContentMetaAttributeValue, - HTMLAwareEntitySubstitution, - whitespace_re - ) - -__all__ = [ - 'HTMLTreeBuilder', - 'SAXTreeBuilder', - 'TreeBuilder', - 'TreeBuilderRegistry', - ] - -# Some useful features for a TreeBuilder to have. -FAST = 'fast' -PERMISSIVE = 'permissive' -STRICT = 'strict' -XML = 'xml' -HTML = 'html' -HTML_5 = 'html5' - - -class TreeBuilderRegistry(object): - - def __init__(self): - self.builders_for_feature = defaultdict(list) - self.builders = [] - - def register(self, treebuilder_class): - """Register a treebuilder based on its advertised features.""" - for feature in treebuilder_class.features: - self.builders_for_feature[feature].insert(0, treebuilder_class) - self.builders.insert(0, treebuilder_class) - - def lookup(self, *features): - if len(self.builders) == 0: - # There are no builders at all. - return None - - if len(features) == 0: - # They didn't ask for any features. Give them the most - # recently registered builder. - return self.builders[0] - - # Go down the list of features in order, and eliminate any builders - # that don't match every feature. - features = list(features) - features.reverse() - candidates = None - candidate_set = None - while len(features) > 0: - feature = features.pop() - we_have_the_feature = self.builders_for_feature.get(feature, []) - if len(we_have_the_feature) > 0: - if candidates is None: - candidates = we_have_the_feature - candidate_set = set(candidates) - else: - # Eliminate any candidates that don't have this feature. - candidate_set = candidate_set.intersection( - set(we_have_the_feature)) - - # The only valid candidates are the ones in candidate_set. - # Go through the original list of candidates and pick the first one - # that's in candidate_set. 
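# Aside: register() inserts at index 0, so self.builders and each
# builders_for_feature list are ordered most-recently-registered first,
# and this scan therefore prefers the newest builder that survived the
# feature intersection.  With the registrations at the bottom of this
# module (html.parser, then html5lib, then lxml), a plain
# builder_registry.lookup('html') returns the lxml builder when lxml is
# importable, then html5lib, then html.parser.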
- if candidate_set is None: - return None - for candidate in candidates: - if candidate in candidate_set: - return candidate - return None - -# The BeautifulSoup class will take feature lists from developers and use them -# to look up builders in this registry. -builder_registry = TreeBuilderRegistry() - -class TreeBuilder(object): - """Turn a document into a Beautiful Soup object tree.""" - - NAME = "[Unknown tree builder]" - ALTERNATE_NAMES = [] - features = [] - - is_xml = False - picklable = False - preserve_whitespace_tags = set() - empty_element_tags = None # A tag will be considered an empty-element - # tag when and only when it has no contents. - - # A value for these tag/attribute combinations is a space- or - # comma-separated list of CDATA, rather than a single CDATA. - cdata_list_attributes = {} - - - def __init__(self): - self.soup = None - - def reset(self): - pass - - def can_be_empty_element(self, tag_name): - """Might a tag with this name be an empty-element tag? - - The final markup may or may not actually present this tag as - self-closing. - - For instance: an HTMLBuilder does not consider a
<p> tag to be - an empty-element tag (it's not in - HTMLBuilder.empty_element_tags). This means an empty <p> tag - will be presented as "<p></p>", not "<p/>". - - The default implementation has no opinion about which tags are - empty-element tags, so a tag will be presented as an - empty-element tag if and only if it has no contents. - "<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will - be left alone. - """ - if self.empty_element_tags is None: - return True - return tag_name in self.empty_element_tags - - def feed(self, markup): - raise NotImplementedError() - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None): - return markup, None, None, False - - def test_fragment_to_document(self, fragment): - """Wrap an HTML fragment to make it look like a document. - - Different parsers do this differently. For instance, lxml - introduces an empty <head> tag, and html5lib - doesn't. Abstracting this away lets us write simple tests - which run HTML fragments through the parser and compare the - results against other HTML fragments. - - This method should not be used outside of tests. - """ - return fragment - - def set_up_substitutions(self, tag): - return False - - def _replace_cdata_list_attribute_values(self, tag_name, attrs): - """Replaces class="foo bar" with class=["foo", "bar"] - - Modifies its input in place. - """ - if not attrs: - return attrs - if self.cdata_list_attributes: - universal = self.cdata_list_attributes.get('*', []) - tag_specific = self.cdata_list_attributes.get( - tag_name.lower(), None) - for attr in attrs.keys(): - if attr in universal or (tag_specific and attr in tag_specific): - # We have a "class"-type attribute whose string - # value is a whitespace-separated list of - # values. Split it into a list. - value = attrs[attr] - if isinstance(value, basestring): - values = whitespace_re.split(value) - else: - # html5lib sometimes calls setAttributes twice - # for the same tag when rearranging the parse - # tree. On the second call the attribute value - # here is already a list. If this happens, - # leave the value alone rather than trying to - # split it again. - values = value - attrs[attr] = values - return attrs - -class SAXTreeBuilder(TreeBuilder): - """A Beautiful Soup treebuilder that listens for SAX events.""" - - def feed(self, markup): - raise NotImplementedError() - - def close(self): - pass - - def startElement(self, name, attrs): - attrs = dict((key[1], value) for key, value in list(attrs.items())) - #print "Start %s, %r" % (name, attrs) - self.soup.handle_starttag(name, attrs) - - def endElement(self, name): - #print "End %s" % name - self.soup.handle_endtag(name) - - def startElementNS(self, nsTuple, nodeName, attrs): - # Throw away (ns, nodeName) for now. - self.startElement(nodeName, attrs) - - def endElementNS(self, nsTuple, nodeName): - # Throw away (ns, nodeName) for now. - self.endElement(nodeName) - #handler.endElementNS((ns, node.nodeName), node.nodeName) - - def startPrefixMapping(self, prefix, nodeValue): - # Ignore the prefix for now. - pass - - def endPrefixMapping(self, prefix): - # Ignore the prefix for now. - # handler.endPrefixMapping(prefix) - pass - - def characters(self, content): - self.soup.handle_data(content) - - def startDocument(self): - pass - - def endDocument(self): - pass - - -class HTMLTreeBuilder(TreeBuilder): - """This TreeBuilder knows facts about HTML. - - Such as which tags are empty-element tags. - """ - - preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags - empty_element_tags = set([ - # These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', - - # These are from HTML4, removed in HTML5. - 'spacer', 'frame' - ]) - - # The HTML standard defines these attributes as containing a - # space-separated list of values, not a single value. That is, - # class="foo bar" means that the 'class' attribute has two values, - # 'foo' and 'bar', not the single value 'foo bar'. When we - # encounter one of these attributes, we will parse its value into - # a list of values if possible. Upon output, the list will be - # converted back into a string. - cdata_list_attributes = { - "*" : ['class', 'accesskey', 'dropzone'], - "a" : ['rel', 'rev'], - "link" : ['rel', 'rev'], - "td" : ["headers"], - "th" : ["headers"], - "form" : ["accept-charset"], - "object" : ["archive"], - - # These are HTML5 specific, as are *.accesskey and *.dropzone above. - "area" : ["rel"], - "icon" : ["sizes"], - "iframe" : ["sandbox"], - "output" : ["for"], - } - - def set_up_substitutions(self, tag): - # We are only interested in <meta> tags - if tag.name != 'meta': - return False - - http_equiv = tag.get('http-equiv') - content = tag.get('content') - charset = tag.get('charset') - - # We are interested in <meta> tags that say what encoding the - # document was originally in. This means HTML 5-style <meta> - # tags that provide the "charset" attribute. It also means - # HTML 4-style <meta> tags that provide the "content" - # attribute and have "http-equiv" set to "content-type". - # - # In both cases we will replace the value of the appropriate - # attribute with a standin object that can take on any - # encoding. - meta_encoding = None - if charset is not None: - # HTML 5 style: - # <meta charset="utf8"> - meta_encoding = charset - tag['charset'] = CharsetMetaAttributeValue(charset) - - elif (content is not None and http_equiv is not None - and http_equiv.lower() == 'content-type'): - # HTML 4 style: - # <meta http-equiv="content-type" content="text/html;charset=utf8"> - tag['content'] = ContentMetaAttributeValue(content) - - return (meta_encoding is not None) - -def register_treebuilders_from(module): - """Copy TreeBuilders from the given module into this module.""" - # I'm fairly sure this is not the best way to do this. - this_module = sys.modules['bs4.builder'] - for name in module.__all__: - obj = getattr(module, name) - - if issubclass(obj, TreeBuilder): - setattr(this_module, name, obj) - this_module.__all__.append(name) - # Register the builder while we're at it. - this_module.builder_registry.register(obj) - -class ParserRejectedMarkup(Exception): - pass - -# Builders are registered in reverse order of priority, so that custom -# builder registrations will take precedence. In general, we want lxml -# to take precedence over html5lib, because it's faster. And we only -# want to use HTMLParser as a last resort. -from . import _htmlparser -register_treebuilders_from(_htmlparser) -try: - from . import _html5lib - register_treebuilders_from(_html5lib) -except ImportError: - # They don't have html5lib installed. - pass -try: - from . import _lxml - register_treebuilders_from(_lxml) -except ImportError: - # They don't have lxml installed. - pass diff --git a/lib/bs4/builder/_html5lib.py b/lib/bs4/builder/_html5lib.py deleted file mode 100644 index 5f548935..00000000 --- a/lib/bs4/builder/_html5lib.py +++ /dev/null @@ -1,426 +0,0 @@ -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file.
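The cdata_list_attributes table above is what makes those attributes round-trip as Python lists rather than single strings. A minimal runnable sketch of the effect, assuming only that Beautiful Soup 4 is importable:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p class="foo bar" id="main">hi</p>', 'html.parser')
    print(soup.p['class'])  # ['foo', 'bar'] -- 'class' is listed under "*" in the table
    print(soup.p['id'])     # 'main' -- 'id' is not in the table, so it stays one string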
- -__all__ = [ - 'HTML5TreeBuilder', - ] - -import warnings -import re -from bs4.builder import ( - PERMISSIVE, - HTML, - HTML_5, - HTMLTreeBuilder, - ) -from bs4.element import ( - NamespacedAttribute, - whitespace_re, -) -import html5lib -from html5lib.constants import ( - namespaces, - prefixes, - ) -from bs4.element import ( - Comment, - Doctype, - NavigableString, - Tag, - ) - -try: - # Pre-0.99999999 - from html5lib.treebuilders import _base as treebuilder_base - new_html5lib = False -except ImportError, e: - # 0.99999999 and up - from html5lib.treebuilders import base as treebuilder_base - new_html5lib = True - -class HTML5TreeBuilder(HTMLTreeBuilder): - """Use html5lib to build a tree.""" - - NAME = "html5lib" - - features = [NAME, PERMISSIVE, HTML_5, HTML] - - def prepare_markup(self, markup, user_specified_encoding, - document_declared_encoding=None, exclude_encodings=None): - # Store the user-specified encoding for use later on. - self.user_specified_encoding = user_specified_encoding - - # document_declared_encoding and exclude_encodings aren't used - # ATM because the html5lib TreeBuilder doesn't use - # UnicodeDammit. - if exclude_encodings: - warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.") - yield (markup, None, None, False) - - # These methods are defined by Beautiful Soup. - def feed(self, markup): - if self.soup.parse_only is not None: - warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.") - parser = html5lib.HTMLParser(tree=self.create_treebuilder) - - extra_kwargs = dict() - if not isinstance(markup, unicode): - if new_html5lib: - extra_kwargs['override_encoding'] = self.user_specified_encoding - else: - extra_kwargs['encoding'] = self.user_specified_encoding - doc = parser.parse(markup, **extra_kwargs) - - # Set the character encoding detected by the tokenizer. - if isinstance(markup, unicode): - # We need to special-case this because html5lib sets - # charEncoding to UTF-8 if it gets Unicode input. - doc.original_encoding = None - else: - original_encoding = parser.tokenizer.stream.charEncoding[0] - if not isinstance(original_encoding, basestring): - # In 0.99999999 and up, the encoding is an html5lib - # Encoding object. We want to use a string for compatibility - # with other tree builders. 
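# Aside: parser.parse() returns the soup itself (getDocument() below hands
# back self.soup), so the assignment here is what callers later see as
# soup.original_encoding -- the tokenizer's detected encoding for byte
# input, or None when the caller supplied unicode, per the special case
# above.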
- original_encoding = original_encoding.name - doc.original_encoding = original_encoding - - def create_treebuilder(self, namespaceHTMLElements): - self.underlying_builder = TreeBuilderForHtml5lib( - namespaceHTMLElements, self.soup) - return self.underlying_builder - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'%s' % fragment - - -class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder): - - def __init__(self, namespaceHTMLElements, soup=None): - if soup: - self.soup = soup - else: - from bs4 import BeautifulSoup - self.soup = BeautifulSoup("", "html.parser") - super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) - - def documentClass(self): - self.soup.reset() - return Element(self.soup, self.soup, None) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - doctype = Doctype.for_name_and_ids(name, publicId, systemId) - self.soup.object_was_parsed(doctype) - - def elementClass(self, name, namespace): - tag = self.soup.new_tag(name, namespace) - return Element(tag, self.soup, namespace) - - def commentClass(self, data): - return TextNode(Comment(data), self.soup) - - def fragmentClass(self): - from bs4 import BeautifulSoup - self.soup = BeautifulSoup("", "html.parser") - self.soup.name = "[document_fragment]" - return Element(self.soup, self.soup, None) - - def appendChild(self, node): - # XXX This code is not covered by the BS4 tests. - self.soup.append(node.element) - - def getDocument(self): - return self.soup - - def getFragment(self): - return treebuilder_base.TreeBuilder.getFragment(self).element - - def testSerializer(self, element): - from bs4 import BeautifulSoup - rv = [] - doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$') - - def serializeElement(element, indent=0): - if isinstance(element, BeautifulSoup): - pass - if isinstance(element, Doctype): - m = doctype_re.match(element) - if m: - name = m.group(1) - if m.lastindex > 1: - publicId = m.group(2) or "" - systemId = m.group(3) or m.group(4) or "" - rv.append("""|%s""" % - (' ' * indent, name, publicId, systemId)) - else: - rv.append("|%s" % (' ' * indent, name)) - else: - rv.append("|%s" % (' ' * indent,)) - elif isinstance(element, Comment): - rv.append("|%s" % (' ' * indent, element)) - elif isinstance(element, NavigableString): - rv.append("|%s\"%s\"" % (' ' * indent, element)) - else: - if element.namespace: - name = "%s %s" % (prefixes[element.namespace], - element.name) - else: - name = element.name - rv.append("|%s<%s>" % (' ' * indent, name)) - if element.attrs: - attributes = [] - for name, value in element.attrs.items(): - if isinstance(name, NamespacedAttribute): - name = "%s %s" % (prefixes[name.namespace], name.name) - if isinstance(value, list): - value = " ".join(value) - attributes.append((name, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - indent += 2 - for child in element.children: - serializeElement(child, indent) - serializeElement(element, 0) - - return "\n".join(rv) - -class AttrList(object): - def __init__(self, element): - self.element = element - self.attrs = dict(self.element.attrs) - def __iter__(self): - return list(self.attrs.items()).__iter__() - def __setitem__(self, name, value): - # If this attribute is a multi-valued attribute for this element, - # turn its value into a list. 
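# Aside: whitespace_re is bs4.element's compiled r'\s+' pattern, so a
# value such as "foo  bar\tbaz" is stored as ['foo', 'bar', 'baz']; the
# isinstance check below guards against re-splitting a value that an
# earlier clone or parse step already turned into a list.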
- list_attr = HTML5TreeBuilder.cdata_list_attributes - if (name in list_attr['*'] - or (self.element.name in list_attr - and name in list_attr[self.element.name])): - # A node that is being cloned may have already undergone - # this procedure. - if not isinstance(value, list): - value = whitespace_re.split(value) - self.element[name] = value - def items(self): - return list(self.attrs.items()) - def keys(self): - return list(self.attrs.keys()) - def __len__(self): - return len(self.attrs) - def __getitem__(self, name): - return self.attrs[name] - def __contains__(self, name): - return name in list(self.attrs.keys()) - - -class Element(treebuilder_base.Node): - def __init__(self, element, soup, namespace): - treebuilder_base.Node.__init__(self, element.name) - self.element = element - self.soup = soup - self.namespace = namespace - - def appendChild(self, node): - string_child = child = None - if isinstance(node, basestring): - # Some other piece of code decided to pass in a string - # instead of creating a TextElement object to contain the - # string. - string_child = child = node - elif isinstance(node, Tag): - # Some other piece of code decided to pass in a Tag - # instead of creating an Element object to contain the - # Tag. - child = node - elif node.element.__class__ == NavigableString: - string_child = child = node.element - node.parent = self - else: - child = node.element - node.parent = self - - if not isinstance(child, basestring) and child.parent is not None: - node.element.extract() - - if (string_child and self.element.contents - and self.element.contents[-1].__class__ == NavigableString): - # We are appending a string onto another string. - # TODO This has O(n^2) performance, for input like - # "aaa..." - old_element = self.element.contents[-1] - new_element = self.soup.new_string(old_element + string_child) - old_element.replace_with(new_element) - self.soup._most_recent_element = new_element - else: - if isinstance(node, basestring): - # Create a brand new NavigableString from this string. - child = self.soup.new_string(node) - - # Tell Beautiful Soup to act as if it parsed this element - # immediately after the parent's last descendant. (Or - # immediately after the parent, if it has no children.) - if self.element.contents: - most_recent_element = self.element._last_descendant(False) - elif self.element.next_element is not None: - # Something from further ahead in the parse tree is - # being inserted into this earlier element. This is - # very annoying because it means an expensive search - # for the last element in the tree. - most_recent_element = self.soup._last_descendant() - else: - most_recent_element = self.element - - self.soup.object_was_parsed( - child, parent=self.element, - most_recent_element=most_recent_element) - - def getAttributes(self): - if isinstance(self.element, Comment): - return {} - return AttrList(self.element) - - def setAttributes(self, attributes): - - if attributes is not None and len(attributes) > 0: - - converted_attributes = [] - for name, value in list(attributes.items()): - if isinstance(name, tuple): - new_name = NamespacedAttribute(*name) - del attributes[name] - attributes[new_name] = value - - self.soup.builder._replace_cdata_list_attribute_values( - self.name, attributes) - for name, value in attributes.items(): - self.element[name] = value - - # The attributes may contain variables that need substitution. - # Call set_up_substitutions manually. 
- # - # The Tag constructor called this method when the Tag was created, - # but we just set/changed the attributes, so call it again. - self.soup.builder.set_up_substitutions(self.element) - attributes = property(getAttributes, setAttributes) - - def insertText(self, data, insertBefore=None): - text = TextNode(self.soup.new_string(data), self.soup) - if insertBefore: - self.insertBefore(text, insertBefore) - else: - self.appendChild(text) - - def insertBefore(self, node, refNode): - index = self.element.index(refNode.element) - if (node.element.__class__ == NavigableString and self.element.contents - and self.element.contents[index-1].__class__ == NavigableString): - # (See comments in appendChild) - old_node = self.element.contents[index-1] - new_str = self.soup.new_string(old_node + node.element) - old_node.replace_with(new_str) - else: - self.element.insert(index, node.element) - node.parent = self - - def removeChild(self, node): - node.element.extract() - - def reparentChildren(self, new_parent): - """Move all of this tag's children into another tag.""" - # print "MOVE", self.element.contents - # print "FROM", self.element - # print "TO", new_parent.element - - element = self.element - new_parent_element = new_parent.element - # Determine what this tag's next_element will be once all the children - # are removed. - final_next_element = element.next_sibling - - new_parents_last_descendant = new_parent_element._last_descendant(False, False) - if len(new_parent_element.contents) > 0: - # The new parent already contains children. We will be - # appending this tag's children to the end. - new_parents_last_child = new_parent_element.contents[-1] - new_parents_last_descendant_next_element = new_parents_last_descendant.next_element - else: - # The new parent contains no children. - new_parents_last_child = None - new_parents_last_descendant_next_element = new_parent_element.next_element - - to_append = element.contents - if len(to_append) > 0: - # Set the first child's previous_element and previous_sibling - # to elements within the new parent - first_child = to_append[0] - if new_parents_last_descendant: - first_child.previous_element = new_parents_last_descendant - else: - first_child.previous_element = new_parent_element - first_child.previous_sibling = new_parents_last_child - if new_parents_last_descendant: - new_parents_last_descendant.next_element = first_child - else: - new_parent_element.next_element = first_child - if new_parents_last_child: - new_parents_last_child.next_sibling = first_child - - # Find the very last element being moved. It is now the - # parent's last descendant. It has no .next_sibling and - # its .next_element is whatever the previous last - # descendant had. - last_childs_last_descendant = to_append[-1]._last_descendant(False, True) - - last_childs_last_descendant.next_element = new_parents_last_descendant_next_element - if new_parents_last_descendant_next_element: - # TODO: This code has no test coverage and I'm not sure - # how to get html5lib to go through this path, but it's - # just the other side of the previous line. - new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant - last_childs_last_descendant.next_sibling = None - - for child in to_append: - child.parent = new_parent_element - new_parent_element.contents.append(child) - - # Now that this element has no children, change its .next_element. 
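# Aside: at this point every child has been spliced under
# new_parent_element, so emptying element.contents and pointing
# element.next_element at final_next_element (the old next_sibling)
# lets a document-order walk skip cleanly past the now-childless tag.
# html5lib exercises this while repairing mis-nested markup, e.g. its
# adoption-agency handling of input like "<b>1<p>2</b>3</p>".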
- element.contents = [] - element.next_element = final_next_element - - # print "DONE WITH MOVE" - # print "FROM", self.element - # print "TO", new_parent_element - - def cloneNode(self): - tag = self.soup.new_tag(self.element.name, self.namespace) - node = Element(tag, self.soup, self.namespace) - for key,value in self.attributes: - node.attributes[key] = value - return node - - def hasContent(self): - return self.element.contents - - def getNameTuple(self): - if self.namespace == None: - return namespaces["html"], self.name - else: - return self.namespace, self.name - - nameTuple = property(getNameTuple) - -class TextNode(Element): - def __init__(self, element, soup): - treebuilder_base.Node.__init__(self, None) - self.element = element - self.soup = soup - - def cloneNode(self): - raise NotImplementedError diff --git a/lib/bs4/builder/_htmlparser.py b/lib/bs4/builder/_htmlparser.py deleted file mode 100644 index 67890b3a..00000000 --- a/lib/bs4/builder/_htmlparser.py +++ /dev/null @@ -1,314 +0,0 @@ -"""Use the HTMLParser library to parse HTML files that aren't too bad.""" - -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -__all__ = [ - 'HTMLParserTreeBuilder', - ] - -from HTMLParser import HTMLParser - -try: - from HTMLParser import HTMLParseError -except ImportError, e: - # HTMLParseError is removed in Python 3.5. Since it can never be - # thrown in 3.5, we can just define our own class as a placeholder. - class HTMLParseError(Exception): - pass - -import sys -import warnings - -# Starting in Python 3.2, the HTMLParser constructor takes a 'strict' -# argument, which we'd like to set to False. Unfortunately, -# http://bugs.python.org/issue13273 makes strict=True a better bet -# before Python 3.2.3. -# -# At the end of this file, we monkeypatch HTMLParser so that -# strict=True works well on Python 3.2.2. -major, minor, release = sys.version_info[:3] -CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3 -CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3 -CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4 - - -from bs4.element import ( - CData, - Comment, - Declaration, - Doctype, - ProcessingInstruction, - ) -from bs4.dammit import EntitySubstitution, UnicodeDammit - -from bs4.builder import ( - HTML, - HTMLTreeBuilder, - STRICT, - ) - - -HTMLPARSER = 'html.parser' - -class BeautifulSoupHTMLParser(HTMLParser): - - def __init__(self, *args, **kwargs): - HTMLParser.__init__(self, *args, **kwargs) - - # Keep a list of empty-element tags that were encountered - # without an explicit closing tag. If we encounter a closing tag - # of this type, we'll associate it with one of those entries. - # - # This isn't a stack because we don't care about the - # order. It's a list of closing tags we've already handled and - # will ignore, assuming they ever show up. - self.already_closed_empty_element = [] - - def handle_startendtag(self, name, attrs): - # This is only called when the markup looks like - # . - - # is_startend() tells handle_starttag not to close the tag - # just because its name matches a known empty-element tag. We - # know that this is an empty-element tag and we want to call - # handle_endtag ourselves. 
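# Aside (py3 spelling): the stdlib parser fires this callback only for
# XHTML-style markup such as "<br/>"; feeding a bare "<br>" produces a
# handle_starttag call with no matching end event, which is why
# handle_starttag below has to synthesize end events for known
# empty-element tags itself.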
- tag = self.handle_starttag(name, attrs, handle_empty_element=False) - self.handle_endtag(name) - - def handle_starttag(self, name, attrs, handle_empty_element=True): - # XXX namespace - attr_dict = {} - for key, value in attrs: - # Change None attribute values to the empty string - # for consistency with the other tree builders. - if value is None: - value = '' - attr_dict[key] = value - attrvalue = '""' - #print "START", name - tag = self.soup.handle_starttag(name, None, None, attr_dict) - if tag and tag.is_empty_element and handle_empty_element: - # Unlike other parsers, html.parser doesn't send separate end tag - # events for empty-element tags. (It's handled in - # handle_startendtag, but only if the original markup looked like - # .) - # - # So we need to call handle_endtag() ourselves. Since we - # know the start event is identical to the end event, we - # don't want handle_endtag() to cross off any previous end - # events for tags of this name. - self.handle_endtag(name, check_already_closed=False) - - # But we might encounter an explicit closing tag for this tag - # later on. If so, we want to ignore it. - self.already_closed_empty_element.append(name) - - def handle_endtag(self, name, check_already_closed=True): - #print "END", name - if check_already_closed and name in self.already_closed_empty_element: - # This is a redundant end tag for an empty-element tag. - # We've already called handle_endtag() for it, so just - # check it off the list. - # print "ALREADY CLOSED", name - self.already_closed_empty_element.remove(name) - else: - self.soup.handle_endtag(name) - - def handle_data(self, data): - self.soup.handle_data(data) - - def handle_charref(self, name): - # XXX workaround for a bug in HTMLParser. Remove this once - # it's fixed in all supported versions. - # http://bugs.python.org/issue13633 - if name.startswith('x'): - real_name = int(name.lstrip('x'), 16) - elif name.startswith('X'): - real_name = int(name.lstrip('X'), 16) - else: - real_name = int(name) - - try: - data = unichr(real_name) - except (ValueError, OverflowError), e: - data = u"\N{REPLACEMENT CHARACTER}" - - self.handle_data(data) - - def handle_entityref(self, name): - character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) - if character is not None: - data = character - else: - data = "&%s;" % name - self.handle_data(data) - - def handle_comment(self, data): - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(Comment) - - def handle_decl(self, data): - self.soup.endData() - if data.startswith("DOCTYPE "): - data = data[len("DOCTYPE "):] - elif data == 'DOCTYPE': - # i.e. 
"" - data = '' - self.soup.handle_data(data) - self.soup.endData(Doctype) - - def unknown_decl(self, data): - if data.upper().startswith('CDATA['): - cls = CData - data = data[len('CDATA['):] - else: - cls = Declaration - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(cls) - - def handle_pi(self, data): - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(ProcessingInstruction) - - -class HTMLParserTreeBuilder(HTMLTreeBuilder): - - is_xml = False - picklable = True - NAME = HTMLPARSER - features = [NAME, HTML, STRICT] - - def __init__(self, *args, **kwargs): - if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED: - kwargs['strict'] = False - if CONSTRUCTOR_TAKES_CONVERT_CHARREFS: - kwargs['convert_charrefs'] = False - self.parser_args = (args, kwargs) - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None, exclude_encodings=None): - """ - :return: A 4-tuple (markup, original encoding, encoding - declared within markup, whether any characters had to be - replaced with REPLACEMENT CHARACTER). - """ - if isinstance(markup, unicode): - yield (markup, None, None, False) - return - - try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True, - exclude_encodings=exclude_encodings) - yield (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) - - def feed(self, markup): - args, kwargs = self.parser_args - parser = BeautifulSoupHTMLParser(*args, **kwargs) - parser.soup = self.soup - try: - parser.feed(markup) - except HTMLParseError, e: - warnings.warn(RuntimeWarning( - "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) - raise e - parser.already_closed_empty_element = [] - -# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some -# 3.2.3 code. This ensures they don't treat markup like
<a href="http://foo.com/">
as a -# string. -# -# XXX This code can be removed once most Python 3 users are on 3.2.3. -if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: - import re - attrfind_tolerant = re.compile( - r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' - r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') - HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant - - locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name - (?:\s+ # whitespace before attribute name - (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name - (?:\s*=\s* # value indicator - (?:'[^']*' # LITA-enclosed value - |\"[^\"]*\" # LIT-enclosed value - |[^'\">\s]+ # bare value - ) - )? - ) - )* - \s* # trailing whitespace -""", re.VERBOSE) - BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend - - from html.parser import tagfind, attrfind - - def parse_starttag(self, i): - self.__starttag_text = None - endpos = self.check_for_whole_start_tag(i) - if endpos < 0: - return endpos - rawdata = self.rawdata - self.__starttag_text = rawdata[i:endpos] - - # Now parse the data between i+1 and j into a tag and attrs - attrs = [] - match = tagfind.match(rawdata, i+1) - assert match, 'unexpected call to parse_starttag()' - k = match.end() - self.lasttag = tag = rawdata[i+1:k].lower() - while k < endpos: - if self.strict: - m = attrfind.match(rawdata, k) - else: - m = attrfind_tolerant.match(rawdata, k) - if not m: - break - attrname, rest, attrvalue = m.group(1, 2, 3) - if not rest: - attrvalue = None - elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ - attrvalue[:1] == '"' == attrvalue[-1:]: - attrvalue = attrvalue[1:-1] - if attrvalue: - attrvalue = self.unescape(attrvalue) - attrs.append((attrname.lower(), attrvalue)) - k = m.end() - - end = rawdata[k:endpos].strip() - if end not in (">", "/>"): - lineno, offset = self.getpos() - if "\n" in self.__starttag_text: - lineno = lineno + self.__starttag_text.count("\n") - offset = len(self.__starttag_text) \ - - self.__starttag_text.rfind("\n") - else: - offset = offset + len(self.__starttag_text) - if self.strict: - self.error("junk characters in start tag: %r" - % (rawdata[k:endpos][:20],)) - self.handle_data(rawdata[i:endpos]) - return endpos - if end.endswith('/>'): - # XHTML-style empty tag: - self.handle_startendtag(tag, attrs) - else: - self.handle_starttag(tag, attrs) - if tag in self.CDATA_CONTENT_ELEMENTS: - self.set_cdata_mode(tag) - return endpos - - def set_cdata_mode(self, elem): - self.cdata_elem = elem.lower() - self.interesting = re.compile(r'' % self.cdata_elem, re.I) - - BeautifulSoupHTMLParser.parse_starttag = parse_starttag - BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode - - CONSTRUCTOR_TAKES_STRICT = True diff --git a/lib/bs4/builder/_lxml.py b/lib/bs4/builder/_lxml.py deleted file mode 100644 index d2ca2872..00000000 --- a/lib/bs4/builder/_lxml.py +++ /dev/null @@ -1,258 +0,0 @@ -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
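# [Editor's sketch, not part of the deleted file: why the BeautifulSoupHTMLParser
# code above synthesizes its own end-tag events. Python's html.parser reports a
# combined "startendtag" event only when the markup itself is self-closing; a
# bare empty-element tag like <br> never produces an end event. Standard
# library only; the class name below is illustrative.]
from html.parser import HTMLParser

class EventLogger(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print("start", tag)
    def handle_endtag(self, tag):
        print("end", tag)
    def handle_startendtag(self, tag, attrs):
        # Overriding this suppresses the default start+end callback pair.
        print("startend", tag)

EventLogger().feed("<br/><br>")  # -> "startend br", then "start br" (no "end br")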
-__all__ = [ - 'LXMLTreeBuilderForXML', - 'LXMLTreeBuilder', - ] - -from io import BytesIO -from StringIO import StringIO -import collections -from lxml import etree -from bs4.element import ( - Comment, - Doctype, - NamespacedAttribute, - ProcessingInstruction, - XMLProcessingInstruction, -) -from bs4.builder import ( - FAST, - HTML, - HTMLTreeBuilder, - PERMISSIVE, - ParserRejectedMarkup, - TreeBuilder, - XML) -from bs4.dammit import EncodingDetector - -LXML = 'lxml' - -class LXMLTreeBuilderForXML(TreeBuilder): - DEFAULT_PARSER_CLASS = etree.XMLParser - - is_xml = True - processing_instruction_class = XMLProcessingInstruction - - NAME = "lxml-xml" - ALTERNATE_NAMES = ["xml"] - - # Well, it's permissive by XML parser standards. - features = [NAME, LXML, XML, FAST, PERMISSIVE] - - CHUNK_SIZE = 512 - - # This namespace mapping is specified in the XML Namespace - # standard. - DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"} - - def default_parser(self, encoding): - # This can either return a parser object or a class, which - # will be instantiated with default arguments. - if self._default_parser is not None: - return self._default_parser - return etree.XMLParser( - target=self, strip_cdata=False, recover=True, encoding=encoding) - - def parser_for(self, encoding): - # Use the default parser. - parser = self.default_parser(encoding) - - if isinstance(parser, collections.Callable): - # Instantiate the parser with default arguments - parser = parser(target=self, strip_cdata=False, encoding=encoding) - return parser - - def __init__(self, parser=None, empty_element_tags=None): - # TODO: Issue a warning if parser is present but not a - # callable, since that means there's no way to create new - # parsers for different encodings. - self._default_parser = parser - if empty_element_tags is not None: - self.empty_element_tags = set(empty_element_tags) - self.soup = None - self.nsmaps = [self.DEFAULT_NSMAPS] - - def _getNsTag(self, tag): - # Split the namespace URL out of a fully-qualified lxml tag - # name. Copied from lxml's src/lxml/sax.py. - if tag[0] == '{': - return tuple(tag[1:].split('}', 1)) - else: - return (None, tag) - - def prepare_markup(self, markup, user_specified_encoding=None, - exclude_encodings=None, - document_declared_encoding=None): - """ - :yield: A series of 4-tuples. - (markup, encoding, declared encoding, - has undergone character replacement) - - Each 4-tuple represents a strategy for parsing the document. - """ - # Instead of using UnicodeDammit to convert the bytestring to - # Unicode using different encodings, use EncodingDetector to - # iterate over the encodings, and tell lxml to try to parse - # the document as each one in turn. - is_html = not self.is_xml - if is_html: - self.processing_instruction_class = ProcessingInstruction - else: - self.processing_instruction_class = XMLProcessingInstruction - - if isinstance(markup, unicode): - # We were given Unicode. Maybe lxml can parse Unicode on - # this system? - yield markup, None, document_declared_encoding, False - - if isinstance(markup, unicode): - # No, apparently not. Convert the Unicode to UTF-8 and - # tell lxml to parse it as UTF-8. 
- yield (markup.encode("utf8"), "utf8", - document_declared_encoding, False) - - try_encodings = [user_specified_encoding, document_declared_encoding] - detector = EncodingDetector( - markup, try_encodings, is_html, exclude_encodings) - for encoding in detector.encodings: - yield (detector.markup, encoding, document_declared_encoding, False) - - def feed(self, markup): - if isinstance(markup, bytes): - markup = BytesIO(markup) - elif isinstance(markup, unicode): - markup = StringIO(markup) - - # Call feed() at least once, even if the markup is empty, - # or the parser won't be initialized. - data = markup.read(self.CHUNK_SIZE) - try: - self.parser = self.parser_for(self.soup.original_encoding) - self.parser.feed(data) - while len(data) != 0: - # Now call feed() on the rest of the data, chunk by chunk. - data = markup.read(self.CHUNK_SIZE) - if len(data) != 0: - self.parser.feed(data) - self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError), e: - raise ParserRejectedMarkup(str(e)) - - def close(self): - self.nsmaps = [self.DEFAULT_NSMAPS] - - def start(self, name, attrs, nsmap={}): - # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. - attrs = dict(attrs) - nsprefix = None - # Invert each namespace map as it comes in. - if len(self.nsmaps) > 1: - # There are no new namespaces for this tag, but - # non-default namespaces are in play, so we need a - # separate tag stack to know when they end. - self.nsmaps.append(None) - elif len(nsmap) > 0: - # A new namespace mapping has come into play. - inverted_nsmap = dict((value, key) for key, value in nsmap.items()) - self.nsmaps.append(inverted_nsmap) - # Also treat the namespace mapping as a set of attributes on the - # tag, so we can recreate it later. - attrs = attrs.copy() - for prefix, namespace in nsmap.items(): - attribute = NamespacedAttribute( - "xmlns", prefix, "http://www.w3.org/2000/xmlns/") - attrs[attribute] = namespace - - # Namespaces are in play. Find any attributes that came in - # from lxml with namespaces attached to their names, and - # turn then into NamespacedAttribute objects. - new_attrs = {} - for attr, value in attrs.items(): - namespace, attr = self._getNsTag(attr) - if namespace is None: - new_attrs[attr] = value - else: - nsprefix = self._prefix_for_namespace(namespace) - attr = NamespacedAttribute(nsprefix, attr, namespace) - new_attrs[attr] = value - attrs = new_attrs - - namespace, name = self._getNsTag(name) - nsprefix = self._prefix_for_namespace(namespace) - self.soup.handle_starttag(name, namespace, nsprefix, attrs) - - def _prefix_for_namespace(self, namespace): - """Find the currently active prefix for the given namespace.""" - if namespace is None: - return None - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - return inverted_nsmap[namespace] - return None - - def end(self, name): - self.soup.endData() - completed_tag = self.soup.tagStack[-1] - namespace, name = self._getNsTag(name) - nsprefix = None - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = inverted_nsmap[namespace] - break - self.soup.handle_endtag(name, nsprefix) - if len(self.nsmaps) > 1: - # This tag, or one of its parents, introduced a namespace - # mapping, so pop it off the stack. 
- self.nsmaps.pop() - - def pi(self, target, data): - self.soup.endData() - self.soup.handle_data(target + ' ' + data) - self.soup.endData(self.processing_instruction_class) - - def data(self, content): - self.soup.handle_data(content) - - def doctype(self, name, pubid, system): - self.soup.endData() - doctype = Doctype.for_name_and_ids(name, pubid, system) - self.soup.object_was_parsed(doctype) - - def comment(self, content): - "Handle comments as Comment objects." - self.soup.endData() - self.soup.handle_data(content) - self.soup.endData(Comment) - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'\n%s' % fragment - - -class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): - - NAME = LXML - ALTERNATE_NAMES = ["lxml-html"] - - features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE] - is_xml = False - processing_instruction_class = ProcessingInstruction - - def default_parser(self, encoding): - return etree.HTMLParser - - def feed(self, markup): - encoding = self.soup.original_encoding - try: - self.parser = self.parser_for(encoding) - self.parser.feed(markup) - self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError), e: - raise ParserRejectedMarkup(str(e)) - - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'%s' % fragment diff --git a/lib/bs4/dammit.py b/lib/bs4/dammit.py deleted file mode 100644 index 7965565f..00000000 --- a/lib/bs4/dammit.py +++ /dev/null @@ -1,842 +0,0 @@ -# -*- coding: utf-8 -*- -"""Beautiful Soup bonus library: Unicode, Dammit - -This library converts a bytestream to Unicode through any means -necessary. It is heavily based on code from Mark Pilgrim's Universal -Feed Parser. It works best on XML and HTML, but it does not rewrite the -XML or HTML to reflect a new encoding; that's the tree builder's job. -""" -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -__license__ = "MIT" - -import codecs -from htmlentitydefs import codepoint2name -import re -import logging -import string - -# Import a library to autodetect character encodings. -chardet_type = None -try: - # First try the fast C implementation. - # PyPI package: cchardet - import cchardet - def chardet_dammit(s): - return cchardet.detect(s)['encoding'] -except ImportError: - try: - # Fall back to the pure Python implementation - # Debian package: python-chardet - # PyPI package: chardet - import chardet - def chardet_dammit(s): - return chardet.detect(s)['encoding'] - #import chardet.constants - #chardet.constants._debug = 1 - except ImportError: - # No chardet available. - def chardet_dammit(s): - return None - -# Available from http://cjkpython.i18n.org/. -try: - import iconv_codec -except ImportError: - pass - -xml_encoding_re = re.compile( - '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I) -html_meta_re = re.compile( - '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I) - -class EntitySubstitution(object): - - """Substitute XML or HTML entities for the corresponding characters.""" - - def _populate_class_variables(): - lookup = {} - reverse_lookup = {} - characters_for_re = [] - for codepoint, name in list(codepoint2name.items()): - character = unichr(codepoint) - if codepoint != 34: - # There's no point in turning the quotation mark into - # ", unless it happens within an attribute value, which - # is handled elsewhere. 
- characters_for_re.append(character) - lookup[character] = name - # But we do want to turn " into the quotation mark. - reverse_lookup[name] = character - re_definition = "[%s]" % "".join(characters_for_re) - return lookup, reverse_lookup, re.compile(re_definition) - (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, - CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() - - CHARACTER_TO_XML_ENTITY = { - "'": "apos", - '"': "quot", - "&": "amp", - "<": "lt", - ">": "gt", - } - - BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" - "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" - ")") - - AMPERSAND_OR_BRACKET = re.compile("([<>&])") - - @classmethod - def _substitute_html_entity(cls, matchobj): - entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) - return "&%s;" % entity - - @classmethod - def _substitute_xml_entity(cls, matchobj): - """Used with a regular expression to substitute the - appropriate XML entity for an XML special character.""" - entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] - return "&%s;" % entity - - @classmethod - def quoted_attribute_value(self, value): - """Make a value into a quoted XML attribute, possibly escaping it. - - Most strings will be quoted using double quotes. - - Bob's Bar -> "Bob's Bar" - - If a string contains double quotes, it will be quoted using - single quotes. - - Welcome to "my bar" -> 'Welcome to "my bar"' - - If a string contains both single and double quotes, the - double quotes will be escaped, and the string will be quoted - using double quotes. - - Welcome to "Bob's Bar" -> "Welcome to "Bob's bar" - """ - quote_with = '"' - if '"' in value: - if "'" in value: - # The string contains both single and double - # quotes. Turn the double quotes into - # entities. We quote the double quotes rather than - # the single quotes because the entity name is - # """ whether this is HTML or XML. If we - # quoted the single quotes, we'd have to decide - # between ' and &squot;. - replace_with = """ - value = value.replace('"', replace_with) - else: - # There are double quotes but no single quotes. - # We can use single quotes to quote the attribute. - quote_with = "'" - return quote_with + value + quote_with - - @classmethod - def substitute_xml(cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign - will become <, the greater-than sign will become >, - and any ampersands will become &. If you want ampersands - that appear to be part of an entity definition to be left - alone, use substitute_xml_containing_entities() instead. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets and ampersands. - value = cls.AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - @classmethod - def substitute_xml_containing_entities( - cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign will - become <, the greater-than sign will become >, and any - ampersands that are not part of an entity defition will - become &. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets, and ampersands that aren't part of - # entities. 
- value = cls.BARE_AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - @classmethod - def substitute_html(cls, s): - """Replace certain Unicode characters with named HTML entities. - - This differs from data.encode(encoding, 'xmlcharrefreplace') - in that the goal is to make the result more readable (to those - with ASCII displays) rather than to recover from - errors. There's absolutely nothing wrong with a UTF-8 string - containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that - character with "é" will make it more readable to some - people. - """ - return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( - cls._substitute_html_entity, s) - - -class EncodingDetector: - """Suggests a number of possible encodings for a bytestring. - - Order of precedence: - - 1. Encodings you specifically tell EncodingDetector to try first - (the override_encodings argument to the constructor). - - 2. An encoding declared within the bytestring itself, either in an - XML declaration (if the bytestring is to be interpreted as an XML - document), or in a tag (if the bytestring is to be - interpreted as an HTML document.) - - 3. An encoding detected through textual analysis by chardet, - cchardet, or a similar external library. - - 4. UTF-8. - - 5. Windows-1252. - """ - def __init__(self, markup, override_encodings=None, is_html=False, - exclude_encodings=None): - self.override_encodings = override_encodings or [] - exclude_encodings = exclude_encodings or [] - self.exclude_encodings = set([x.lower() for x in exclude_encodings]) - self.chardet_encoding = None - self.is_html = is_html - self.declared_encoding = None - - # First order of business: strip a byte-order mark. - self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) - - def _usable(self, encoding, tried): - if encoding is not None: - encoding = encoding.lower() - if encoding in self.exclude_encodings: - return False - if encoding not in tried: - tried.add(encoding) - return True - return False - - @property - def encodings(self): - """Yield a number of encodings that might work for this markup.""" - tried = set() - for e in self.override_encodings: - if self._usable(e, tried): - yield e - - # Did the document originally start with a byte-order mark - # that indicated its encoding? - if self._usable(self.sniffed_encoding, tried): - yield self.sniffed_encoding - - # Look within the document for an XML or HTML encoding - # declaration. - if self.declared_encoding is None: - self.declared_encoding = self.find_declared_encoding( - self.markup, self.is_html) - if self._usable(self.declared_encoding, tried): - yield self.declared_encoding - - # Use third-party character set detection to guess at the - # encoding. - if self.chardet_encoding is None: - self.chardet_encoding = chardet_dammit(self.markup) - if self._usable(self.chardet_encoding, tried): - yield self.chardet_encoding - - # As a last-ditch effort, try utf-8 and windows-1252. - for e in ('utf-8', 'windows-1252'): - if self._usable(e, tried): - yield e - - @classmethod - def strip_byte_order_mark(cls, data): - """If a byte-order mark is present, strip it and return the encoding it implies.""" - encoding = None - if isinstance(data, unicode): - # Unicode data cannot have a byte-order mark. 
- return data, encoding - if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == b'\xef\xbb\xbf': - encoding = 'utf-8' - data = data[3:] - elif data[:4] == b'\x00\x00\xfe\xff': - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == b'\xff\xfe\x00\x00': - encoding = 'utf-32le' - data = data[4:] - return data, encoding - - @classmethod - def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): - """Given a document, tries to find its declared encoding. - - An XML encoding is declared at the beginning of the document. - - An HTML encoding is declared in a tag, hopefully near the - beginning of the document. - """ - if search_entire_document: - xml_endpos = html_endpos = len(markup) - else: - xml_endpos = 1024 - html_endpos = max(2048, int(len(markup) * 0.05)) - - declared_encoding = None - declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) - if not declared_encoding_match and is_html: - declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) - if declared_encoding_match is not None: - declared_encoding = declared_encoding_match.groups()[0].decode( - 'ascii', 'replace') - if declared_encoding: - return declared_encoding.lower() - return None - -class UnicodeDammit: - """A class for detecting the encoding of a *ML document and - converting it to a Unicode string. If the source encoding is - windows-1252, can replace MS smart quotes with their HTML or XML - equivalents.""" - - # This dictionary maps commonly seen values for "charset" in HTML - # meta tags to the corresponding Python codec names. It only covers - # values that aren't in Python's aliases and can't be determined - # by the heuristics in find_codec. - CHARSET_ALIASES = {"macintosh": "mac-roman", - "x-sjis": "shift-jis"} - - ENCODINGS_WITH_SMART_QUOTES = [ - "windows-1252", - "iso-8859-1", - "iso-8859-2", - ] - - def __init__(self, markup, override_encodings=[], - smart_quotes_to=None, is_html=False, exclude_encodings=[]): - self.smart_quotes_to = smart_quotes_to - self.tried_encodings = [] - self.contains_replacement_characters = False - self.is_html = is_html - self.log = logging.getLogger(__name__) - self.detector = EncodingDetector( - markup, override_encodings, is_html, exclude_encodings) - - # Short-circuit if the data is in Unicode to begin with. - if isinstance(markup, unicode) or markup == '': - self.markup = markup - self.unicode_markup = unicode(markup) - self.original_encoding = None - return - - # The encoding detector may have stripped a byte-order mark. - # Use the stripped markup from this point on. - self.markup = self.detector.markup - - u = None - for encoding in self.detector.encodings: - markup = self.detector.markup - u = self._convert_from(encoding) - if u is not None: - break - - if not u: - # None of the encodings worked. As an absolute last resort, - # try them again with character replacement. - - for encoding in self.detector.encodings: - if encoding != "ascii": - u = self._convert_from(encoding, "replace") - if u is not None: - self.log.warning( - "Some characters could not be decoded, and were " - "replaced with REPLACEMENT CHARACTER." 
- ) - self.contains_replacement_characters = True - break - - # If none of that worked, we could at this point force it to - # ASCII, but that would destroy so much data that I think - # giving up is better. - self.unicode_markup = u - if not u: - self.original_encoding = None - - def _sub_ms_char(self, match): - """Changes a MS smart quote character to an XML or HTML - entity, or an ASCII character.""" - orig = match.group(1) - if self.smart_quotes_to == 'ascii': - sub = self.MS_CHARS_TO_ASCII.get(orig).encode() - else: - sub = self.MS_CHARS.get(orig) - if type(sub) == tuple: - if self.smart_quotes_to == 'xml': - sub = '&#x'.encode() + sub[1].encode() + ';'.encode() - else: - sub = '&'.encode() + sub[0].encode() + ';'.encode() - else: - sub = sub.encode() - return sub - - def _convert_from(self, proposed, errors="strict"): - proposed = self.find_codec(proposed) - if not proposed or (proposed, errors) in self.tried_encodings: - return None - self.tried_encodings.append((proposed, errors)) - markup = self.markup - # Convert smart quotes to HTML if coming from an encoding - # that might have them. - if (self.smart_quotes_to is not None - and proposed in self.ENCODINGS_WITH_SMART_QUOTES): - smart_quotes_re = b"([\x80-\x9f])" - smart_quotes_compiled = re.compile(smart_quotes_re) - markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) - - try: - #print "Trying to convert document to %s (errors=%s)" % ( - # proposed, errors) - u = self._to_unicode(markup, proposed, errors) - self.markup = u - self.original_encoding = proposed - except Exception as e: - #print "That didn't work!" - #print e - return None - #print "Correct encoding: %s" % proposed - return self.markup - - def _to_unicode(self, data, encoding, errors="strict"): - '''Given a string and its encoding, decodes the string into Unicode. - %encoding is a string recognized by encodings.aliases''' - return unicode(data, encoding, errors) - - @property - def declared_html_encoding(self): - if not self.is_html: - return None - return self.detector.declared_encoding - - def find_codec(self, charset): - value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) - or (charset and self._codec(charset.replace("-", ""))) - or (charset and self._codec(charset.replace("-", "_"))) - or (charset and charset.lower()) - or charset - ) - if value: - return value.lower() - return None - - def _codec(self, charset): - if not charset: - return charset - codec = None - try: - codecs.lookup(charset) - codec = charset - except (LookupError, ValueError): - pass - return codec - - - # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. 
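# [Editor's sketch, not part of the deleted file: the MS_CHARS table that
# follows is what backs UnicodeDammit's smart_quotes_to option. Assuming bs4
# is importable, the canonical usage from its documentation looks like this.]
from bs4 import UnicodeDammit

markup = b"<p>I just \x93love\x94 Microsoft Word\x92s smart quotes</p>"
dammit = UnicodeDammit(markup, ["windows-1252"], smart_quotes_to="html")
print(dammit.unicode_markup)
# -> <p>I just &ldquo;love&rdquo; Microsoft Word&rsquo;s smart quotes</p>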
- MS_CHARS = {b'\x80': ('euro', '20AC'), - b'\x81': ' ', - b'\x82': ('sbquo', '201A'), - b'\x83': ('fnof', '192'), - b'\x84': ('bdquo', '201E'), - b'\x85': ('hellip', '2026'), - b'\x86': ('dagger', '2020'), - b'\x87': ('Dagger', '2021'), - b'\x88': ('circ', '2C6'), - b'\x89': ('permil', '2030'), - b'\x8A': ('Scaron', '160'), - b'\x8B': ('lsaquo', '2039'), - b'\x8C': ('OElig', '152'), - b'\x8D': '?', - b'\x8E': ('#x17D', '17D'), - b'\x8F': '?', - b'\x90': '?', - b'\x91': ('lsquo', '2018'), - b'\x92': ('rsquo', '2019'), - b'\x93': ('ldquo', '201C'), - b'\x94': ('rdquo', '201D'), - b'\x95': ('bull', '2022'), - b'\x96': ('ndash', '2013'), - b'\x97': ('mdash', '2014'), - b'\x98': ('tilde', '2DC'), - b'\x99': ('trade', '2122'), - b'\x9a': ('scaron', '161'), - b'\x9b': ('rsaquo', '203A'), - b'\x9c': ('oelig', '153'), - b'\x9d': '?', - b'\x9e': ('#x17E', '17E'), - b'\x9f': ('Yuml', ''),} - - # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains - # horrors like stripping diacritical marks to turn á into a, but also - # contains non-horrors like turning “ into ". - MS_CHARS_TO_ASCII = { - b'\x80' : 'EUR', - b'\x81' : ' ', - b'\x82' : ',', - b'\x83' : 'f', - b'\x84' : ',,', - b'\x85' : '...', - b'\x86' : '+', - b'\x87' : '++', - b'\x88' : '^', - b'\x89' : '%', - b'\x8a' : 'S', - b'\x8b' : '<', - b'\x8c' : 'OE', - b'\x8d' : '?', - b'\x8e' : 'Z', - b'\x8f' : '?', - b'\x90' : '?', - b'\x91' : "'", - b'\x92' : "'", - b'\x93' : '"', - b'\x94' : '"', - b'\x95' : '*', - b'\x96' : '-', - b'\x97' : '--', - b'\x98' : '~', - b'\x99' : '(TM)', - b'\x9a' : 's', - b'\x9b' : '>', - b'\x9c' : 'oe', - b'\x9d' : '?', - b'\x9e' : 'z', - b'\x9f' : 'Y', - b'\xa0' : ' ', - b'\xa1' : '!', - b'\xa2' : 'c', - b'\xa3' : 'GBP', - b'\xa4' : '$', #This approximation is especially parochial--this is the - #generic currency symbol. - b'\xa5' : 'YEN', - b'\xa6' : '|', - b'\xa7' : 'S', - b'\xa8' : '..', - b'\xa9' : '', - b'\xaa' : '(th)', - b'\xab' : '<<', - b'\xac' : '!', - b'\xad' : ' ', - b'\xae' : '(R)', - b'\xaf' : '-', - b'\xb0' : 'o', - b'\xb1' : '+-', - b'\xb2' : '2', - b'\xb3' : '3', - b'\xb4' : ("'", 'acute'), - b'\xb5' : 'u', - b'\xb6' : 'P', - b'\xb7' : '*', - b'\xb8' : ',', - b'\xb9' : '1', - b'\xba' : '(th)', - b'\xbb' : '>>', - b'\xbc' : '1/4', - b'\xbd' : '1/2', - b'\xbe' : '3/4', - b'\xbf' : '?', - b'\xc0' : 'A', - b'\xc1' : 'A', - b'\xc2' : 'A', - b'\xc3' : 'A', - b'\xc4' : 'A', - b'\xc5' : 'A', - b'\xc6' : 'AE', - b'\xc7' : 'C', - b'\xc8' : 'E', - b'\xc9' : 'E', - b'\xca' : 'E', - b'\xcb' : 'E', - b'\xcc' : 'I', - b'\xcd' : 'I', - b'\xce' : 'I', - b'\xcf' : 'I', - b'\xd0' : 'D', - b'\xd1' : 'N', - b'\xd2' : 'O', - b'\xd3' : 'O', - b'\xd4' : 'O', - b'\xd5' : 'O', - b'\xd6' : 'O', - b'\xd7' : '*', - b'\xd8' : 'O', - b'\xd9' : 'U', - b'\xda' : 'U', - b'\xdb' : 'U', - b'\xdc' : 'U', - b'\xdd' : 'Y', - b'\xde' : 'b', - b'\xdf' : 'B', - b'\xe0' : 'a', - b'\xe1' : 'a', - b'\xe2' : 'a', - b'\xe3' : 'a', - b'\xe4' : 'a', - b'\xe5' : 'a', - b'\xe6' : 'ae', - b'\xe7' : 'c', - b'\xe8' : 'e', - b'\xe9' : 'e', - b'\xea' : 'e', - b'\xeb' : 'e', - b'\xec' : 'i', - b'\xed' : 'i', - b'\xee' : 'i', - b'\xef' : 'i', - b'\xf0' : 'o', - b'\xf1' : 'n', - b'\xf2' : 'o', - b'\xf3' : 'o', - b'\xf4' : 'o', - b'\xf5' : 'o', - b'\xf6' : 'o', - b'\xf7' : '/', - b'\xf8' : 'o', - b'\xf9' : 'u', - b'\xfa' : 'u', - b'\xfb' : 'u', - b'\xfc' : 'u', - b'\xfd' : 'y', - b'\xfe' : 'b', - b'\xff' : 'y', - } - - # A map used when removing rogue Windows-1252/ISO-8859-1 - # characters in otherwise UTF-8 documents. 
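# [Editor's sketch, not part of the deleted file: the WINDOWS_1252_TO_UTF8 map
# below drives UnicodeDammit.detwingle(), which repairs Windows-1252 bytes
# embedded in an otherwise UTF-8 bytestring. Assumes bs4 is importable.]
from bs4 import UnicodeDammit

snowmen = (u"\N{SNOWMAN}" * 3).encode("utf8")
quote = (u"\N{LEFT DOUBLE QUOTATION MARK}Hi!\N{RIGHT DOUBLE QUOTATION MARK}"
         ).encode("windows_1252")
doc = snowmen + quote                 # mixed encodings: not valid as UTF-8
fixed = UnicodeDammit.detwingle(doc)  # now pure UTF-8
print(fixed.decode("utf8"))           # snowmen and curly quotes both survive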
- # - # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in - # Windows-1252. - WINDOWS_1252_TO_UTF8 = { - 0x80 : b'\xe2\x82\xac', # € - 0x82 : b'\xe2\x80\x9a', # ‚ - 0x83 : b'\xc6\x92', # ƒ - 0x84 : b'\xe2\x80\x9e', # „ - 0x85 : b'\xe2\x80\xa6', # … - 0x86 : b'\xe2\x80\xa0', # † - 0x87 : b'\xe2\x80\xa1', # ‡ - 0x88 : b'\xcb\x86', # ˆ - 0x89 : b'\xe2\x80\xb0', # ‰ - 0x8a : b'\xc5\xa0', # Š - 0x8b : b'\xe2\x80\xb9', # ‹ - 0x8c : b'\xc5\x92', # Œ - 0x8e : b'\xc5\xbd', # Ž - 0x91 : b'\xe2\x80\x98', # ‘ - 0x92 : b'\xe2\x80\x99', # ’ - 0x93 : b'\xe2\x80\x9c', # “ - 0x94 : b'\xe2\x80\x9d', # ” - 0x95 : b'\xe2\x80\xa2', # • - 0x96 : b'\xe2\x80\x93', # – - 0x97 : b'\xe2\x80\x94', # — - 0x98 : b'\xcb\x9c', # ˜ - 0x99 : b'\xe2\x84\xa2', # ™ - 0x9a : b'\xc5\xa1', # š - 0x9b : b'\xe2\x80\xba', # › - 0x9c : b'\xc5\x93', # œ - 0x9e : b'\xc5\xbe', # ž - 0x9f : b'\xc5\xb8', # Ÿ - 0xa0 : b'\xc2\xa0', #   - 0xa1 : b'\xc2\xa1', # ¡ - 0xa2 : b'\xc2\xa2', # ¢ - 0xa3 : b'\xc2\xa3', # £ - 0xa4 : b'\xc2\xa4', # ¤ - 0xa5 : b'\xc2\xa5', # ¥ - 0xa6 : b'\xc2\xa6', # ¦ - 0xa7 : b'\xc2\xa7', # § - 0xa8 : b'\xc2\xa8', # ¨ - 0xa9 : b'\xc2\xa9', # © - 0xaa : b'\xc2\xaa', # ª - 0xab : b'\xc2\xab', # « - 0xac : b'\xc2\xac', # ¬ - 0xad : b'\xc2\xad', # ­ - 0xae : b'\xc2\xae', # ® - 0xaf : b'\xc2\xaf', # ¯ - 0xb0 : b'\xc2\xb0', # ° - 0xb1 : b'\xc2\xb1', # ± - 0xb2 : b'\xc2\xb2', # ² - 0xb3 : b'\xc2\xb3', # ³ - 0xb4 : b'\xc2\xb4', # ´ - 0xb5 : b'\xc2\xb5', # µ - 0xb6 : b'\xc2\xb6', # ¶ - 0xb7 : b'\xc2\xb7', # · - 0xb8 : b'\xc2\xb8', # ¸ - 0xb9 : b'\xc2\xb9', # ¹ - 0xba : b'\xc2\xba', # º - 0xbb : b'\xc2\xbb', # » - 0xbc : b'\xc2\xbc', # ¼ - 0xbd : b'\xc2\xbd', # ½ - 0xbe : b'\xc2\xbe', # ¾ - 0xbf : b'\xc2\xbf', # ¿ - 0xc0 : b'\xc3\x80', # À - 0xc1 : b'\xc3\x81', # Á - 0xc2 : b'\xc3\x82', #  - 0xc3 : b'\xc3\x83', # à - 0xc4 : b'\xc3\x84', # Ä - 0xc5 : b'\xc3\x85', # Å - 0xc6 : b'\xc3\x86', # Æ - 0xc7 : b'\xc3\x87', # Ç - 0xc8 : b'\xc3\x88', # È - 0xc9 : b'\xc3\x89', # É - 0xca : b'\xc3\x8a', # Ê - 0xcb : b'\xc3\x8b', # Ë - 0xcc : b'\xc3\x8c', # Ì - 0xcd : b'\xc3\x8d', # Í - 0xce : b'\xc3\x8e', # Î - 0xcf : b'\xc3\x8f', # Ï - 0xd0 : b'\xc3\x90', # Ð - 0xd1 : b'\xc3\x91', # Ñ - 0xd2 : b'\xc3\x92', # Ò - 0xd3 : b'\xc3\x93', # Ó - 0xd4 : b'\xc3\x94', # Ô - 0xd5 : b'\xc3\x95', # Õ - 0xd6 : b'\xc3\x96', # Ö - 0xd7 : b'\xc3\x97', # × - 0xd8 : b'\xc3\x98', # Ø - 0xd9 : b'\xc3\x99', # Ù - 0xda : b'\xc3\x9a', # Ú - 0xdb : b'\xc3\x9b', # Û - 0xdc : b'\xc3\x9c', # Ü - 0xdd : b'\xc3\x9d', # Ý - 0xde : b'\xc3\x9e', # Þ - 0xdf : b'\xc3\x9f', # ß - 0xe0 : b'\xc3\xa0', # à - 0xe1 : b'\xa1', # á - 0xe2 : b'\xc3\xa2', # â - 0xe3 : b'\xc3\xa3', # ã - 0xe4 : b'\xc3\xa4', # ä - 0xe5 : b'\xc3\xa5', # å - 0xe6 : b'\xc3\xa6', # æ - 0xe7 : b'\xc3\xa7', # ç - 0xe8 : b'\xc3\xa8', # è - 0xe9 : b'\xc3\xa9', # é - 0xea : b'\xc3\xaa', # ê - 0xeb : b'\xc3\xab', # ë - 0xec : b'\xc3\xac', # ì - 0xed : b'\xc3\xad', # í - 0xee : b'\xc3\xae', # î - 0xef : b'\xc3\xaf', # ï - 0xf0 : b'\xc3\xb0', # ð - 0xf1 : b'\xc3\xb1', # ñ - 0xf2 : b'\xc3\xb2', # ò - 0xf3 : b'\xc3\xb3', # ó - 0xf4 : b'\xc3\xb4', # ô - 0xf5 : b'\xc3\xb5', # õ - 0xf6 : b'\xc3\xb6', # ö - 0xf7 : b'\xc3\xb7', # ÷ - 0xf8 : b'\xc3\xb8', # ø - 0xf9 : b'\xc3\xb9', # ù - 0xfa : b'\xc3\xba', # ú - 0xfb : b'\xc3\xbb', # û - 0xfc : b'\xc3\xbc', # ü - 0xfd : b'\xc3\xbd', # ý - 0xfe : b'\xc3\xbe', # þ - } - - MULTIBYTE_MARKERS_AND_SIZES = [ - (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF - (0xe0, 0xef, 3), # 3-byte characters start with E0-EF - (0xf0, 0xf4, 4), # 4-byte characters start 
with F0-F4 - ] - - FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0] - LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1] - - @classmethod - def detwingle(cls, in_bytes, main_encoding="utf8", - embedded_encoding="windows-1252"): - """Fix characters from one encoding embedded in some other encoding. - - Currently the only situation supported is Windows-1252 (or its - subset ISO-8859-1), embedded in UTF-8. - - The input must be a bytestring. If you've already converted - the document to Unicode, you're too late. - - The output is a bytestring in which `embedded_encoding` - characters have been converted to their `main_encoding` - equivalents. - """ - if embedded_encoding.replace('_', '-').lower() not in ( - 'windows-1252', 'windows_1252'): - raise NotImplementedError( - "Windows-1252 and ISO-8859-1 are the only currently supported " - "embedded encodings.") - - if main_encoding.lower() not in ('utf8', 'utf-8'): - raise NotImplementedError( - "UTF-8 is the only currently supported main encoding.") - - byte_chunks = [] - - chunk_start = 0 - pos = 0 - while pos < len(in_bytes): - byte = in_bytes[pos] - if not isinstance(byte, int): - # Python 2.x - byte = ord(byte) - if (byte >= cls.FIRST_MULTIBYTE_MARKER - and byte <= cls.LAST_MULTIBYTE_MARKER): - # This is the start of a UTF-8 multibyte character. Skip - # to the end. - for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: - if byte >= start and byte <= end: - pos += size - break - elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: - # We found a Windows-1252 character! - # Save the string up to this point as a chunk. - byte_chunks.append(in_bytes[chunk_start:pos]) - - # Now translate the Windows-1252 character into UTF-8 - # and add it as another, one-byte chunk. - byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) - pos += 1 - chunk_start = pos - else: - # Go on to the next character. - pos += 1 - if chunk_start == 0: - # The string is unchanged. - return in_bytes - else: - # Store the final chunk. - byte_chunks.append(in_bytes[chunk_start:]) - return b''.join(byte_chunks) - diff --git a/lib/bs4/diagnose.py b/lib/bs4/diagnose.py deleted file mode 100644 index 8768332f..00000000 --- a/lib/bs4/diagnose.py +++ /dev/null @@ -1,219 +0,0 @@ -"""Diagnostic functions, mainly for use when doing tech support.""" - -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -__license__ = "MIT" - -import cProfile -from StringIO import StringIO -from HTMLParser import HTMLParser -import bs4 -from bs4 import BeautifulSoup, __version__ -from bs4.builder import builder_registry - -import os -import pstats -import random -import tempfile -import time -import traceback -import sys -import cProfile - -def diagnose(data): - """Diagnostic suite for isolating common problems.""" - print "Diagnostic running on Beautiful Soup %s" % __version__ - print "Python version %s" % sys.version - - basic_parsers = ["html.parser", "html5lib", "lxml"] - for name in basic_parsers: - for builder in builder_registry.builders: - if name in builder.features: - break - else: - basic_parsers.remove(name) - print ( - "I noticed that %s is not installed. Installing it may help." 
% - name) - - if 'lxml' in basic_parsers: - basic_parsers.append(["lxml", "xml"]) - try: - from lxml import etree - print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) - except ImportError, e: - print ( - "lxml is not installed or couldn't be imported.") - - - if 'html5lib' in basic_parsers: - try: - import html5lib - print "Found html5lib version %s" % html5lib.__version__ - except ImportError, e: - print ( - "html5lib is not installed or couldn't be imported.") - - if hasattr(data, 'read'): - data = data.read() - elif os.path.exists(data): - print '"%s" looks like a filename. Reading data from the file.' % data - with open(data) as fp: - data = fp.read() - elif data.startswith("http:") or data.startswith("https:"): - print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data - print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." - return - print - - for parser in basic_parsers: - print "Trying to parse your markup with %s" % parser - success = False - try: - soup = BeautifulSoup(data, parser) - success = True - except Exception, e: - print "%s could not parse the markup." % parser - traceback.print_exc() - if success: - print "Here's what %s did with the markup:" % parser - print soup.prettify() - - print "-" * 80 - -def lxml_trace(data, html=True, **kwargs): - """Print out the lxml events that occur during parsing. - - This lets you see how lxml parses a document when no Beautiful - Soup code is running. - """ - from lxml import etree - for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): - print("%s, %4s, %s" % (event, element.tag, element.text)) - -class AnnouncingParser(HTMLParser): - """Announces HTMLParser parse events, without doing anything else.""" - - def _p(self, s): - print(s) - - def handle_starttag(self, name, attrs): - self._p("%s START" % name) - - def handle_endtag(self, name): - self._p("%s END" % name) - - def handle_data(self, data): - self._p("%s DATA" % data) - - def handle_charref(self, name): - self._p("%s CHARREF" % name) - - def handle_entityref(self, name): - self._p("%s ENTITYREF" % name) - - def handle_comment(self, data): - self._p("%s COMMENT" % data) - - def handle_decl(self, data): - self._p("%s DECL" % data) - - def unknown_decl(self, data): - self._p("%s UNKNOWN-DECL" % data) - - def handle_pi(self, data): - self._p("%s PI" % data) - -def htmlparser_trace(data): - """Print out the HTMLParser events that occur during parsing. - - This lets you see how HTMLParser parses a document when no - Beautiful Soup code is running. - """ - parser = AnnouncingParser() - parser.feed(data) - -_vowels = "aeiou" -_consonants = "bcdfghjklmnpqrstvwxyz" - -def rword(length=5): - "Generate a random word-like string." - s = '' - for i in range(length): - if i % 2 == 0: - t = _consonants - else: - t = _vowels - s += random.choice(t) - return s - -def rsentence(length=4): - "Generate a random sentence-like string." - return " ".join(rword(random.randint(4,9)) for i in range(length)) - -def rdoc(num_elements=1000): - """Randomly generate an invalid HTML document.""" - tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] - elements = [] - for i in range(num_elements): - choice = random.randint(0,3) - if choice == 0: - # New tag. - tag_name = random.choice(tag_names) - elements.append("<%s>" % tag_name) - elif choice == 1: - elements.append(rsentence(random.randint(1,4))) - elif choice == 2: - # Close a tag. 
- tag_name = random.choice(tag_names) - elements.append("" % tag_name) - return "" + "\n".join(elements) + "" - -def benchmark_parsers(num_elements=100000): - """Very basic head-to-head performance benchmark.""" - print "Comparative parser benchmark on Beautiful Soup %s" % __version__ - data = rdoc(num_elements) - print "Generated a large invalid HTML document (%d bytes)." % len(data) - - for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: - success = False - try: - a = time.time() - soup = BeautifulSoup(data, parser) - b = time.time() - success = True - except Exception, e: - print "%s could not parse the markup." % parser - traceback.print_exc() - if success: - print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) - - from lxml import etree - a = time.time() - etree.HTML(data) - b = time.time() - print "Raw lxml parsed the markup in %.2fs." % (b-a) - - import html5lib - parser = html5lib.HTMLParser() - a = time.time() - parser.parse(data) - b = time.time() - print "Raw html5lib parsed the markup in %.2fs." % (b-a) - -def profile(num_elements=100000, parser="lxml"): - - filehandle = tempfile.NamedTemporaryFile() - filename = filehandle.name - - data = rdoc(num_elements) - vars = dict(bs4=bs4, data=data, parser=parser) - cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) - - stats = pstats.Stats(filename) - # stats.strip_dirs() - stats.sort_stats("cumulative") - stats.print_stats('_html5lib|bs4', 50) - -if __name__ == '__main__': - diagnose(sys.stdin.read()) diff --git a/lib/bs4/element.py b/lib/bs4/element.py deleted file mode 100644 index 9ef75f81..00000000 --- a/lib/bs4/element.py +++ /dev/null @@ -1,1808 +0,0 @@ -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -__license__ = "MIT" - -import collections -import re -import shlex -import sys -import warnings -from bs4.dammit import EntitySubstitution - -DEFAULT_OUTPUT_ENCODING = "utf-8" -PY3K = (sys.version_info[0] > 2) - -whitespace_re = re.compile("\s+") - -def _alias(attr): - """Alias one attribute name to another for backward compatibility""" - @property - def alias(self): - return getattr(self, attr) - - @alias.setter - def alias(self): - return setattr(self, attr) - return alias - - -class NamespacedAttribute(unicode): - - def __new__(cls, prefix, name, namespace=None): - if name is None: - obj = unicode.__new__(cls, prefix) - elif prefix is None: - # Not really namespaced. - obj = unicode.__new__(cls, name) - else: - obj = unicode.__new__(cls, prefix + ":" + name) - obj.prefix = prefix - obj.name = name - obj.namespace = namespace - return obj - -class AttributeValueWithCharsetSubstitution(unicode): - """A stand-in object for a character encoding specified in HTML.""" - -class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'charset' attribute. - - When Beautiful Soup parses the markup '', the - value of the 'charset' attribute will be one of these objects. - """ - - def __new__(cls, original_value): - obj = unicode.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - return encoding - - -class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'content' attribute. - - When Beautiful Soup parses the markup: - - - The value of the 'content' attribute will be one of these objects. 
- """ - - CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) - - def __new__(cls, original_value): - match = cls.CHARSET_RE.search(original_value) - if match is None: - # No substitution necessary. - return unicode.__new__(unicode, original_value) - - obj = unicode.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - def rewrite(match): - return match.group(1) + encoding - return self.CHARSET_RE.sub(rewrite, self.original_value) - -class HTMLAwareEntitySubstitution(EntitySubstitution): - - """Entity substitution rules that are aware of some HTML quirks. - - Specifically, the contents of - -Hello, world! - - -''' - soup = self.soup(html) - self.assertEqual("text/javascript", soup.find('script')['type']) - - def test_comment(self): - # Comments are represented as Comment objects. - markup = "
<p>foo<!--foobar-->baz</p>"
-        self.assertSoupEquals(markup)
-
-        soup = self.soup(markup)
-        comment = soup.find(text="foobar")
-        self.assertEqual(comment.__class__, Comment)
-
-        # The comment is properly integrated into the tree.
-        foo = soup.find(text="foo")
-        self.assertEqual(comment, foo.next_element)
-        baz = soup.find(text="baz")
-        self.assertEqual(comment, baz.previous_element)
-
-    def test_preserved_whitespace_in_pre_and_textarea(self):
-        """Whitespace must be preserved in <pre> and <textarea>."""
-        pre_markup = "<pre>   </pre>"
-        textarea_markup = "<textarea>  woo\nwoo  </textarea>"
-        self.assertSoupEquals(pre_markup)
-        self.assertSoupEquals(textarea_markup)
-
-        soup = self.soup(pre_markup)
-        self.assertEqual(soup.pre.prettify(), pre_markup)
-
-        soup = self.soup(textarea_markup)
-        self.assertEqual(soup.textarea.prettify(), textarea_markup)
-
-        soup = self.soup("<textarea></textarea>")
-        self.assertEqual(soup.textarea.prettify(), "<textarea></textarea>")
-
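# [Editor's sketch, not part of the deleted file: what test_comment() above is
# asserting. A Comment is a NavigableString subclass, so it sits in the
# next_element/previous_element chain like any other string. Assumes bs4 and
# a default parser are available.]
from bs4 import BeautifulSoup, Comment

soup = BeautifulSoup("<p>foo<!--foobar-->baz</p>", "html.parser")
comment = soup.find(text="foobar")
assert isinstance(comment, Comment)
assert soup.find(text="foo").next_element is comment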
-    def test_nested_inline_elements(self):
-        """Inline elements can be nested indefinitely."""
-        b_tag = "<b>Inside a B tag</b>"
-        self.assertSoupEquals(b_tag)
-
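# [Editor's note, not part of the deleted file: assertSoupEquals(), used
# throughout these tests, is a helper on the SoupTest base class. Roughly --
# a sketch of the harness's behavior, not a verbatim copy; default_builder
# and document_for() (which wraps a fragment via the builder's
# test_fragment_to_document()) come from that harness:]
from bs4 import BeautifulSoup

def assertSoupEquals(self, to_parse, compare_parsed_to=None):
    # Parse the markup, render it back out, and expect the round trip to
    # reproduce either the input or an explicitly supplied normalization.
    obj = BeautifulSoup(to_parse, builder=self.default_builder)
    if compare_parsed_to is None:
        compare_parsed_to = to_parse
    self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))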
-        nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
-        self.assertSoupEquals(nested_b_tag)
-
-        double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
-        self.assertSoupEquals(double_nested_b_tag)
-
-    def test_nested_block_level_elements(self):
-        """Block elements can be nested."""
-        soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
-        blockquote = soup.blockquote
-        self.assertEqual(blockquote.p.b.string, 'Foo')
-        self.assertEqual(blockquote.b.string, 'Foo')
-
-    def test_correctly_nested_tables(self):
-        """One table can go inside another one."""
-        markup = ('<table id="1">'
-                  '<tr>'
-                  "<td>Here's another table:"
-                  '<table id="2">'
-                  '<tr><td>foo</td></tr>'
-                  '</table></td>')
-
-        self.assertSoupEquals(
-            markup,
-            '<table id="1"><tr><td>Here\'s another table:'
-            '<table id="2"><tr><td>foo</td></tr></table>'
-            '</td></tr></table>')
-
-        self.assertSoupEquals(
-            "<table><thead><tr><td>Foo</td></tr></thead>"
-            "<tbody><tr><td>Bar</td></tr></tbody>"
-            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
-
-    def test_deeply_nested_multivalued_attribute(self):
-        # html5lib can set the attributes of the same tag many times
-        # as it rearranges the tree. This has caused problems with
-        # multivalued attributes.
-        markup = '<table><div><div class="css"></div></div></table>'
-        soup = self.soup(markup)
-        self.assertEqual(["css"], soup.div.div['class'])
-
-    def test_multivalued_attribute_on_html(self):
-        # html5lib uses a different API to set the attributes of the
-        # <html> tag. This has caused problems with multivalued
-        # attributes.
-        markup = '<html class="a b"></html>'
-        soup = self.soup(markup)
-        self.assertEqual(["a", "b"], soup.html['class'])
-
-    def test_angle_brackets_in_attribute_values_are_escaped(self):
-        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
-
-    def test_entities_in_attributes_converted_to_unicode(self):
-        expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
-        self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
-
-    def test_entities_in_text_converted_to_unicode(self):
-        expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
-        self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)
-
-    def test_quot_entity_converted_to_quotation_mark(self):
-        self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>",
-                              '<p>I said "good day!"</p>')
-
-    def test_out_of_range_entity(self):
-        expect = u"\N{REPLACEMENT CHARACTER}"
-        self.assertSoupEquals("&#10000000000000;", expect)
-        self.assertSoupEquals("&#x10000000000000;", expect)
-        self.assertSoupEquals("&#1000000000;", expect)
-
-    def test_multipart_strings(self):
-        "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
-        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
-        self.assertEqual("p", soup.h2.string.next_element.name)
-        self.assertEqual("p", soup.p.name)
-        self.assertConnectedness(soup)
-
-    def test_empty_element_tags(self):
-        """Verify consistent handling of empty-element tags,
-        no matter how they come in through the markup.
-        """
-        self.assertSoupEquals('<br/><br/><br/>', "<br/><br/><br/>")
-        self.assertSoupEquals('<br /><br /><br />', '<br/><br/><br/>
") - - def test_head_tag_between_head_and_body(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - foo - -""" - soup = self.soup(content) - self.assertNotEqual(None, soup.html.body) - self.assertConnectedness(soup) - - def test_multiple_copies_of_a_tag(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - - - - -""" - soup = self.soup(content) - self.assertConnectedness(soup.article) - - def test_basic_namespaces(self): - """Parsers don't need to *understand* namespaces, but at the - very least they should not choke on namespaces or lose - data.""" - - markup = b'4' - soup = self.soup(markup) - self.assertEqual(markup, soup.encode()) - html = soup.html - self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns']) - self.assertEqual( - 'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml']) - self.assertEqual( - 'http://www.w3.org/2000/svg', soup.html['xmlns:svg']) - - def test_multivalued_attribute_value_becomes_list(self): - markup = b'' - soup = self.soup(markup) - self.assertEqual(['foo', 'bar'], soup.a['class']) - - # - # Generally speaking, tests below this point are more tests of - # Beautiful Soup than tests of the tree builders. But parsers are - # weird, so we run these tests separately for every tree builder - # to detect any differences between them. - # - - def test_can_parse_unicode_document(self): - # A seemingly innocuous document... but it's in Unicode! And - # it contains characters that can't be represented in the - # encoding found in the declaration! The horror! - markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string) - - def test_soupstrainer(self): - """Parsers should be able to work with SoupStrainers.""" - strainer = SoupStrainer("b") - soup = self.soup("A bold statement", - parse_only=strainer) - self.assertEqual(soup.decode(), "bold") - - def test_single_quote_attribute_values_become_double_quotes(self): - self.assertSoupEquals("", - '') - - def test_attribute_values_with_nested_quotes_are_left_alone(self): - text = """a""" - self.assertSoupEquals(text) - - def test_attribute_values_with_double_nested_quotes_get_quoted(self): - text = """a""" - soup = self.soup(text) - soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' - self.assertSoupEquals( - soup.foo.decode(), - """a""") - - def test_ampersand_in_attribute_value_gets_escaped(self): - self.assertSoupEquals('', - '') - - self.assertSoupEquals( - 'foo', - 'foo') - - def test_escaped_ampersand_in_attribute_value_is_left_alone(self): - self.assertSoupEquals('') - - def test_entities_in_strings_converted_during_parsing(self): - # Both XML and HTML entities are converted to Unicode characters - # during parsing. - text = "
<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
-        expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
-        self.assertSoupEquals(text, expected)
-
-    def test_smart_quotes_converted_on_the_way_in(self):
-        # Microsoft smart quotes are converted to Unicode characters during
-        # parsing.
-        quote = b"<p>\x91Foo\x92</p>"
-        soup = self.soup(quote)
-        self.assertEqual(
-            soup.p.string,
-            u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
-
-    def test_non_breaking_spaces_converted_on_the_way_in(self):
-        soup = self.soup("<a>&nbsp;&nbsp;</a>")
-        self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
-
-    def test_entities_converted_on_the_way_out(self):
-        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
-        expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
-        soup = self.soup(text)
-        self.assertEqual(soup.p.encode("utf-8"), expected)
-
-    def test_real_iso_latin_document(self):
-        # Smoke test of interrelated functionality, using an
-        # easy-to-understand document.
-
-        # Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
-        unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
-
-        # That's because we're going to encode it into ISO-Latin-1, and use
-        # that to test.
-        iso_latin_html = unicode_html.encode("iso-8859-1")
-
-        # Parse the ISO-Latin-1 HTML.
-        soup = self.soup(iso_latin_html)
-        # Encode it to UTF-8.
-        result = soup.encode("utf-8")
-
-        # What do we expect the result to look like? Well, it would
-        # look like unicode_html, except that the META tag would say
-        # UTF-8 instead of ISO-Latin-1.
-        expected = unicode_html.replace("ISO-Latin-1", "utf-8")
-
-        # And, of course, it would be in UTF-8, not Unicode.
-        expected = expected.encode("utf-8")
-
-        # Ta-da!
-        self.assertEqual(result, expected)
-
-    def test_real_shift_jis_document(self):
-        # Smoke test to make sure the parser can handle a document in
-        # Shift-JIS encoding, without choking.
-        shift_jis_html = (
-            b'<html><head></head><body><pre>'
-            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
-            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
-            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
-            b'</pre></body></html>')
-        unicode_html = shift_jis_html.decode("shift-jis")
-        soup = self.soup(unicode_html)
-
-        # Make sure the parse tree is correctly encoded to various
-        # encodings.
-        self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
-        self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
-
-    def test_real_hebrew_document(self):
-        # A real-world test to make sure we can convert ISO-8859-8 (a
-        # Hebrew encoding) to UTF-8.
-        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>
\xed\xe5\xec\xf9' - soup = self.soup( - hebrew_document, from_encoding="iso8859-8") - # Some tree builders call it iso8859-8, others call it iso-8859-9. - # That's not a difference we really care about. - assert soup.original_encoding in ('iso8859-8', 'iso-8859-8') - self.assertEqual( - soup.encode('utf-8'), - hebrew_document.decode("iso8859-8").encode("utf-8")) - - def test_meta_tag_reflects_current_encoding(self): - # Here's the tag saying that a document is - # encoded in Shift-JIS. - meta_tag = ('') - - # Here's a document incorporating that meta tag. - shift_jis_html = ( - '\n%s\n' - '' - 'Shift-JIS markup goes here.') % meta_tag - soup = self.soup(shift_jis_html) - - # Parse the document, and the charset is seemingly unaffected. - parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'}) - content = parsed_meta['content'] - self.assertEqual('text/html; charset=x-sjis', content) - - # But that value is actually a ContentMetaAttributeValue object. - self.assertTrue(isinstance(content, ContentMetaAttributeValue)) - - # And it will take on a value that reflects its current - # encoding. - self.assertEqual('text/html; charset=utf8', content.encode("utf8")) - - # For the rest of the story, see TestSubstitutions in - # test_tree.py. - - def test_html5_style_meta_tag_reflects_current_encoding(self): - # Here's the tag saying that a document is - # encoded in Shift-JIS. - meta_tag = ('') - - # Here's a document incorporating that meta tag. - shift_jis_html = ( - '\n%s\n' - '' - 'Shift-JIS markup goes here.') % meta_tag - soup = self.soup(shift_jis_html) - - # Parse the document, and the charset is seemingly unaffected. - parsed_meta = soup.find('meta', id="encoding") - charset = parsed_meta['charset'] - self.assertEqual('x-sjis', charset) - - # But that value is actually a CharsetMetaAttributeValue object. - self.assertTrue(isinstance(charset, CharsetMetaAttributeValue)) - - # And it will take on a value that reflects its current - # encoding. - self.assertEqual('utf8', charset.encode("utf8")) - - def test_tag_with_no_attributes_can_have_attributes_added(self): - data = self.soup("text") - data.a['foo'] = 'bar' - self.assertEqual('text', data.a.decode()) - -class XMLTreeBuilderSmokeTest(object): - - def test_pickle_and_unpickle_identity(self): - # Pickling a tree, then unpickling it, yields a tree identical - # to the original. - tree = self.soup("foo") - dumped = pickle.dumps(tree, 2) - loaded = pickle.loads(dumped) - self.assertEqual(loaded.__class__, BeautifulSoup) - self.assertEqual(loaded.decode(), tree.decode()) - - def test_docstring_generated(self): - soup = self.soup("") - self.assertEqual( - soup.encode(), b'\n') - - def test_xml_declaration(self): - markup = b"""\n""" - soup = self.soup(markup) - self.assertEqual(markup, soup.encode("utf8")) - - def test_processing_instruction(self): - markup = b"""\n""" - soup = self.soup(markup) - self.assertEqual(markup, soup.encode("utf8")) - - def test_real_xhtml_document(self): - """A real XHTML document should come out *exactly* the same as it went in.""" - markup = b""" - - -Hello. -Goodbye. -""" - soup = self.soup(markup) - self.assertEqual( - soup.encode("utf-8"), markup) - - def test_formatter_processes_script_tag_for_xml_documents(self): - doc = """ - -""" - soup = BeautifulSoup(doc, "lxml-xml") - # lxml would have stripped this while parsing, but we can add - # it later. 
-        soup.script.string = 'console.log("&lt; &lt; hey &gt; &gt; ");'
-        encoded = soup.encode()
-        self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded)
-
-    def test_can_parse_unicode_document(self):
-        markup = u'<?xml version="1.0" encoding="euc-jp"?><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
-        soup = self.soup(markup)
-        self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)
-
-    def test_popping_namespaced_tag(self):
-        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
-        soup = self.soup(markup)
-        self.assertEqual(
-            unicode(soup.rss), markup)
-
-    def test_docstring_includes_correct_encoding(self):
-        soup = self.soup("<root/>")
-        self.assertEqual(
-            soup.encode("latin1"),
-            b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
-
-    def test_large_xml_document(self):
-        """A large XML document should come out the same as it went in."""
-        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
-                  + b'0' * (2**12)
-                  + b'</root>')
-        soup = self.soup(markup)
-        self.assertEqual(soup.encode("utf-8"), markup)
-
-
-    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
-        self.assertSoupEquals("<p>", "<p/>")
-        self.assertSoupEquals("<p>foo</p>")
-
-    def test_namespaces_are_preserved(self):
-        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:tag>This tag is in the a namespace</a:tag><b:tag>This tag is in the b namespace</b:tag></root>'
-        soup = self.soup(markup)
-        root = soup.root
-        self.assertEqual("http://example.com/", root['xmlns:a'])
-        self.assertEqual("http://example.net/", root['xmlns:b'])
-
-    def test_closing_namespaced_tag(self):
-        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
-        soup = self.soup(markup)
-        self.assertEqual(unicode(soup.p), markup)
-
-    def test_namespaced_attributes(self):
-        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
-        soup = self.soup(markup)
-        self.assertEqual(unicode(soup.foo), markup)
-
-    def test_namespaced_attributes_xml_namespace(self):
-        markup = '<foo xml:lang="fr">bar</foo>'
-        soup = self.soup(markup)
-        self.assertEqual(unicode(soup.foo), markup)
-
-    def test_find_by_prefixed_name(self):
-        doc = """<?xml version="1.0" encoding="utf-8"?>
-<Document xmlns="http://example.com/ns0"
-    xmlns:ns1="http://example.com/ns1"
-    xmlns:ns2="http://example.com/ns2">
-    <ns1:tag>foo</ns1:tag>
-    <ns1:tag>bar</ns1:tag>
-    <ns2:tag key="value">baz</ns2:tag>
-</Document>
-"""
-        soup = self.soup(doc)
-
-        # There are three <tag> tags.
-        self.assertEqual(3, len(soup.find_all('tag')))
-
-        # But two of them are ns1:tag and one of them is ns2:tag.
-        self.assertEqual(2, len(soup.find_all('ns1:tag')))
-        self.assertEqual(1, len(soup.find_all('ns2:tag')))
-
-        self.assertEqual(1, len(soup.find_all('ns2:tag', key='value')))
-        self.assertEqual(3, len(soup.find_all(['ns1:tag', 'ns2:tag'])))
-
-    def test_copy_tag_preserves_namespace(self):
-        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<w:document xmlns:w="http://www.w3.org/TR/html4/"/>"""
-
-        soup = self.soup(xml)
-        tag = soup.document
-        duplicate = copy.copy(tag)
-
-        # The two tags have the same namespace prefix.
-        self.assertEqual(tag.prefix, duplicate.prefix)
-
-
-class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
-    """Smoke test for a tree builder that supports HTML5."""
-
-    def test_real_xhtml_document(self):
-        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
-        # XHTML documents in any particular way.
-        pass
-
-    def test_html_tags_have_namespace(self):
-        markup = "<a>"
-        soup = self.soup(markup)
-        self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
-
-    def test_svg_tags_have_namespace(self):
-        markup = '<svg><circle/></svg>'
-        soup = self.soup(markup)
-        namespace = "http://www.w3.org/2000/svg"
-        self.assertEqual(namespace, soup.svg.namespace)
-        self.assertEqual(namespace, soup.circle.namespace)
-
-
-    def test_mathml_tags_have_namespace(self):
-        markup = '<math><msqrt>5</msqrt></math>'
-        soup = self.soup(markup)
-        namespace = 'http://www.w3.org/1998/Math/MathML'
-        self.assertEqual(namespace, soup.math.namespace)
-        self.assertEqual(namespace, soup.msqrt.namespace)
-
-    def test_xml_declaration_becomes_comment(self):
-        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
-        soup = self.soup(markup)
-        self.assertTrue(isinstance(soup.contents[0], Comment))
-        self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
-        self.assertEqual("html", soup.contents[0].next_element.name)
-
-def skipIf(condition, reason):
-    def nothing(test, *args, **kwargs):
-        return None
-
-    def decorator(test_item):
-        if condition:
-            return nothing
-        else:
-            return test_item
-
-    return decorator
diff --git a/lib/bs4/tests/__init__.py b/lib/bs4/tests/__init__.py
deleted file mode 100644
index 142c8cc3..00000000
--- a/lib/bs4/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"The beautifulsoup tests."
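
Note on the meta-tag tests deleted above: they pin down behaviour that still matters to the metatagging paths, namely that bs4 stores a <meta> tag's charset as a CharsetMetaAttributeValue, so the declared charset is rewritten to match whatever encoding the tree is serialized to. A minimal sketch of that behaviour, not part of this patch, assuming only that bs4 and the stdlib html.parser builder are importable:

    from bs4 import BeautifulSoup

    markup = '<html><head><meta charset="x-sjis"/></head><body>hi</body></html>'
    soup = BeautifulSoup(markup, 'html.parser')

    # Reading the attribute shows the charset the document declared...
    charset = soup.meta['charset']
    print(charset)                 # x-sjis
    print(type(charset).__name__)  # CharsetMetaAttributeValue

    # ...but serializing substitutes the actual output encoding.
    print(soup.encode('utf-8'))    # ...<meta charset="utf-8"/>...
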
diff --git a/lib/bs4/tests/test_builder_registry.py b/lib/bs4/tests/test_builder_registry.py
deleted file mode 100644
index 90cad829..00000000
--- a/lib/bs4/tests/test_builder_registry.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""Tests of the builder registry."""
-
-import unittest
-import warnings
-
-from bs4 import BeautifulSoup
-from bs4.builder import (
-    builder_registry as registry,
-    HTMLParserTreeBuilder,
-    TreeBuilderRegistry,
-)
-
-try:
-    from bs4.builder import HTML5TreeBuilder
-    HTML5LIB_PRESENT = True
-except ImportError:
-    HTML5LIB_PRESENT = False
-
-try:
-    from bs4.builder import (
-        LXMLTreeBuilderForXML,
-        LXMLTreeBuilder,
-        )
-    LXML_PRESENT = True
-except ImportError:
-    LXML_PRESENT = False
-
-
-class BuiltInRegistryTest(unittest.TestCase):
-    """Test the built-in registry with the default builders registered."""
-
-    def test_combination(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('fast', 'html'),
-                             LXMLTreeBuilder)
-
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('permissive', 'xml'),
-                             LXMLTreeBuilderForXML)
-        self.assertEqual(registry.lookup('strict', 'html'),
-                         HTMLParserTreeBuilder)
-        if HTML5LIB_PRESENT:
-            self.assertEqual(registry.lookup('html5lib', 'html'),
-                             HTML5TreeBuilder)
-
-    def test_lookup_by_markup_type(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
-            self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
-        else:
-            self.assertEqual(registry.lookup('xml'), None)
-            if HTML5LIB_PRESENT:
-                self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
-            else:
-                self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
-
-    def test_named_library(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('lxml', 'xml'),
-                             LXMLTreeBuilderForXML)
-            self.assertEqual(registry.lookup('lxml', 'html'),
-                             LXMLTreeBuilder)
-        if HTML5LIB_PRESENT:
-            self.assertEqual(registry.lookup('html5lib'),
-                             HTML5TreeBuilder)
-
-        self.assertEqual(registry.lookup('html.parser'),
-                         HTMLParserTreeBuilder)
-
-    def test_beautifulsoup_constructor_does_lookup(self):
-
-        with warnings.catch_warnings(record=True) as w:
-            # This will create a warning about not explicitly
-            # specifying a parser, but we'll ignore it.
-
-            # You can pass in a string.
-            BeautifulSoup("", features="html")
-            # Or a list of strings.
-            BeautifulSoup("", features=["html", "fast"])
-
-        # You'll get an exception if BS can't find an appropriate
-        # builder.
-        self.assertRaises(ValueError, BeautifulSoup,
-                          "", features="no-such-feature")
-
-class RegistryTest(unittest.TestCase):
-    """Test the TreeBuilderRegistry class in general."""
-
-    def setUp(self):
-        self.registry = TreeBuilderRegistry()
-
-    def builder_for_features(self, *feature_list):
-        cls = type('Builder_' + '_'.join(feature_list),
-                   (object,), {'features' : feature_list})
-
-        self.registry.register(cls)
-        return cls
-
-    def test_register_with_no_features(self):
-        builder = self.builder_for_features()
-
-        # Since the builder advertises no features, you can't find it
-        # by looking up features.
-        self.assertEqual(self.registry.lookup('foo'), None)
-
-        # But you can find it by doing a lookup with no features, if
-        # this happens to be the only registered builder.
-        self.assertEqual(self.registry.lookup(), builder)
-
-    def test_register_with_features_makes_lookup_succeed(self):
-        builder = self.builder_for_features('foo', 'bar')
-        self.assertEqual(self.registry.lookup('foo'), builder)
-        self.assertEqual(self.registry.lookup('bar'), builder)
-
-    def test_lookup_fails_when_no_builder_implements_feature(self):
-        builder = self.builder_for_features('foo', 'bar')
-        self.assertEqual(self.registry.lookup('baz'), None)
-
-    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
-        builder1 = self.builder_for_features('foo')
-        builder2 = self.builder_for_features('bar')
-        self.assertEqual(self.registry.lookup(), builder2)
-
-    def test_lookup_fails_when_no_tree_builders_registered(self):
-        self.assertEqual(self.registry.lookup(), None)
-
-    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
-        has_one = self.builder_for_features('foo')
-        has_the_other = self.builder_for_features('bar')
-        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
-        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
-        lacks_one = self.builder_for_features('bar')
-        has_the_other = self.builder_for_features('foo')
-
-        # There are two builders featuring 'foo' and 'bar', but
-        # the one that also features 'quux' was registered later.
-        self.assertEqual(self.registry.lookup('foo', 'bar'),
-                         has_both_late)
-
-        # There is only one builder featuring 'foo', 'bar', and 'baz'.
-        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
-                         has_both_early)
-
-    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
-        builder1 = self.builder_for_features('foo', 'bar')
-        builder2 = self.builder_for_features('foo', 'baz')
-        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
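
The registry tests deleted above encode the lookup rules bs4 follows whenever BeautifulSoup(markup, features=...) has to pick a tree builder: the most recently registered builder advertising every requested feature wins, and irreconcilable features yield None. A short usage sketch, not part of this patch, assuming bs4 is importable (the feature lookup resolves to lxml only when lxml is installed):

    from bs4.builder import builder_registry

    # Lookup by name always resolves for the bundled parser.
    print(builder_registry.lookup('html.parser'))  # HTMLParserTreeBuilder

    # Lookup by feature set returns the most recently registered builder
    # advertising every requested feature (LXMLTreeBuilder, if present).
    print(builder_registry.lookup('fast', 'html'))

    # No builder can reconcile an unknown feature, so lookup returns None.
    print(builder_registry.lookup('no-such-feature'))
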
diff --git a/lib/bs4/tests/test_docs.py b/lib/bs4/tests/test_docs.py
deleted file mode 100644
index 5b9f6770..00000000
--- a/lib/bs4/tests/test_docs.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"Test harness for doctests."
-
-# pylint: disable-msg=E0611,W0142
-
-__metaclass__ = type
-__all__ = [
-    'additional_tests',
-    ]
-
-import atexit
-import doctest
-import os
-#from pkg_resources import (
-#    resource_filename, resource_exists, resource_listdir, cleanup_resources)
-import unittest
-
-DOCTEST_FLAGS = (
-    doctest.ELLIPSIS |
-    doctest.NORMALIZE_WHITESPACE |
-    doctest.REPORT_NDIFF)
-
-
-# def additional_tests():
-#     "Run the doc tests (README.txt and docs/*, if any exist)"
-#     doctest_files = [
-#         os.path.abspath(resource_filename('bs4', 'README.txt'))]
-#     if resource_exists('bs4', 'docs'):
-#         for name in resource_listdir('bs4', 'docs'):
-#             if name.endswith('.txt'):
-#                 doctest_files.append(
-#                     os.path.abspath(
-#                         resource_filename('bs4', 'docs/%s' % name)))
-#     kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
-#     atexit.register(cleanup_resources)
-#     return unittest.TestSuite((
-#         doctest.DocFileSuite(*doctest_files, **kwargs)))
diff --git a/lib/bs4/tests/test_html5lib.py b/lib/bs4/tests/test_html5lib.py
deleted file mode 100644
index 0f89d624..00000000
--- a/lib/bs4/tests/test_html5lib.py
+++ /dev/null
@@ -1,130 +0,0 @@
-"""Tests to ensure that the html5lib tree builder generates good trees."""
-
-import warnings
-
-try:
-    from bs4.builder import HTML5TreeBuilder
-    HTML5LIB_PRESENT = True
-except ImportError, e:
-    HTML5LIB_PRESENT = False
-from bs4.element import SoupStrainer
-from bs4.testing import (
-    HTML5TreeBuilderSmokeTest,
-    SoupTest,
-    skipIf,
-)
-
-@skipIf(
-    not HTML5LIB_PRESENT,
-    "html5lib seems not to be present, not testing its tree builder.")
-class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
-    """See ``HTML5TreeBuilderSmokeTest``."""
-
-    @property
-    def default_builder(self):
-        return HTML5TreeBuilder()
-
-    def test_soupstrainer(self):
-        # The html5lib tree builder does not support SoupStrainers.
-        strainer = SoupStrainer("b")
-        markup = "<p>A <b>bold</b> statement.</p>"
-        with warnings.catch_warnings(record=True) as w:
-            soup = self.soup(markup, parse_only=strainer)
-        self.assertEqual(
-            soup.decode(), self.document_for(markup))
-
-        self.assertTrue(
-            "the html5lib tree builder doesn't support parse_only" in
-            str(w[0].message))
-
-    def test_correctly_nested_tables(self):
-        """html5lib inserts <tbody> tags where other parsers don't."""
-        markup = ('<table id="1">'
-                  '<tr>'
-                  "<td>Here's another table:"
-                  '<table id="2">'
-                  '<tr><td>foo</td></tr>'
-                  '</table></td>')
-
-        self.assertSoupEquals(
-            markup,
-            '<table id="1"><tbody><tr><td>Here\'s another table:'
-            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
-            '</td></tr></tbody></table>')
-
-        self.assertSoupEquals(
-            "<table><thead><tr><td>Foo</td></tr></thead>"
-            "<tbody><tr><td>Bar</td></tr></tbody>"
-            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
-
-    def test_xml_declaration_followed_by_doctype(self):
-        markup = '''<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE html>
-<html>
-  <head>
-  </head>
-  <body>
-   <p>foo</p>
-  </body>
-</html>'''
-        soup = self.soup(markup)
-        # Verify that we can reach the <p> tag; this means the tree is connected.
-        self.assertEqual(b"<p>foo</p>", soup.p.encode())
-
-    def test_reparented_markup(self):
-        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
-        soup = self.soup(markup)
-        self.assertEqual(u"<body><p><em>foo</em></p>\n<p><em>bar<a></a></em></p></body>", soup.body.decode())
-        self.assertEqual(2, len(soup.find_all('p')))
-
-
-    def test_reparented_markup_ends_with_whitespace(self):
-        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
-        soup = self.soup(markup)
-        self.assertEqual(u"<body><p><em>foo</em></p>\n<p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
-        self.assertEqual(2, len(soup.find_all('p')))
-
-    def test_reparented_markup_containing_identical_whitespace_nodes(self):
-        """Verify that we keep the two whitespace nodes in this
-        document distinct when reparenting the adjacent <tbody> tags.
-        """
-        markup = '<table> <tbody><table> <tbody><tr><td>text</td></tr></tbody></table></tbody></table>'
-        soup = self.soup(markup)
-        space1, space2 = soup.find_all(string=' ')
-        tbody1, tbody2 = soup.find_all('tbody')
-        assert space1.next_element is tbody1
-        assert tbody2.next_element is space2
-
-    def test_reparented_markup_containing_children(self):
-        markup = ''
-        soup = self.soup(markup)
-        noscript = soup.noscript
-        self.assertEqual("target", noscript.next_element)
-        target = soup.find(string='target')
-
-        # The 'aftermath' string was duplicated; we want the second one.
-        final_aftermath = soup.find_all(string='aftermath')[-1]
-
-        # The