author     Mikhail Borisov <borisov.mikhail@gmail.com>  2022-02-10 16:45:40 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:40 +0300
commit     5d50718e66d9c037dc587a0211110b7d25a66185 (patch)
tree       e98df59de24d2ef7c77baed9f41e4875a2fef972 /contrib/python
parent     a6a92afe03e02795227d2641b49819b687f088f8 (diff)
download   ydb-5d50718e66d9c037dc587a0211110b7d25a66185.tar.gz
Restoring authorship annotation for Mikhail Borisov <borisov.mikhail@gmail.com>. Commit 2 of 2.
Diffstat (limited to 'contrib/python')
-rw-r--r--  contrib/python/PyYAML/py2/LICENSE | 36
-rw-r--r--  contrib/python/PyYAML/py2/ya.make | 50
-rw-r--r--  contrib/python/PyYAML/py2/yaml/__init__.py | 594
-rw-r--r--  contrib/python/PyYAML/py2/yaml/_yaml.h | 44
-rw-r--r--  contrib/python/PyYAML/py2/yaml/_yaml.pxd | 460
-rw-r--r--  contrib/python/PyYAML/py2/yaml/_yaml.pyx | 2976
-rw-r--r--  contrib/python/PyYAML/py2/yaml/composer.py | 274
-rw-r--r--  contrib/python/PyYAML/py2/yaml/constructor.py | 1224
-rw-r--r--  contrib/python/PyYAML/py2/yaml/cyaml.py | 146
-rw-r--r--  contrib/python/PyYAML/py2/yaml/dumper.py | 106
-rw-r--r--  contrib/python/PyYAML/py2/yaml/emitter.py | 2266
-rw-r--r--  contrib/python/PyYAML/py2/yaml/error.py | 150
-rw-r--r--  contrib/python/PyYAML/py2/yaml/events.py | 172
-rw-r--r--  contrib/python/PyYAML/py2/yaml/loader.py | 78
-rw-r--r--  contrib/python/PyYAML/py2/yaml/nodes.py | 98
-rw-r--r--  contrib/python/PyYAML/py2/yaml/parser.py | 1178
-rw-r--r--  contrib/python/PyYAML/py2/yaml/reader.py | 362
-rw-r--r--  contrib/python/PyYAML/py2/yaml/representer.py | 958
-rw-r--r--  contrib/python/PyYAML/py2/yaml/resolver.py | 442
-rw-r--r--  contrib/python/PyYAML/py2/yaml/scanner.py | 2850
-rw-r--r--  contrib/python/PyYAML/py2/yaml/serializer.py | 222
-rw-r--r--  contrib/python/PyYAML/py2/yaml/tokens.py | 208
-rw-r--r--  contrib/python/PyYAML/py3/LICENSE | 36
-rw-r--r--  contrib/python/PyYAML/py3/ya.make | 50
-rw-r--r--  contrib/python/PyYAML/py3/yaml/_yaml.h | 44
-rw-r--r--  contrib/python/PyYAML/py3/yaml/_yaml.pxd | 460
-rw-r--r--  contrib/python/PyYAML/py3/yaml/_yaml.pyx | 2976
-rw-r--r--  contrib/python/PyYAML/ya.make | 4
-rw-r--r--  contrib/python/cffi/ya.make | 4
-rw-r--r--  contrib/python/decorator/LICENSE.txt | 50
-rw-r--r--  contrib/python/decorator/decorator.py | 698
-rw-r--r--  contrib/python/decorator/ya.make | 14
-rw-r--r--  contrib/python/ipython/py2/COPYING.rst | 148
-rw-r--r--  contrib/python/ipython/py2/IPython/__init__.py | 292
-rw-r--r--  contrib/python/ipython/py2/IPython/__main__.py | 28
-rw-r--r--  contrib/python/ipython/py2/IPython/config.py | 36
-rw-r--r--  contrib/python/ipython/py2/IPython/consoleapp.py | 24
-rw-r--r--  contrib/python/ipython/py2/IPython/core/alias.py | 508
-rw-r--r--  contrib/python/ipython/py2/IPython/core/application.py | 754
-rw-r--r--  contrib/python/ipython/py2/IPython/core/autocall.py | 140
-rw-r--r--  contrib/python/ipython/py2/IPython/core/builtin_trap.py | 226
-rw-r--r--  contrib/python/ipython/py2/IPython/core/compilerop.py | 286
-rw-r--r--  contrib/python/ipython/py2/IPython/core/completer.py | 2128
-rw-r--r--  contrib/python/ipython/py2/IPython/core/completerlib.py | 686
-rw-r--r--  contrib/python/ipython/py2/IPython/core/crashhandler.py | 432
-rw-r--r--  contrib/python/ipython/py2/IPython/core/debugger.py | 1070
-rw-r--r--  contrib/python/ipython/py2/IPython/core/display.py | 1856
-rw-r--r--  contrib/python/ipython/py2/IPython/core/display_trap.py | 140
-rw-r--r--  contrib/python/ipython/py2/IPython/core/displayhook.py | 560
-rw-r--r--  contrib/python/ipython/py2/IPython/core/displaypub.py | 206
-rw-r--r--  contrib/python/ipython/py2/IPython/core/error.py | 120
-rw-r--r--  contrib/python/ipython/py2/IPython/core/events.py | 260
-rw-r--r--  contrib/python/ipython/py2/IPython/core/excolors.py | 290
-rw-r--r--  contrib/python/ipython/py2/IPython/core/extensions.py | 322
-rw-r--r--  contrib/python/ipython/py2/IPython/core/formatters.py | 1802
-rw-r--r--  contrib/python/ipython/py2/IPython/core/getipython.py | 48
-rw-r--r--  contrib/python/ipython/py2/IPython/core/history.py | 1686
-rw-r--r--  contrib/python/ipython/py2/IPython/core/historyapp.py | 298
-rw-r--r--  contrib/python/ipython/py2/IPython/core/hooks.py | 424
-rw-r--r--  contrib/python/ipython/py2/IPython/core/inputsplitter.py | 1362
-rw-r--r--  contrib/python/ipython/py2/IPython/core/inputtransformer.py | 1088
-rw-r--r--  contrib/python/ipython/py2/IPython/core/interactiveshell.py | 5824
-rw-r--r--  contrib/python/ipython/py2/IPython/core/latex_symbols.py | 2600
-rw-r--r--  contrib/python/ipython/py2/IPython/core/logger.py | 442
-rw-r--r--  contrib/python/ipython/py2/IPython/core/macro.py | 114
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magic.py | 1344
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magic_arguments.py | 556
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/__init__.py | 82
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/auto.py | 256
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/basic.py | 1094
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/code.py | 1420
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/config.py | 314
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/display.py | 98
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/execution.py | 2616
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/extension.py | 134
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/history.py | 636
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/logging.py | 366
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/namespace.py | 1408
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/osm.py | 1572
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/pylab.py | 332
-rw-r--r--  contrib/python/ipython/py2/IPython/core/magics/script.py | 528
-rw-r--r--  contrib/python/ipython/py2/IPython/core/oinspect.py | 1652
-rw-r--r--  contrib/python/ipython/py2/IPython/core/page.py | 738
-rw-r--r--  contrib/python/ipython/py2/IPython/core/payload.py | 110
-rw-r--r--  contrib/python/ipython/py2/IPython/core/payloadpage.py | 104
-rw-r--r--  contrib/python/ipython/py2/IPython/core/prefilter.py | 1356
-rw-r--r--  contrib/python/ipython/py2/IPython/core/profile/README_STARTUP | 22
-rw-r--r--  contrib/python/ipython/py2/IPython/core/profileapp.py | 606
-rw-r--r--  contrib/python/ipython/py2/IPython/core/profiledir.py | 402
-rw-r--r--  contrib/python/ipython/py2/IPython/core/prompts.py | 48
-rw-r--r--  contrib/python/ipython/py2/IPython/core/pylabtools.py | 756
-rw-r--r--  contrib/python/ipython/py2/IPython/core/release.py | 234
-rw-r--r--  contrib/python/ipython/py2/IPython/core/shadowns.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/core/shellapp.py | 724
-rw-r--r--  contrib/python/ipython/py2/IPython/core/splitinput.py | 274
-rw-r--r--  contrib/python/ipython/py2/IPython/core/ultratb.py | 2874
-rw-r--r--  contrib/python/ipython/py2/IPython/core/usage.py | 648
-rw-r--r--  contrib/python/ipython/py2/IPython/display.py | 32
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/__init__.py | 4
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/autoreload.py | 1068
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/cythonmagic.py | 42
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/rmagic.py | 24
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/storemagic.py | 448
-rw-r--r--  contrib/python/ipython/py2/IPython/extensions/sympyprinting.py | 64
-rw-r--r--  contrib/python/ipython/py2/IPython/external/__init__.py | 10
-rw-r--r--  contrib/python/ipython/py2/IPython/external/decorators/__init__.py | 18
-rw-r--r--  contrib/python/ipython/py2/IPython/external/decorators/_decorators.py | 562
-rw-r--r--  contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_noseclasses.py | 82
-rw-r--r--  contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_utils.py | 218
-rw-r--r--  contrib/python/ipython/py2/IPython/external/mathjax.py | 26
-rw-r--r--  contrib/python/ipython/py2/IPython/external/qt_for_kernel.py | 182
-rw-r--r--  contrib/python/ipython/py2/IPython/external/qt_loaders.py | 562
-rw-r--r--  contrib/python/ipython/py2/IPython/frontend.py | 56
-rw-r--r--  contrib/python/ipython/py2/IPython/html.py | 54
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/__init__.py | 68
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/__main__.py | 6
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/adapter.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/channels.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/channelsabc.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/client.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/clientabc.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/connect.py | 4
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/kernelspec.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/kernelspecapp.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/launcher.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/manager.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/managerabc.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/multikernelmanager.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/restarter.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/kernel/threaded.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/__init__.py | 42
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/backgroundjobs.py | 980
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/clipboard.py | 144
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/deepreload.py | 712
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/demo.py | 1136
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/display.py | 1106
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/editorhooks.py | 256
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/guisupport.py | 274
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhook.py | 1096
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookglut.py | 346
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookgtk.py | 70
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookgtk3.py | 68
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookpyglet.py | 222
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookqt4.py | 360
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/inputhookwx.py | 334
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/kernel.py | 26
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/latextools.py | 380
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/lexers.py | 1000
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/pretty.py | 1626
-rw-r--r--  contrib/python/ipython/py2/IPython/lib/security.py | 228
-rw-r--r--  contrib/python/ipython/py2/IPython/nbconvert.py | 36
-rw-r--r--  contrib/python/ipython/py2/IPython/nbformat.py | 36
-rw-r--r--  contrib/python/ipython/py2/IPython/parallel.py | 38
-rw-r--r--  contrib/python/ipython/py2/IPython/paths.py | 240
-rw-r--r--  contrib/python/ipython/py2/IPython/qt.py | 46
-rw-r--r--  contrib/python/ipython/py2/IPython/sphinxext/custom_doctests.py | 310
-rw-r--r--  contrib/python/ipython/py2/IPython/sphinxext/ipython_console_highlighting.py | 56
-rw-r--r--  contrib/python/ipython/py2/IPython/sphinxext/ipython_directive.py | 2322
-rw-r--r--  contrib/python/ipython/py2/IPython/terminal/console.py | 36
-rw-r--r--  contrib/python/ipython/py2/IPython/terminal/embed.py | 536
-rw-r--r--  contrib/python/ipython/py2/IPython/terminal/interactiveshell.py | 236
-rwxr-xr-x  contrib/python/ipython/py2/IPython/terminal/ipapp.py | 674
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/__init__.py | 76
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/__main__.py | 6
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/decorators.py | 740
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/globalipapp.py | 258
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/iptest.py | 856
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/iptestcontroller.py | 1064
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/ipunittest.py | 352
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/Makefile | 148
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/README.txt | 78
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/dtexample.py | 316
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/ipdoctest.py | 1522
-rwxr-xr-x  contrib/python/ipython/py2/IPython/testing/plugin/iptest.py | 38
-rwxr-xr-x  contrib/python/ipython/py2/IPython/testing/plugin/setup.py | 36
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py | 40
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/simple.py | 68
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/simplevars.py | 6
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/test_combo.txt | 72
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/test_example.txt | 48
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/test_exampleip.txt | 60
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/test_ipdoctest.py | 160
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/plugin/test_refs.py | 92
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/skipdoctest.py | 76
-rw-r--r--  contrib/python/ipython/py2/IPython/testing/tools.py | 892
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/PyColorize.py | 680
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_process_cli.py | 156
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_process_common.py | 428
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_process_posix.py | 450
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_process_win32.py | 384
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py | 1154
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_signatures.py | 1626
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_sysinfo.py | 2
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py | 878
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py | 1190
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/capture.py | 312
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/coloransi.py | 374
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/contexts.py | 126
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/daemonize.py | 8
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/data.py | 74
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/decorators.py | 116
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/dir2.py | 90
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/encoding.py | 142
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/eventful.py | 14
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/frame.py | 196
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/generics.py | 68
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/importstring.py | 78
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/io.py | 440
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/ipstruct.py | 782
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/jsonutil.py | 10
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/localinterfaces.py | 10
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/log.py | 14
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/module_paths.py | 250
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/openpy.py | 496
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/path.py | 862
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/pickleutil.py | 10
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/process.py | 212
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/py3compat.py | 668
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/rlineimpl.py | 148
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/sentinel.py | 34
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/shimmodule.py | 184
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/signatures.py | 22
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/strdispatch.py | 136
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/sysinfo.py | 334
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/syspathcontext.py | 142
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/tempdir.py | 290
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/terminal.py | 222
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/text.py | 1552
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/timing.py | 236
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/tokenize2.py | 18
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/tokenutil.py | 254
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/traitlets.py | 14
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/tz.py | 92
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/ulinecache.py | 90
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/version.py | 72
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/warn.py | 92
-rw-r--r--  contrib/python/ipython/py2/IPython/utils/wildcard.py | 224
-rw-r--r--  contrib/python/ipython/py2/bin/ya.make | 10
-rw-r--r--  contrib/python/ipython/ya.make | 4
-rw-r--r--  contrib/python/jedi/LICENSE.txt | 42
-rw-r--r--  contrib/python/jedi/jedi/__init__.py | 78
-rw-r--r--  contrib/python/jedi/jedi/__main__.py | 72
-rw-r--r--  contrib/python/jedi/jedi/_compatibility.py | 316
-rw-r--r--  contrib/python/jedi/jedi/api/__init__.py | 456
-rw-r--r--  contrib/python/jedi/jedi/api/classes.py | 816
-rw-r--r--  contrib/python/jedi/jedi/api/helpers.py | 46
-rw-r--r--  contrib/python/jedi/jedi/api/interpreter.py | 34
-rw-r--r--  contrib/python/jedi/jedi/api/keywords.py | 106
-rw-r--r--  contrib/python/jedi/jedi/api/replstartup.py | 48
-rw-r--r--  contrib/python/jedi/jedi/cache.py | 182
-rw-r--r--  contrib/python/jedi/jedi/debug.py | 134
-rw-r--r--  contrib/python/jedi/jedi/evaluate/__init__.py | 216
-rw-r--r--  contrib/python/jedi/jedi/evaluate/analysis.py | 276
-rw-r--r--  contrib/python/jedi/jedi/evaluate/cache.py | 92
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/__init__.py | 26
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake.py | 56
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym | 18
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym | 52
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym | 198
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym | 14
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym | 486
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym | 8
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym | 12
-rw-r--r--  contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym | 10
-rw-r--r--  contrib/python/jedi/jedi/evaluate/docstrings.py | 264
-rw-r--r--  contrib/python/jedi/jedi/evaluate/dynamic.py | 116
-rw-r--r--  contrib/python/jedi/jedi/evaluate/finder.py | 176
-rw-r--r--  contrib/python/jedi/jedi/evaluate/flow_analysis.py | 98
-rw-r--r--  contrib/python/jedi/jedi/evaluate/helpers.py | 100
-rw-r--r--  contrib/python/jedi/jedi/evaluate/imports.py | 490
-rw-r--r--  contrib/python/jedi/jedi/evaluate/param.py | 122
-rw-r--r--  contrib/python/jedi/jedi/evaluate/recursion.py | 80
-rw-r--r--  contrib/python/jedi/jedi/evaluate/stdlib.py | 270
-rw-r--r--  contrib/python/jedi/jedi/evaluate/sys_path.py | 202
-rw-r--r--  contrib/python/jedi/jedi/refactoring.py | 318
-rw-r--r--  contrib/python/jedi/jedi/settings.py | 322
-rw-r--r--  contrib/python/jedi/jedi/utils.py | 214
-rw-r--r--  contrib/python/jedi/ya.make | 76
-rw-r--r--  contrib/python/pexpect/LICENSE | 30
-rw-r--r--  contrib/python/pexpect/pexpect/ANSI.py | 702
-rw-r--r--  contrib/python/pexpect/pexpect/FSM.py | 668
-rw-r--r--  contrib/python/pexpect/pexpect/__init__.py | 168
-rw-r--r--  contrib/python/pexpect/pexpect/_async.py | 114
-rw-r--r--  contrib/python/pexpect/pexpect/exceptions.py | 70
-rw-r--r--  contrib/python/pexpect/pexpect/expect.py | 510
-rw-r--r--  contrib/python/pexpect/pexpect/fdpexpect.py | 222
-rw-r--r--  contrib/python/pexpect/pexpect/popen_spawn.py | 338
-rw-r--r--  contrib/python/pexpect/pexpect/pty_spawn.py | 1448
-rw-r--r--  contrib/python/pexpect/pexpect/pxssh.py | 756
-rw-r--r--  contrib/python/pexpect/pexpect/replwrap.py | 218
-rw-r--r--  contrib/python/pexpect/pexpect/run.py | 314
-rw-r--r--  contrib/python/pexpect/pexpect/screen.py | 858
-rw-r--r--  contrib/python/pexpect/pexpect/spawnbase.py | 942
-rw-r--r--  contrib/python/pexpect/pexpect/utils.py | 216
-rw-r--r--  contrib/python/pexpect/ya.make | 46
-rw-r--r--  contrib/python/pickleshare/pickleshare.py | 618
-rw-r--r--  contrib/python/pickleshare/ya.make | 16
-rw-r--r--  contrib/python/prompt-toolkit/py2/ya.make | 6
-rw-r--r--  contrib/python/prompt-toolkit/ya.make | 2
-rw-r--r--  contrib/python/ptyprocess/ptyprocess/__init__.py | 4
-rw-r--r--  contrib/python/ptyprocess/ptyprocess/_fork_pty.py | 150
-rw-r--r--  contrib/python/ptyprocess/ptyprocess/ptyprocess.py | 1594
-rw-r--r--  contrib/python/ptyprocess/ptyprocess/util.py | 132
-rw-r--r--  contrib/python/ptyprocess/ya.make | 20
-rw-r--r--  contrib/python/py/ya.make | 58
-rw-r--r--  contrib/python/pyparsing/py2/ya.make | 14
-rw-r--r--  contrib/python/pyparsing/py3/ya.make | 12
-rw-r--r--  contrib/python/pyparsing/ya.make | 4
-rw-r--r--  contrib/python/setuptools/py2/pkg_resources/__init__.py | 5038
-rw-r--r--  contrib/python/setuptools/py2/setuptools/__init__.py | 228
-rw-r--r--  contrib/python/setuptools/py2/setuptools/archive_util.py | 324
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/__init__.py | 32
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/alias.py | 158
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/bdist_egg.py | 910
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/bdist_rpm.py | 86
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/bdist_wininst.py | 42
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/build_ext.py | 538
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/build_py.py | 400
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/develop.py | 356
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/easy_install.py | 4194
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/egg_info.py | 782
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/install.py | 246
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/install_egg_info.py | 118
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/install_lib.py | 236
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/install_scripts.py | 120
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/register.py | 12
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/rotate.py | 120
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/saveopts.py | 44
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/sdist.py | 262
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/setopt.py | 296
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/test.py | 348
-rw-r--r--  contrib/python/setuptools/py2/setuptools/command/upload_docs.py | 274
-rw-r--r--  contrib/python/setuptools/py2/setuptools/depends.py | 278
-rw-r--r--  contrib/python/setuptools/py2/setuptools/dist.py | 1318
-rw-r--r--  contrib/python/setuptools/py2/setuptools/extension.py | 92
-rw-r--r--  contrib/python/setuptools/py2/setuptools/launch.py | 34
-rw-r--r--  contrib/python/setuptools/py2/setuptools/lib2to3_ex.py | 116
-rw-r--r--  contrib/python/setuptools/py2/setuptools/package_index.py | 1864
-rw-r--r--  contrib/python/setuptools/py2/setuptools/py27compat.py | 14
-rw-r--r--  contrib/python/setuptools/py2/setuptools/py31compat.py | 44
-rw-r--r--  contrib/python/setuptools/py2/setuptools/sandbox.py | 796
-rw-r--r--  contrib/python/setuptools/py2/setuptools/script.tmpl | 6
-rw-r--r--  contrib/python/setuptools/py2/setuptools/site-patch.py | 120
-rw-r--r--  contrib/python/setuptools/py2/setuptools/ssl_support.py | 438
-rw-r--r--  contrib/python/setuptools/py2/setuptools/unicode_utils.py | 86
-rw-r--r--  contrib/python/setuptools/py2/setuptools/windows_support.py | 58
-rw-r--r--  contrib/python/setuptools/py2/ya.make | 90
-rw-r--r--  contrib/python/setuptools/py3/pkg_resources/__init__.py | 4894
-rw-r--r--  contrib/python/setuptools/py3/setuptools/__init__.py | 204
-rw-r--r--  contrib/python/setuptools/py3/setuptools/archive_util.py | 258
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/__init__.py | 16
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/alias.py | 156
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/bdist_egg.py | 804
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py | 62
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/build_ext.py | 530
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/build_py.py | 342
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/develop.py | 296
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/easy_install.py | 3800
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/egg_info.py | 766
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install.py | 246
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_egg_info.py | 118
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_lib.py | 236
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/install_scripts.py | 116
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/register.py | 12
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/rotate.py | 112
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/saveopts.py | 44
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/sdist.py | 206
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/setopt.py | 294
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/test.py | 266
-rw-r--r--  contrib/python/setuptools/py3/setuptools/command/upload_docs.py | 258
-rw-r--r--  contrib/python/setuptools/py3/setuptools/depends.py | 272
-rw-r--r--  contrib/python/setuptools/py3/setuptools/dist.py | 802
-rw-r--r--  contrib/python/setuptools/py3/setuptools/extension.py | 90
-rw-r--r--  contrib/python/setuptools/py3/setuptools/launch.py | 34
-rw-r--r--  contrib/python/setuptools/py3/setuptools/package_index.py | 1724
-rw-r--r--  contrib/python/setuptools/py3/setuptools/sandbox.py | 744
-rw-r--r--  contrib/python/setuptools/py3/setuptools/script.tmpl | 6
-rw-r--r--  contrib/python/setuptools/py3/setuptools/unicode_utils.py | 80
-rw-r--r--  contrib/python/setuptools/py3/setuptools/windows_support.py | 58
-rw-r--r--  contrib/python/setuptools/py3/ya.make | 78
-rw-r--r--  contrib/python/setuptools/ya.make | 6
-rw-r--r--  contrib/python/traitlets/py2/COPYING.md | 124
-rw-r--r--  contrib/python/traitlets/py2/traitlets/__init__.py | 6
-rw-r--r--  contrib/python/traitlets/py2/traitlets/_version.py | 2
-rw-r--r--  contrib/python/traitlets/py2/traitlets/config/__init__.py | 16
-rw-r--r--  contrib/python/traitlets/py2/traitlets/config/application.py | 1184
-rw-r--r--  contrib/python/traitlets/py2/traitlets/config/configurable.py | 794
-rw-r--r--  contrib/python/traitlets/py2/traitlets/config/loader.py | 1640
-rw-r--r--  contrib/python/traitlets/py2/traitlets/config/manager.py | 174
-rw-r--r--  contrib/python/traitlets/py2/traitlets/log.py | 42
-rw-r--r--  contrib/python/traitlets/py2/traitlets/traitlets.py | 4370
-rw-r--r--  contrib/python/traitlets/py2/traitlets/utils/getargspec.py | 170
-rw-r--r--  contrib/python/traitlets/py2/traitlets/utils/importstring.py | 80
-rw-r--r--  contrib/python/traitlets/py2/traitlets/utils/sentinel.py | 34
-rw-r--r--  contrib/python/traitlets/py2/ya.make | 46
-rw-r--r--  contrib/python/traitlets/py3/COPYING.md | 124
-rw-r--r--  contrib/python/traitlets/py3/traitlets/__init__.py | 6
-rw-r--r--  contrib/python/traitlets/py3/traitlets/config/__init__.py | 16
-rw-r--r--  contrib/python/traitlets/py3/traitlets/config/application.py | 920
-rw-r--r--  contrib/python/traitlets/py3/traitlets/config/configurable.py | 694
-rw-r--r--  contrib/python/traitlets/py3/traitlets/config/loader.py | 1134
-rw-r--r--  contrib/python/traitlets/py3/traitlets/config/manager.py | 166
-rw-r--r--  contrib/python/traitlets/py3/traitlets/log.py | 42
-rw-r--r--  contrib/python/traitlets/py3/traitlets/traitlets.py | 3822
-rw-r--r--  contrib/python/traitlets/py3/traitlets/utils/getargspec.py | 40
-rw-r--r--  contrib/python/traitlets/py3/traitlets/utils/importstring.py | 70
-rw-r--r--  contrib/python/traitlets/py3/traitlets/utils/sentinel.py | 30
-rw-r--r--  contrib/python/traitlets/py3/ya.make | 42
-rw-r--r--  contrib/python/traitlets/ya.make | 4
-rw-r--r--  contrib/python/ya.make | 108
410 files changed, 90989 insertions(+), 90989 deletions(-)
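
The stat is symmetric (90989 insertions, 90989 deletions) because the commit re-touches lines to reassign blame rather than to change their content. A minimal sketch for spot-checking that on one file, assuming a local clone with both revisions available; `lines_at` is an illustrative helper, not part of the repository:

import subprocess

PARENT = "a6a92afe03e02795227d2641b49819b687f088f8"  # parent commit, from the header above
COMMIT = "5d50718e66d9c037dc587a0211110b7d25a66185"  # this commit

def lines_at(rev, path):
    # Fetch one file as of a given revision via `git show <rev>:<path>`.
    out = subprocess.run(["git", "show", "%s:%s" % (rev, path)],
                         capture_output=True, text=True, check=True).stdout
    # Assumption: the re-annotation only changes trailing whitespace,
    # so stripping it should make both revisions compare equal.
    return [line.rstrip() for line in out.splitlines()]

path = "contrib/python/PyYAML/py2/yaml/__init__.py"
assert lines_at(PARENT, path) == lines_at(COMMIT, path)

For the whole tree in one step, `git diff -w <parent> <commit>` (ignore whitespace) should likewise come back empty if the assumption holds.
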
diff --git a/contrib/python/PyYAML/py2/LICENSE b/contrib/python/PyYAML/py2/LICENSE
index 63edc20f2f..2f1b8e15e5 100644
--- a/contrib/python/PyYAML/py2/LICENSE
+++ b/contrib/python/PyYAML/py2/LICENSE
@@ -1,20 +1,20 @@
Copyright (c) 2017-2021 Ingy döt Net
Copyright (c) 2006-2016 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/PyYAML/py2/ya.make b/contrib/python/PyYAML/py2/ya.make
index ca2ec858c2..1ec5c0c301 100644
--- a/contrib/python/PyYAML/py2/ya.make
+++ b/contrib/python/PyYAML/py2/ya.make
@@ -1,43 +1,43 @@
PY2_LIBRARY()
-
+
LICENSE(MIT)
VERSION(5.4.1)
OWNER(g:python-contrib borman g:testenv)
-
+
PEERDIR(
contrib/libs/yaml
)
-
+
ADDINCL(
contrib/python/PyYAML/py2/yaml
)
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
_yaml/__init__.py
- yaml/__init__.py
- yaml/composer.py
- yaml/constructor.py
- yaml/cyaml.py
- yaml/dumper.py
- yaml/emitter.py
- yaml/error.py
- yaml/events.py
- yaml/loader.py
- yaml/nodes.py
- yaml/parser.py
- yaml/reader.py
- yaml/representer.py
- yaml/resolver.py
- yaml/scanner.py
- yaml/serializer.py
- yaml/tokens.py
+ yaml/__init__.py
+ yaml/composer.py
+ yaml/constructor.py
+ yaml/cyaml.py
+ yaml/dumper.py
+ yaml/emitter.py
+ yaml/error.py
+ yaml/events.py
+ yaml/loader.py
+ yaml/nodes.py
+ yaml/parser.py
+ yaml/reader.py
+ yaml/representer.py
+ yaml/resolver.py
+ yaml/scanner.py
+ yaml/serializer.py
+ yaml/tokens.py
CYTHON_C
yaml/_yaml.pyx
-)
-
+)
+
RESOURCE_FILES(
PREFIX contrib/python/PyYAML/
.dist-info/METADATA
@@ -48,4 +48,4 @@ NO_LINT()
NO_COMPILER_WARNINGS()
-END()
+END()
diff --git a/contrib/python/PyYAML/py2/yaml/__init__.py b/contrib/python/PyYAML/py2/yaml/__init__.py
index fcdf708064..3c988198d5 100644
--- a/contrib/python/PyYAML/py2/yaml/__init__.py
+++ b/contrib/python/PyYAML/py2/yaml/__init__.py
@@ -1,21 +1,21 @@
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
__version__ = '5.4.1'
-
-try:
- from cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
#------------------------------------------------------------------------------
# Warnings control
@@ -55,82 +55,82 @@ def load_warning(method):
warnings.warn(message, YAMLLoadWarning, stacklevel=3)
#------------------------------------------------------------------------------
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
def load(stream, Loader=None):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
if Loader is None:
load_warning('load')
Loader = FullLoader
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
def load_all(stream, Loader=None):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
if Loader is None:
load_warning('load_all')
Loader = FullLoader
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
def full_load(stream):
"""
Parse the first YAML document in a stream
@@ -151,26 +151,26 @@ def full_load_all(stream):
"""
return load_all(stream, FullLoader)
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
- """
- return load_all(stream, SafeLoader)
-
+ """
+ return load_all(stream, SafeLoader)
+
def unsafe_load(stream):
"""
Parse the first YAML document in a stream
@@ -191,241 +191,241 @@ def unsafe_load_all(stream):
"""
return load_all(stream, UnsafeLoader)
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- from StringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
Loader=None, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
if Loader is None:
loader.Loader.add_implicit_resolver(tag, regexp, first)
loader.FullLoader.add_implicit_resolver(tag, regexp, first)
loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
else:
Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
if Loader is None:
loader.Loader.add_path_resolver(tag, path, kind)
loader.FullLoader.add_path_resolver(tag, path, kind)
loader.UnsafeLoader.add_path_resolver(tag, path, kind)
else:
Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
+ Dumper.add_path_resolver(tag, path, kind)
+
def add_constructor(tag, constructor, Loader=None):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
if Loader is None:
loader.Loader.add_constructor(tag, constructor)
loader.FullLoader.add_constructor(tag, constructor)
loader.UnsafeLoader.add_constructor(tag, constructor)
else:
Loader.add_constructor(tag, constructor)
-
+
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
if Loader is None:
loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
else:
Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
if isinstance(cls.yaml_loader, list):
for loader in cls.yaml_loader:
loader.add_constructor(cls.yaml_tag, cls.from_yaml)
else:
cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __metaclass__ = YAMLObjectMetaclass
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
yaml_loader = [Loader, FullLoader, UnsafeLoader]
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
- from_yaml = classmethod(from_yaml)
-
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
- to_yaml = classmethod(to_yaml)
-
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
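
Aside from whitespace, the module above is functionally unchanged; the hunks show the vendored PyYAML 5.x entry points as-is: load() and load_all() warn via YAMLLoadWarning and fall back to FullLoader when no Loader is given, while the safe_* variants resolve only basic tags. A short usage sketch of that API (the document string is illustrative, not from the repository):

import yaml

data = yaml.safe_load("name: ydb\nports: [2135, 2136]\n")
assert data == {"name": "ydb", "ports": [2135, 2136]}

text = yaml.safe_dump(data)          # produces only basic YAML tags
assert yaml.safe_load(text) == data  # round-trips cleanly

# __with_libyaml__ reports whether the Cython binding (_yaml.pyx above) imported
print(yaml.__with_libyaml__)
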
diff --git a/contrib/python/PyYAML/py2/yaml/_yaml.h b/contrib/python/PyYAML/py2/yaml/_yaml.h
index 622cfc561a..21fd6a991b 100644
--- a/contrib/python/PyYAML/py2/yaml/_yaml.h
+++ b/contrib/python/PyYAML/py2/yaml/_yaml.h
@@ -1,23 +1,23 @@
-
-#include <yaml.h>
-
-#if PY_MAJOR_VERSION < 3
-
+
+#include <yaml.h>
+
+#if PY_MAJOR_VERSION < 3
+
#define PyUnicode_FromString(s) PyUnicode_DecodeUTF8((s), strlen(s), "strict")
-
-#else
-
-#define PyString_CheckExact PyBytes_CheckExact
-#define PyString_AS_STRING PyBytes_AS_STRING
-#define PyString_GET_SIZE PyBytes_GET_SIZE
-#define PyString_FromStringAndSize PyBytes_FromStringAndSize
-
-#endif
-
-#ifdef _MSC_VER /* MS Visual C++ 6.0 */
-#if _MSC_VER == 1200
-
-#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(i)
-
-#endif
-#endif
+
+#else
+
+#define PyString_CheckExact PyBytes_CheckExact
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+
+#endif
+
+#ifdef _MSC_VER /* MS Visual C++ 6.0 */
+#if _MSC_VER == 1200
+
+#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(i)
+
+#endif
+#endif
diff --git a/contrib/python/PyYAML/py2/yaml/_yaml.pxd b/contrib/python/PyYAML/py2/yaml/_yaml.pxd
index b6629e26ae..7937c9db51 100644
--- a/contrib/python/PyYAML/py2/yaml/_yaml.pxd
+++ b/contrib/python/PyYAML/py2/yaml/_yaml.pxd
@@ -1,251 +1,251 @@
-
+
cdef extern from "_yaml.h":
-
- void malloc(int l)
- void memcpy(char *d, char *s, int l)
- int strlen(char *s)
- int PyString_CheckExact(object o)
- int PyUnicode_CheckExact(object o)
- char *PyString_AS_STRING(object o)
- int PyString_GET_SIZE(object o)
- object PyString_FromStringAndSize(char *v, int l)
- object PyUnicode_FromString(char *u)
- object PyUnicode_DecodeUTF8(char *u, int s, char *e)
- object PyUnicode_AsUTF8String(object o)
- int PY_MAJOR_VERSION
-
- ctypedef enum:
- SIZEOF_VOID_P
- ctypedef enum yaml_encoding_t:
- YAML_ANY_ENCODING
- YAML_UTF8_ENCODING
- YAML_UTF16LE_ENCODING
- YAML_UTF16BE_ENCODING
- ctypedef enum yaml_break_t:
- YAML_ANY_BREAK
- YAML_CR_BREAK
- YAML_LN_BREAK
- YAML_CRLN_BREAK
- ctypedef enum yaml_error_type_t:
- YAML_NO_ERROR
- YAML_MEMORY_ERROR
- YAML_READER_ERROR
- YAML_SCANNER_ERROR
- YAML_PARSER_ERROR
- YAML_WRITER_ERROR
- YAML_EMITTER_ERROR
- ctypedef enum yaml_scalar_style_t:
- YAML_ANY_SCALAR_STYLE
- YAML_PLAIN_SCALAR_STYLE
- YAML_SINGLE_QUOTED_SCALAR_STYLE
- YAML_DOUBLE_QUOTED_SCALAR_STYLE
- YAML_LITERAL_SCALAR_STYLE
- YAML_FOLDED_SCALAR_STYLE
- ctypedef enum yaml_sequence_style_t:
- YAML_ANY_SEQUENCE_STYLE
- YAML_BLOCK_SEQUENCE_STYLE
- YAML_FLOW_SEQUENCE_STYLE
- ctypedef enum yaml_mapping_style_t:
- YAML_ANY_MAPPING_STYLE
- YAML_BLOCK_MAPPING_STYLE
- YAML_FLOW_MAPPING_STYLE
- ctypedef enum yaml_token_type_t:
- YAML_NO_TOKEN
- YAML_STREAM_START_TOKEN
- YAML_STREAM_END_TOKEN
- YAML_VERSION_DIRECTIVE_TOKEN
- YAML_TAG_DIRECTIVE_TOKEN
- YAML_DOCUMENT_START_TOKEN
- YAML_DOCUMENT_END_TOKEN
- YAML_BLOCK_SEQUENCE_START_TOKEN
- YAML_BLOCK_MAPPING_START_TOKEN
- YAML_BLOCK_END_TOKEN
- YAML_FLOW_SEQUENCE_START_TOKEN
- YAML_FLOW_SEQUENCE_END_TOKEN
- YAML_FLOW_MAPPING_START_TOKEN
- YAML_FLOW_MAPPING_END_TOKEN
- YAML_BLOCK_ENTRY_TOKEN
- YAML_FLOW_ENTRY_TOKEN
- YAML_KEY_TOKEN
- YAML_VALUE_TOKEN
- YAML_ALIAS_TOKEN
- YAML_ANCHOR_TOKEN
- YAML_TAG_TOKEN
- YAML_SCALAR_TOKEN
- ctypedef enum yaml_event_type_t:
- YAML_NO_EVENT
- YAML_STREAM_START_EVENT
- YAML_STREAM_END_EVENT
- YAML_DOCUMENT_START_EVENT
- YAML_DOCUMENT_END_EVENT
- YAML_ALIAS_EVENT
- YAML_SCALAR_EVENT
- YAML_SEQUENCE_START_EVENT
- YAML_SEQUENCE_END_EVENT
- YAML_MAPPING_START_EVENT
- YAML_MAPPING_END_EVENT
-
+
+ void malloc(int l)
+ void memcpy(char *d, char *s, int l)
+ int strlen(char *s)
+ int PyString_CheckExact(object o)
+ int PyUnicode_CheckExact(object o)
+ char *PyString_AS_STRING(object o)
+ int PyString_GET_SIZE(object o)
+ object PyString_FromStringAndSize(char *v, int l)
+ object PyUnicode_FromString(char *u)
+ object PyUnicode_DecodeUTF8(char *u, int s, char *e)
+ object PyUnicode_AsUTF8String(object o)
+ int PY_MAJOR_VERSION
+
+ ctypedef enum:
+ SIZEOF_VOID_P
+ ctypedef enum yaml_encoding_t:
+ YAML_ANY_ENCODING
+ YAML_UTF8_ENCODING
+ YAML_UTF16LE_ENCODING
+ YAML_UTF16BE_ENCODING
+ ctypedef enum yaml_break_t:
+ YAML_ANY_BREAK
+ YAML_CR_BREAK
+ YAML_LN_BREAK
+ YAML_CRLN_BREAK
+ ctypedef enum yaml_error_type_t:
+ YAML_NO_ERROR
+ YAML_MEMORY_ERROR
+ YAML_READER_ERROR
+ YAML_SCANNER_ERROR
+ YAML_PARSER_ERROR
+ YAML_WRITER_ERROR
+ YAML_EMITTER_ERROR
+ ctypedef enum yaml_scalar_style_t:
+ YAML_ANY_SCALAR_STYLE
+ YAML_PLAIN_SCALAR_STYLE
+ YAML_SINGLE_QUOTED_SCALAR_STYLE
+ YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ YAML_LITERAL_SCALAR_STYLE
+ YAML_FOLDED_SCALAR_STYLE
+ ctypedef enum yaml_sequence_style_t:
+ YAML_ANY_SEQUENCE_STYLE
+ YAML_BLOCK_SEQUENCE_STYLE
+ YAML_FLOW_SEQUENCE_STYLE
+ ctypedef enum yaml_mapping_style_t:
+ YAML_ANY_MAPPING_STYLE
+ YAML_BLOCK_MAPPING_STYLE
+ YAML_FLOW_MAPPING_STYLE
+ ctypedef enum yaml_token_type_t:
+ YAML_NO_TOKEN
+ YAML_STREAM_START_TOKEN
+ YAML_STREAM_END_TOKEN
+ YAML_VERSION_DIRECTIVE_TOKEN
+ YAML_TAG_DIRECTIVE_TOKEN
+ YAML_DOCUMENT_START_TOKEN
+ YAML_DOCUMENT_END_TOKEN
+ YAML_BLOCK_SEQUENCE_START_TOKEN
+ YAML_BLOCK_MAPPING_START_TOKEN
+ YAML_BLOCK_END_TOKEN
+ YAML_FLOW_SEQUENCE_START_TOKEN
+ YAML_FLOW_SEQUENCE_END_TOKEN
+ YAML_FLOW_MAPPING_START_TOKEN
+ YAML_FLOW_MAPPING_END_TOKEN
+ YAML_BLOCK_ENTRY_TOKEN
+ YAML_FLOW_ENTRY_TOKEN
+ YAML_KEY_TOKEN
+ YAML_VALUE_TOKEN
+ YAML_ALIAS_TOKEN
+ YAML_ANCHOR_TOKEN
+ YAML_TAG_TOKEN
+ YAML_SCALAR_TOKEN
+ ctypedef enum yaml_event_type_t:
+ YAML_NO_EVENT
+ YAML_STREAM_START_EVENT
+ YAML_STREAM_END_EVENT
+ YAML_DOCUMENT_START_EVENT
+ YAML_DOCUMENT_END_EVENT
+ YAML_ALIAS_EVENT
+ YAML_SCALAR_EVENT
+ YAML_SEQUENCE_START_EVENT
+ YAML_SEQUENCE_END_EVENT
+ YAML_MAPPING_START_EVENT
+ YAML_MAPPING_END_EVENT
+
ctypedef int yaml_read_handler_t(void *data, char *buffer,
- size_t size, size_t *size_read) except 0
-
+ size_t size, size_t *size_read) except 0
+
ctypedef int yaml_write_handler_t(void *data, char *buffer,
- size_t size) except 0
-
- ctypedef struct yaml_mark_t:
+ size_t size) except 0
+
+ ctypedef struct yaml_mark_t:
size_t index
size_t line
size_t column
- ctypedef struct yaml_version_directive_t:
- int major
- int minor
- ctypedef struct yaml_tag_directive_t:
+ ctypedef struct yaml_version_directive_t:
+ int major
+ int minor
+ ctypedef struct yaml_tag_directive_t:
char *handle
char *prefix
-
- ctypedef struct _yaml_token_stream_start_data_t:
- yaml_encoding_t encoding
- ctypedef struct _yaml_token_alias_data_t:
- char *value
- ctypedef struct _yaml_token_anchor_data_t:
- char *value
- ctypedef struct _yaml_token_tag_data_t:
+
+ ctypedef struct _yaml_token_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_token_alias_data_t:
+ char *value
+ ctypedef struct _yaml_token_anchor_data_t:
+ char *value
+ ctypedef struct _yaml_token_tag_data_t:
char *handle
char *suffix
- ctypedef struct _yaml_token_scalar_data_t:
- char *value
+ ctypedef struct _yaml_token_scalar_data_t:
+ char *value
size_t length
- yaml_scalar_style_t style
- ctypedef struct _yaml_token_version_directive_data_t:
- int major
- int minor
- ctypedef struct _yaml_token_tag_directive_data_t:
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_token_version_directive_data_t:
+ int major
+ int minor
+ ctypedef struct _yaml_token_tag_directive_data_t:
char *handle
char *prefix
- ctypedef union _yaml_token_data_t:
- _yaml_token_stream_start_data_t stream_start
- _yaml_token_alias_data_t alias
- _yaml_token_anchor_data_t anchor
- _yaml_token_tag_data_t tag
- _yaml_token_scalar_data_t scalar
- _yaml_token_version_directive_data_t version_directive
- _yaml_token_tag_directive_data_t tag_directive
- ctypedef struct yaml_token_t:
- yaml_token_type_t type
- _yaml_token_data_t data
- yaml_mark_t start_mark
- yaml_mark_t end_mark
-
- ctypedef struct _yaml_event_stream_start_data_t:
- yaml_encoding_t encoding
- ctypedef struct _yaml_event_document_start_data_tag_directives_t:
- yaml_tag_directive_t *start
- yaml_tag_directive_t *end
- ctypedef struct _yaml_event_document_start_data_t:
- yaml_version_directive_t *version_directive
- _yaml_event_document_start_data_tag_directives_t tag_directives
- int implicit
- ctypedef struct _yaml_event_document_end_data_t:
- int implicit
- ctypedef struct _yaml_event_alias_data_t:
- char *anchor
- ctypedef struct _yaml_event_scalar_data_t:
- char *anchor
- char *tag
- char *value
+ ctypedef union _yaml_token_data_t:
+ _yaml_token_stream_start_data_t stream_start
+ _yaml_token_alias_data_t alias
+ _yaml_token_anchor_data_t anchor
+ _yaml_token_tag_data_t tag
+ _yaml_token_scalar_data_t scalar
+ _yaml_token_version_directive_data_t version_directive
+ _yaml_token_tag_directive_data_t tag_directive
+ ctypedef struct yaml_token_t:
+ yaml_token_type_t type
+ _yaml_token_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct _yaml_event_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_event_document_start_data_tag_directives_t:
+ yaml_tag_directive_t *start
+ yaml_tag_directive_t *end
+ ctypedef struct _yaml_event_document_start_data_t:
+ yaml_version_directive_t *version_directive
+ _yaml_event_document_start_data_tag_directives_t tag_directives
+ int implicit
+ ctypedef struct _yaml_event_document_end_data_t:
+ int implicit
+ ctypedef struct _yaml_event_alias_data_t:
+ char *anchor
+ ctypedef struct _yaml_event_scalar_data_t:
+ char *anchor
+ char *tag
+ char *value
size_t length
- int plain_implicit
- int quoted_implicit
- yaml_scalar_style_t style
- ctypedef struct _yaml_event_sequence_start_data_t:
- char *anchor
- char *tag
- int implicit
- yaml_sequence_style_t style
- ctypedef struct _yaml_event_mapping_start_data_t:
- char *anchor
- char *tag
- int implicit
- yaml_mapping_style_t style
- ctypedef union _yaml_event_data_t:
- _yaml_event_stream_start_data_t stream_start
- _yaml_event_document_start_data_t document_start
- _yaml_event_document_end_data_t document_end
- _yaml_event_alias_data_t alias
- _yaml_event_scalar_data_t scalar
- _yaml_event_sequence_start_data_t sequence_start
- _yaml_event_mapping_start_data_t mapping_start
- ctypedef struct yaml_event_t:
- yaml_event_type_t type
- _yaml_event_data_t data
- yaml_mark_t start_mark
- yaml_mark_t end_mark
-
- ctypedef struct yaml_parser_t:
- yaml_error_type_t error
- char *problem
+ int plain_implicit
+ int quoted_implicit
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_event_sequence_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_sequence_style_t style
+ ctypedef struct _yaml_event_mapping_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_mapping_style_t style
+ ctypedef union _yaml_event_data_t:
+ _yaml_event_stream_start_data_t stream_start
+ _yaml_event_document_start_data_t document_start
+ _yaml_event_document_end_data_t document_end
+ _yaml_event_alias_data_t alias
+ _yaml_event_scalar_data_t scalar
+ _yaml_event_sequence_start_data_t sequence_start
+ _yaml_event_mapping_start_data_t mapping_start
+ ctypedef struct yaml_event_t:
+ yaml_event_type_t type
+ _yaml_event_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct yaml_parser_t:
+ yaml_error_type_t error
+ char *problem
size_t problem_offset
- int problem_value
- yaml_mark_t problem_mark
- char *context
- yaml_mark_t context_mark
-
- ctypedef struct yaml_emitter_t:
- yaml_error_type_t error
- char *problem
-
- char *yaml_get_version_string()
- void yaml_get_version(int *major, int *minor, int *patch)
-
- void yaml_token_delete(yaml_token_t *token)
-
- int yaml_stream_start_event_initialize(yaml_event_t *event,
- yaml_encoding_t encoding)
- int yaml_stream_end_event_initialize(yaml_event_t *event)
- int yaml_document_start_event_initialize(yaml_event_t *event,
- yaml_version_directive_t *version_directive,
- yaml_tag_directive_t *tag_directives_start,
- yaml_tag_directive_t *tag_directives_end,
- int implicit)
- int yaml_document_end_event_initialize(yaml_event_t *event,
- int implicit)
+ int problem_value
+ yaml_mark_t problem_mark
+ char *context
+ yaml_mark_t context_mark
+
+ ctypedef struct yaml_emitter_t:
+ yaml_error_type_t error
+ char *problem
+
+ char *yaml_get_version_string()
+ void yaml_get_version(int *major, int *minor, int *patch)
+
+ void yaml_token_delete(yaml_token_t *token)
+
+ int yaml_stream_start_event_initialize(yaml_event_t *event,
+ yaml_encoding_t encoding)
+ int yaml_stream_end_event_initialize(yaml_event_t *event)
+ int yaml_document_start_event_initialize(yaml_event_t *event,
+ yaml_version_directive_t *version_directive,
+ yaml_tag_directive_t *tag_directives_start,
+ yaml_tag_directive_t *tag_directives_end,
+ int implicit)
+ int yaml_document_end_event_initialize(yaml_event_t *event,
+ int implicit)
int yaml_alias_event_initialize(yaml_event_t *event, char *anchor)
- int yaml_scalar_event_initialize(yaml_event_t *event,
+ int yaml_scalar_event_initialize(yaml_event_t *event,
char *anchor, char *tag, char *value, size_t length,
- int plain_implicit, int quoted_implicit,
- yaml_scalar_style_t style)
- int yaml_sequence_start_event_initialize(yaml_event_t *event,
+ int plain_implicit, int quoted_implicit,
+ yaml_scalar_style_t style)
+ int yaml_sequence_start_event_initialize(yaml_event_t *event,
char *anchor, char *tag, int implicit, yaml_sequence_style_t style)
- int yaml_sequence_end_event_initialize(yaml_event_t *event)
- int yaml_mapping_start_event_initialize(yaml_event_t *event,
+ int yaml_sequence_end_event_initialize(yaml_event_t *event)
+ int yaml_mapping_start_event_initialize(yaml_event_t *event,
char *anchor, char *tag, int implicit, yaml_mapping_style_t style)
- int yaml_mapping_end_event_initialize(yaml_event_t *event)
- void yaml_event_delete(yaml_event_t *event)
-
- int yaml_parser_initialize(yaml_parser_t *parser)
- void yaml_parser_delete(yaml_parser_t *parser)
- void yaml_parser_set_input_string(yaml_parser_t *parser,
+ int yaml_mapping_end_event_initialize(yaml_event_t *event)
+ void yaml_event_delete(yaml_event_t *event)
+
+ int yaml_parser_initialize(yaml_parser_t *parser)
+ void yaml_parser_delete(yaml_parser_t *parser)
+ void yaml_parser_set_input_string(yaml_parser_t *parser,
char *input, size_t size)
- void yaml_parser_set_input(yaml_parser_t *parser,
- yaml_read_handler_t *handler, void *data)
- void yaml_parser_set_encoding(yaml_parser_t *parser,
- yaml_encoding_t encoding)
- int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
- int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
-
- int yaml_emitter_initialize(yaml_emitter_t *emitter)
- void yaml_emitter_delete(yaml_emitter_t *emitter)
- void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
+ void yaml_parser_set_input(yaml_parser_t *parser,
+ yaml_read_handler_t *handler, void *data)
+ void yaml_parser_set_encoding(yaml_parser_t *parser,
+ yaml_encoding_t encoding)
+ int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
+ int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
+
+ int yaml_emitter_initialize(yaml_emitter_t *emitter)
+ void yaml_emitter_delete(yaml_emitter_t *emitter)
+ void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
char *output, size_t size, size_t *size_written)
- void yaml_emitter_set_output(yaml_emitter_t *emitter,
- yaml_write_handler_t *handler, void *data)
- void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
- yaml_encoding_t encoding)
- void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
- void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
- void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
- void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
- void yaml_emitter_set_break(yaml_emitter_t *emitter,
- yaml_break_t line_break)
- int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
- int yaml_emitter_flush(yaml_emitter_t *emitter)
-
+ void yaml_emitter_set_output(yaml_emitter_t *emitter,
+ yaml_write_handler_t *handler, void *data)
+ void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
+ yaml_encoding_t encoding)
+ void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
+ void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
+ void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
+ void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
+ void yaml_emitter_set_break(yaml_emitter_t *emitter,
+ yaml_break_t line_break)
+ int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
+ int yaml_emitter_flush(yaml_emitter_t *emitter)
+
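Note on the hunk above: the `_yaml.pxd` declarations re-added here expose the libyaml C API to Cython, covering the encoding/break/error enums, token and event types, the `yaml_parser_t`/`yaml_emitter_t` structs, and the `yaml_parser_*`/`yaml_emitter_*` entry points used by the `_yaml.pyx` module that follows. As a minimal usage sketch (not part of this commit, and assuming PyYAML is built against libyaml so the C-accelerated loader and dumper classes are present), the binding is ultimately consumed through PyYAML's public API:

    # Usage sketch only: exercises the _yaml extension declared above
    # through PyYAML's public API. CSafeLoader/CSafeDumper are available
    # only when PyYAML is compiled with libyaml support.
    import yaml

    # Parsing goes through CParser -> yaml_parser_parse() from the bindings.
    data = yaml.load("a: [1, 2]\nb: {c: 3}", Loader=yaml.CSafeLoader)
    assert data == {"a": [1, 2], "b": {"c": 3}}

    # Emitting mirrors the yaml_emitter_* declarations above.
    text = yaml.dump(data, Dumper=yaml.CSafeDumper, default_flow_style=True)
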
diff --git a/contrib/python/PyYAML/py2/yaml/_yaml.pyx b/contrib/python/PyYAML/py2/yaml/_yaml.pyx
index efdb347aff..ff4efe80b5 100644
--- a/contrib/python/PyYAML/py2/yaml/_yaml.pyx
+++ b/contrib/python/PyYAML/py2/yaml/_yaml.pyx
@@ -1,1527 +1,1527 @@
-
-import yaml
-
-def get_version_string():
+
+import yaml
+
+def get_version_string():
cdef char *value
- value = yaml_get_version_string()
- if PY_MAJOR_VERSION < 3:
- return value
- else:
- return PyUnicode_FromString(value)
-
-def get_version():
- cdef int major, minor, patch
- yaml_get_version(&major, &minor, &patch)
- return (major, minor, patch)
-
-#Mark = yaml.error.Mark
-YAMLError = yaml.error.YAMLError
-ReaderError = yaml.reader.ReaderError
-ScannerError = yaml.scanner.ScannerError
-ParserError = yaml.parser.ParserError
-ComposerError = yaml.composer.ComposerError
-ConstructorError = yaml.constructor.ConstructorError
-EmitterError = yaml.emitter.EmitterError
-SerializerError = yaml.serializer.SerializerError
-RepresenterError = yaml.representer.RepresenterError
-
-StreamStartToken = yaml.tokens.StreamStartToken
-StreamEndToken = yaml.tokens.StreamEndToken
-DirectiveToken = yaml.tokens.DirectiveToken
-DocumentStartToken = yaml.tokens.DocumentStartToken
-DocumentEndToken = yaml.tokens.DocumentEndToken
-BlockSequenceStartToken = yaml.tokens.BlockSequenceStartToken
-BlockMappingStartToken = yaml.tokens.BlockMappingStartToken
-BlockEndToken = yaml.tokens.BlockEndToken
-FlowSequenceStartToken = yaml.tokens.FlowSequenceStartToken
-FlowMappingStartToken = yaml.tokens.FlowMappingStartToken
-FlowSequenceEndToken = yaml.tokens.FlowSequenceEndToken
-FlowMappingEndToken = yaml.tokens.FlowMappingEndToken
-KeyToken = yaml.tokens.KeyToken
-ValueToken = yaml.tokens.ValueToken
-BlockEntryToken = yaml.tokens.BlockEntryToken
-FlowEntryToken = yaml.tokens.FlowEntryToken
-AliasToken = yaml.tokens.AliasToken
-AnchorToken = yaml.tokens.AnchorToken
-TagToken = yaml.tokens.TagToken
-ScalarToken = yaml.tokens.ScalarToken
-
-StreamStartEvent = yaml.events.StreamStartEvent
-StreamEndEvent = yaml.events.StreamEndEvent
-DocumentStartEvent = yaml.events.DocumentStartEvent
-DocumentEndEvent = yaml.events.DocumentEndEvent
-AliasEvent = yaml.events.AliasEvent
-ScalarEvent = yaml.events.ScalarEvent
-SequenceStartEvent = yaml.events.SequenceStartEvent
-SequenceEndEvent = yaml.events.SequenceEndEvent
-MappingStartEvent = yaml.events.MappingStartEvent
-MappingEndEvent = yaml.events.MappingEndEvent
-
-ScalarNode = yaml.nodes.ScalarNode
-SequenceNode = yaml.nodes.SequenceNode
-MappingNode = yaml.nodes.MappingNode
-
-cdef class Mark:
- cdef readonly object name
+ value = yaml_get_version_string()
+ if PY_MAJOR_VERSION < 3:
+ return value
+ else:
+ return PyUnicode_FromString(value)
+
+def get_version():
+ cdef int major, minor, patch
+ yaml_get_version(&major, &minor, &patch)
+ return (major, minor, patch)
+
+#Mark = yaml.error.Mark
+YAMLError = yaml.error.YAMLError
+ReaderError = yaml.reader.ReaderError
+ScannerError = yaml.scanner.ScannerError
+ParserError = yaml.parser.ParserError
+ComposerError = yaml.composer.ComposerError
+ConstructorError = yaml.constructor.ConstructorError
+EmitterError = yaml.emitter.EmitterError
+SerializerError = yaml.serializer.SerializerError
+RepresenterError = yaml.representer.RepresenterError
+
+StreamStartToken = yaml.tokens.StreamStartToken
+StreamEndToken = yaml.tokens.StreamEndToken
+DirectiveToken = yaml.tokens.DirectiveToken
+DocumentStartToken = yaml.tokens.DocumentStartToken
+DocumentEndToken = yaml.tokens.DocumentEndToken
+BlockSequenceStartToken = yaml.tokens.BlockSequenceStartToken
+BlockMappingStartToken = yaml.tokens.BlockMappingStartToken
+BlockEndToken = yaml.tokens.BlockEndToken
+FlowSequenceStartToken = yaml.tokens.FlowSequenceStartToken
+FlowMappingStartToken = yaml.tokens.FlowMappingStartToken
+FlowSequenceEndToken = yaml.tokens.FlowSequenceEndToken
+FlowMappingEndToken = yaml.tokens.FlowMappingEndToken
+KeyToken = yaml.tokens.KeyToken
+ValueToken = yaml.tokens.ValueToken
+BlockEntryToken = yaml.tokens.BlockEntryToken
+FlowEntryToken = yaml.tokens.FlowEntryToken
+AliasToken = yaml.tokens.AliasToken
+AnchorToken = yaml.tokens.AnchorToken
+TagToken = yaml.tokens.TagToken
+ScalarToken = yaml.tokens.ScalarToken
+
+StreamStartEvent = yaml.events.StreamStartEvent
+StreamEndEvent = yaml.events.StreamEndEvent
+DocumentStartEvent = yaml.events.DocumentStartEvent
+DocumentEndEvent = yaml.events.DocumentEndEvent
+AliasEvent = yaml.events.AliasEvent
+ScalarEvent = yaml.events.ScalarEvent
+SequenceStartEvent = yaml.events.SequenceStartEvent
+SequenceEndEvent = yaml.events.SequenceEndEvent
+MappingStartEvent = yaml.events.MappingStartEvent
+MappingEndEvent = yaml.events.MappingEndEvent
+
+ScalarNode = yaml.nodes.ScalarNode
+SequenceNode = yaml.nodes.SequenceNode
+MappingNode = yaml.nodes.MappingNode
+
+cdef class Mark:
+ cdef readonly object name
cdef readonly size_t index
cdef readonly size_t line
cdef readonly size_t column
- cdef readonly buffer
- cdef readonly pointer
-
+ cdef readonly buffer
+ cdef readonly pointer
+
def __init__(self, object name, size_t index, size_t line, size_t column,
- object buffer, object pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self):
- return None
-
- def __str__(self):
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- return where
-
-#class YAMLError(Exception):
-# pass
-#
-#class MarkedYAMLError(YAMLError):
-#
-# def __init__(self, context=None, context_mark=None,
-# problem=None, problem_mark=None, note=None):
-# self.context = context
-# self.context_mark = context_mark
-# self.problem = problem
-# self.problem_mark = problem_mark
-# self.note = note
-#
-# def __str__(self):
-# lines = []
-# if self.context is not None:
-# lines.append(self.context)
-# if self.context_mark is not None \
-# and (self.problem is None or self.problem_mark is None
-# or self.context_mark.name != self.problem_mark.name
-# or self.context_mark.line != self.problem_mark.line
-# or self.context_mark.column != self.problem_mark.column):
-# lines.append(str(self.context_mark))
-# if self.problem is not None:
-# lines.append(self.problem)
-# if self.problem_mark is not None:
-# lines.append(str(self.problem_mark))
-# if self.note is not None:
-# lines.append(self.note)
-# return '\n'.join(lines)
-#
-#class ReaderError(YAMLError):
-#
-# def __init__(self, name, position, character, encoding, reason):
-# self.name = name
-# self.character = character
-# self.position = position
-# self.encoding = encoding
-# self.reason = reason
-#
-# def __str__(self):
-# if isinstance(self.character, str):
-# return "'%s' codec can't decode byte #x%02x: %s\n" \
-# " in \"%s\", position %d" \
-# % (self.encoding, ord(self.character), self.reason,
-# self.name, self.position)
-# else:
-# return "unacceptable character #x%04x: %s\n" \
-# " in \"%s\", position %d" \
-# % (ord(self.character), self.reason,
-# self.name, self.position)
-#
-#class ScannerError(MarkedYAMLError):
-# pass
-#
-#class ParserError(MarkedYAMLError):
-# pass
-#
-#class EmitterError(YAMLError):
-# pass
-#
-#cdef class Token:
-# cdef readonly Mark start_mark
-# cdef readonly Mark end_mark
-# def __init__(self, Mark start_mark, Mark end_mark):
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class StreamStartToken(Token):
-# cdef readonly object encoding
-# def __init__(self, Mark start_mark, Mark end_mark, encoding):
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-# self.encoding = encoding
-#
-#cdef class StreamEndToken(Token):
-# pass
-#
-#cdef class DirectiveToken(Token):
-# cdef readonly object name
-# cdef readonly object value
-# def __init__(self, name, value, Mark start_mark, Mark end_mark):
-# self.name = name
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class DocumentStartToken(Token):
-# pass
-#
-#cdef class DocumentEndToken(Token):
-# pass
-#
-#cdef class BlockSequenceStartToken(Token):
-# pass
-#
-#cdef class BlockMappingStartToken(Token):
-# pass
-#
-#cdef class BlockEndToken(Token):
-# pass
-#
-#cdef class FlowSequenceStartToken(Token):
-# pass
-#
-#cdef class FlowMappingStartToken(Token):
-# pass
-#
-#cdef class FlowSequenceEndToken(Token):
-# pass
-#
-#cdef class FlowMappingEndToken(Token):
-# pass
-#
-#cdef class KeyToken(Token):
-# pass
-#
-#cdef class ValueToken(Token):
-# pass
-#
-#cdef class BlockEntryToken(Token):
-# pass
-#
-#cdef class FlowEntryToken(Token):
-# pass
-#
-#cdef class AliasToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class AnchorToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class TagToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class ScalarToken(Token):
-# cdef readonly object value
-# cdef readonly object plain
-# cdef readonly object style
-# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
-# self.value = value
-# self.plain = plain
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-# self.style = style
-
-cdef class CParser:
-
- cdef yaml_parser_t parser
- cdef yaml_event_t parsed_event
-
- cdef object stream
- cdef object stream_name
- cdef object current_token
- cdef object current_event
- cdef object anchors
- cdef object stream_cache
+ object buffer, object pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self):
+ return None
+
+ def __str__(self):
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ return where
+
+#class YAMLError(Exception):
+# pass
+#
+#class MarkedYAMLError(YAMLError):
+#
+# def __init__(self, context=None, context_mark=None,
+# problem=None, problem_mark=None, note=None):
+# self.context = context
+# self.context_mark = context_mark
+# self.problem = problem
+# self.problem_mark = problem_mark
+# self.note = note
+#
+# def __str__(self):
+# lines = []
+# if self.context is not None:
+# lines.append(self.context)
+# if self.context_mark is not None \
+# and (self.problem is None or self.problem_mark is None
+# or self.context_mark.name != self.problem_mark.name
+# or self.context_mark.line != self.problem_mark.line
+# or self.context_mark.column != self.problem_mark.column):
+# lines.append(str(self.context_mark))
+# if self.problem is not None:
+# lines.append(self.problem)
+# if self.problem_mark is not None:
+# lines.append(str(self.problem_mark))
+# if self.note is not None:
+# lines.append(self.note)
+# return '\n'.join(lines)
+#
+#class ReaderError(YAMLError):
+#
+# def __init__(self, name, position, character, encoding, reason):
+# self.name = name
+# self.character = character
+# self.position = position
+# self.encoding = encoding
+# self.reason = reason
+#
+# def __str__(self):
+# if isinstance(self.character, str):
+# return "'%s' codec can't decode byte #x%02x: %s\n" \
+# " in \"%s\", position %d" \
+# % (self.encoding, ord(self.character), self.reason,
+# self.name, self.position)
+# else:
+# return "unacceptable character #x%04x: %s\n" \
+# " in \"%s\", position %d" \
+# % (ord(self.character), self.reason,
+# self.name, self.position)
+#
+#class ScannerError(MarkedYAMLError):
+# pass
+#
+#class ParserError(MarkedYAMLError):
+# pass
+#
+#class EmitterError(YAMLError):
+# pass
+#
+#cdef class Token:
+# cdef readonly Mark start_mark
+# cdef readonly Mark end_mark
+# def __init__(self, Mark start_mark, Mark end_mark):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class StreamStartToken(Token):
+# cdef readonly object encoding
+# def __init__(self, Mark start_mark, Mark end_mark, encoding):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.encoding = encoding
+#
+#cdef class StreamEndToken(Token):
+# pass
+#
+#cdef class DirectiveToken(Token):
+# cdef readonly object name
+# cdef readonly object value
+# def __init__(self, name, value, Mark start_mark, Mark end_mark):
+# self.name = name
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class DocumentStartToken(Token):
+# pass
+#
+#cdef class DocumentEndToken(Token):
+# pass
+#
+#cdef class BlockSequenceStartToken(Token):
+# pass
+#
+#cdef class BlockMappingStartToken(Token):
+# pass
+#
+#cdef class BlockEndToken(Token):
+# pass
+#
+#cdef class FlowSequenceStartToken(Token):
+# pass
+#
+#cdef class FlowMappingStartToken(Token):
+# pass
+#
+#cdef class FlowSequenceEndToken(Token):
+# pass
+#
+#cdef class FlowMappingEndToken(Token):
+# pass
+#
+#cdef class KeyToken(Token):
+# pass
+#
+#cdef class ValueToken(Token):
+# pass
+#
+#cdef class BlockEntryToken(Token):
+# pass
+#
+#cdef class FlowEntryToken(Token):
+# pass
+#
+#cdef class AliasToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class AnchorToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class TagToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class ScalarToken(Token):
+# cdef readonly object value
+# cdef readonly object plain
+# cdef readonly object style
+# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
+# self.value = value
+# self.plain = plain
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.style = style
+
+cdef class CParser:
+
+ cdef yaml_parser_t parser
+ cdef yaml_event_t parsed_event
+
+ cdef object stream
+ cdef object stream_name
+ cdef object current_token
+ cdef object current_event
+ cdef object anchors
+ cdef object stream_cache
cdef int stream_cache_len
cdef int stream_cache_pos
- cdef int unicode_source
-
- def __init__(self, stream):
- cdef is_readable
- if yaml_parser_initialize(&self.parser) == 0:
- raise MemoryError
- self.parsed_event.type = YAML_NO_EVENT
- is_readable = 1
- try:
- stream.read
- except AttributeError:
- is_readable = 0
- self.unicode_source = 0
- if is_readable:
- self.stream = stream
- try:
- self.stream_name = stream.name
- except AttributeError:
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<file>'
- else:
- self.stream_name = u'<file>'
- self.stream_cache = None
- self.stream_cache_len = 0
- self.stream_cache_pos = 0
- yaml_parser_set_input(&self.parser, input_handler, <void *>self)
- else:
- if PyUnicode_CheckExact(stream) != 0:
- stream = PyUnicode_AsUTF8String(stream)
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<unicode string>'
- else:
- self.stream_name = u'<unicode string>'
- self.unicode_source = 1
- else:
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<byte string>'
- else:
- self.stream_name = u'<byte string>'
- if PyString_CheckExact(stream) == 0:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("a string or stream input is required")
- else:
- raise TypeError(u"a string or stream input is required")
- self.stream = stream
+ cdef int unicode_source
+
+ def __init__(self, stream):
+ cdef is_readable
+ if yaml_parser_initialize(&self.parser) == 0:
+ raise MemoryError
+ self.parsed_event.type = YAML_NO_EVENT
+ is_readable = 1
+ try:
+ stream.read
+ except AttributeError:
+ is_readable = 0
+ self.unicode_source = 0
+ if is_readable:
+ self.stream = stream
+ try:
+ self.stream_name = stream.name
+ except AttributeError:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<file>'
+ else:
+ self.stream_name = u'<file>'
+ self.stream_cache = None
+ self.stream_cache_len = 0
+ self.stream_cache_pos = 0
+ yaml_parser_set_input(&self.parser, input_handler, <void *>self)
+ else:
+ if PyUnicode_CheckExact(stream) != 0:
+ stream = PyUnicode_AsUTF8String(stream)
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<unicode string>'
+ else:
+ self.stream_name = u'<unicode string>'
+ self.unicode_source = 1
+ else:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<byte string>'
+ else:
+ self.stream_name = u'<byte string>'
+ if PyString_CheckExact(stream) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string or stream input is required")
+ else:
+ raise TypeError(u"a string or stream input is required")
+ self.stream = stream
yaml_parser_set_input_string(&self.parser, PyString_AS_STRING(stream), PyString_GET_SIZE(stream))
- self.current_token = None
- self.current_event = None
- self.anchors = {}
-
- def __dealloc__(self):
- yaml_parser_delete(&self.parser)
- yaml_event_delete(&self.parsed_event)
-
- def dispose(self):
- pass
-
- cdef object _parser_error(self):
- if self.parser.error == YAML_MEMORY_ERROR:
- return MemoryError
- elif self.parser.error == YAML_READER_ERROR:
- if PY_MAJOR_VERSION < 3:
- return ReaderError(self.stream_name, self.parser.problem_offset,
- self.parser.problem_value, '?', self.parser.problem)
- else:
- return ReaderError(self.stream_name, self.parser.problem_offset,
- self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
- elif self.parser.error == YAML_SCANNER_ERROR \
- or self.parser.error == YAML_PARSER_ERROR:
- context_mark = None
- problem_mark = None
- if self.parser.context != NULL:
- context_mark = Mark(self.stream_name,
- self.parser.context_mark.index,
- self.parser.context_mark.line,
- self.parser.context_mark.column, None, None)
- if self.parser.problem != NULL:
- problem_mark = Mark(self.stream_name,
- self.parser.problem_mark.index,
- self.parser.problem_mark.line,
- self.parser.problem_mark.column, None, None)
- context = None
- if self.parser.context != NULL:
- if PY_MAJOR_VERSION < 3:
- context = self.parser.context
- else:
- context = PyUnicode_FromString(self.parser.context)
- if PY_MAJOR_VERSION < 3:
- problem = self.parser.problem
- else:
- problem = PyUnicode_FromString(self.parser.problem)
- if self.parser.error == YAML_SCANNER_ERROR:
- return ScannerError(context, context_mark, problem, problem_mark)
- else:
- return ParserError(context, context_mark, problem, problem_mark)
- if PY_MAJOR_VERSION < 3:
- raise ValueError("no parser error")
- else:
- raise ValueError(u"no parser error")
-
- def raw_scan(self):
- cdef yaml_token_t token
- cdef int done
- cdef int count
- count = 0
- done = 0
- while done == 0:
- if yaml_parser_scan(&self.parser, &token) == 0:
- error = self._parser_error()
- raise error
- if token.type == YAML_NO_TOKEN:
- done = 1
- else:
- count = count+1
- yaml_token_delete(&token)
- return count
-
- cdef object _scan(self):
- cdef yaml_token_t token
- if yaml_parser_scan(&self.parser, &token) == 0:
- error = self._parser_error()
- raise error
- token_object = self._token_to_object(&token)
- yaml_token_delete(&token)
- return token_object
-
- cdef object _token_to_object(self, yaml_token_t *token):
- start_mark = Mark(self.stream_name,
- token.start_mark.index,
- token.start_mark.line,
- token.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- token.end_mark.index,
- token.end_mark.line,
- token.end_mark.column,
- None, None)
- if token.type == YAML_NO_TOKEN:
- return None
- elif token.type == YAML_STREAM_START_TOKEN:
- encoding = None
- if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
- if self.unicode_source == 0:
- encoding = u"utf-8"
- elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
- encoding = u"utf-16-le"
- elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
- encoding = u"utf-16-be"
- return StreamStartToken(start_mark, end_mark, encoding)
- elif token.type == YAML_STREAM_END_TOKEN:
- return StreamEndToken(start_mark, end_mark)
- elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
- return DirectiveToken(u"YAML",
- (token.data.version_directive.major,
- token.data.version_directive.minor),
- start_mark, end_mark)
- elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
+ self.current_token = None
+ self.current_event = None
+ self.anchors = {}
+
+ def __dealloc__(self):
+ yaml_parser_delete(&self.parser)
+ yaml_event_delete(&self.parsed_event)
+
+ def dispose(self):
+ pass
+
+ cdef object _parser_error(self):
+ if self.parser.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.parser.error == YAML_READER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, '?', self.parser.problem)
+ else:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
+ elif self.parser.error == YAML_SCANNER_ERROR \
+ or self.parser.error == YAML_PARSER_ERROR:
+ context_mark = None
+ problem_mark = None
+ if self.parser.context != NULL:
+ context_mark = Mark(self.stream_name,
+ self.parser.context_mark.index,
+ self.parser.context_mark.line,
+ self.parser.context_mark.column, None, None)
+ if self.parser.problem != NULL:
+ problem_mark = Mark(self.stream_name,
+ self.parser.problem_mark.index,
+ self.parser.problem_mark.line,
+ self.parser.problem_mark.column, None, None)
+ context = None
+ if self.parser.context != NULL:
+ if PY_MAJOR_VERSION < 3:
+ context = self.parser.context
+ else:
+ context = PyUnicode_FromString(self.parser.context)
+ if PY_MAJOR_VERSION < 3:
+ problem = self.parser.problem
+ else:
+ problem = PyUnicode_FromString(self.parser.problem)
+ if self.parser.error == YAML_SCANNER_ERROR:
+ return ScannerError(context, context_mark, problem, problem_mark)
+ else:
+ return ParserError(context, context_mark, problem, problem_mark)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no parser error")
+ else:
+ raise ValueError(u"no parser error")
+
+ def raw_scan(self):
+ cdef yaml_token_t token
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ if token.type == YAML_NO_TOKEN:
+ done = 1
+ else:
+ count = count+1
+ yaml_token_delete(&token)
+ return count
+
+ cdef object _scan(self):
+ cdef yaml_token_t token
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ token_object = self._token_to_object(&token)
+ yaml_token_delete(&token)
+ return token_object
+
+ cdef object _token_to_object(self, yaml_token_t *token):
+ start_mark = Mark(self.stream_name,
+ token.start_mark.index,
+ token.start_mark.line,
+ token.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ token.end_mark.index,
+ token.end_mark.line,
+ token.end_mark.column,
+ None, None)
+ if token.type == YAML_NO_TOKEN:
+ return None
+ elif token.type == YAML_STREAM_START_TOKEN:
+ encoding = None
+ if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartToken(start_mark, end_mark, encoding)
+ elif token.type == YAML_STREAM_END_TOKEN:
+ return StreamEndToken(start_mark, end_mark)
+ elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
+ return DirectiveToken(u"YAML",
+ (token.data.version_directive.major,
+ token.data.version_directive.minor),
+ start_mark, end_mark)
+ elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
handle = PyUnicode_FromString(token.data.tag_directive.handle)
prefix = PyUnicode_FromString(token.data.tag_directive.prefix)
- return DirectiveToken(u"TAG", (handle, prefix),
- start_mark, end_mark)
- elif token.type == YAML_DOCUMENT_START_TOKEN:
- return DocumentStartToken(start_mark, end_mark)
- elif token.type == YAML_DOCUMENT_END_TOKEN:
- return DocumentEndToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
- return BlockSequenceStartToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
- return BlockMappingStartToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_END_TOKEN:
- return BlockEndToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
- return FlowSequenceStartToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
- return FlowSequenceEndToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
- return FlowMappingStartToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
- return FlowMappingEndToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_ENTRY_TOKEN:
- return BlockEntryToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_ENTRY_TOKEN:
- return FlowEntryToken(start_mark, end_mark)
- elif token.type == YAML_KEY_TOKEN:
- return KeyToken(start_mark, end_mark)
- elif token.type == YAML_VALUE_TOKEN:
- return ValueToken(start_mark, end_mark)
- elif token.type == YAML_ALIAS_TOKEN:
- value = PyUnicode_FromString(token.data.alias.value)
- return AliasToken(value, start_mark, end_mark)
- elif token.type == YAML_ANCHOR_TOKEN:
- value = PyUnicode_FromString(token.data.anchor.value)
- return AnchorToken(value, start_mark, end_mark)
- elif token.type == YAML_TAG_TOKEN:
+ return DirectiveToken(u"TAG", (handle, prefix),
+ start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_START_TOKEN:
+ return DocumentStartToken(start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_END_TOKEN:
+ return DocumentEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
+ return BlockSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
+ return BlockMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_END_TOKEN:
+ return BlockEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
+ return FlowSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
+ return FlowSequenceEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
+ return FlowMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
+ return FlowMappingEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_ENTRY_TOKEN:
+ return BlockEntryToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_ENTRY_TOKEN:
+ return FlowEntryToken(start_mark, end_mark)
+ elif token.type == YAML_KEY_TOKEN:
+ return KeyToken(start_mark, end_mark)
+ elif token.type == YAML_VALUE_TOKEN:
+ return ValueToken(start_mark, end_mark)
+ elif token.type == YAML_ALIAS_TOKEN:
+ value = PyUnicode_FromString(token.data.alias.value)
+ return AliasToken(value, start_mark, end_mark)
+ elif token.type == YAML_ANCHOR_TOKEN:
+ value = PyUnicode_FromString(token.data.anchor.value)
+ return AnchorToken(value, start_mark, end_mark)
+ elif token.type == YAML_TAG_TOKEN:
handle = PyUnicode_FromString(token.data.tag.handle)
suffix = PyUnicode_FromString(token.data.tag.suffix)
- if not handle:
- handle = None
- return TagToken((handle, suffix), start_mark, end_mark)
- elif token.type == YAML_SCALAR_TOKEN:
+ if not handle:
+ handle = None
+ return TagToken((handle, suffix), start_mark, end_mark)
+ elif token.type == YAML_SCALAR_TOKEN:
value = PyUnicode_DecodeUTF8(token.data.scalar.value,
- token.data.scalar.length, 'strict')
- plain = False
- style = None
- if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- plain = True
- style = u''
- elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- return ScalarToken(value, plain,
- start_mark, end_mark, style)
- else:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("unknown token type")
- else:
- raise ValueError(u"unknown token type")
-
- def get_token(self):
- if self.current_token is not None:
- value = self.current_token
- self.current_token = None
- else:
- value = self._scan()
- return value
-
- def peek_token(self):
- if self.current_token is None:
- self.current_token = self._scan()
- return self.current_token
-
- def check_token(self, *choices):
- if self.current_token is None:
- self.current_token = self._scan()
- if self.current_token is None:
- return False
- if not choices:
- return True
- token_class = self.current_token.__class__
- for choice in choices:
- if token_class is choice:
- return True
- return False
-
- def raw_parse(self):
- cdef yaml_event_t event
- cdef int done
- cdef int count
- count = 0
- done = 0
- while done == 0:
- if yaml_parser_parse(&self.parser, &event) == 0:
- error = self._parser_error()
- raise error
- if event.type == YAML_NO_EVENT:
- done = 1
- else:
- count = count+1
- yaml_event_delete(&event)
- return count
-
- cdef object _parse(self):
- cdef yaml_event_t event
- if yaml_parser_parse(&self.parser, &event) == 0:
- error = self._parser_error()
- raise error
- event_object = self._event_to_object(&event)
- yaml_event_delete(&event)
- return event_object
-
- cdef object _event_to_object(self, yaml_event_t *event):
- cdef yaml_tag_directive_t *tag_directive
- start_mark = Mark(self.stream_name,
- event.start_mark.index,
- event.start_mark.line,
- event.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- event.end_mark.index,
- event.end_mark.line,
- event.end_mark.column,
- None, None)
- if event.type == YAML_NO_EVENT:
- return None
- elif event.type == YAML_STREAM_START_EVENT:
- encoding = None
- if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
- if self.unicode_source == 0:
- encoding = u"utf-8"
- elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
- encoding = u"utf-16-le"
- elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
- encoding = u"utf-16-be"
- return StreamStartEvent(start_mark, end_mark, encoding)
- elif event.type == YAML_STREAM_END_EVENT:
- return StreamEndEvent(start_mark, end_mark)
- elif event.type == YAML_DOCUMENT_START_EVENT:
- explicit = False
- if event.data.document_start.implicit == 0:
- explicit = True
- version = None
- if event.data.document_start.version_directive != NULL:
- version = (event.data.document_start.version_directive.major,
- event.data.document_start.version_directive.minor)
- tags = None
- if event.data.document_start.tag_directives.start != NULL:
- tags = {}
- tag_directive = event.data.document_start.tag_directives.start
- while tag_directive != event.data.document_start.tag_directives.end:
+ token.data.scalar.length, 'strict')
+ plain = False
+ style = None
+ if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ plain = True
+ style = u''
+ elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarToken(value, plain,
+ start_mark, end_mark, style)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown token type")
+ else:
+ raise ValueError(u"unknown token type")
+
+ def get_token(self):
+ if self.current_token is not None:
+ value = self.current_token
+ self.current_token = None
+ else:
+ value = self._scan()
+ return value
+
+ def peek_token(self):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ return self.current_token
+
+ def check_token(self, *choices):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ if self.current_token is None:
+ return False
+ if not choices:
+ return True
+ token_class = self.current_token.__class__
+ for choice in choices:
+ if token_class is choice:
+ return True
+ return False
+
+ def raw_parse(self):
+ cdef yaml_event_t event
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ if event.type == YAML_NO_EVENT:
+ done = 1
+ else:
+ count = count+1
+ yaml_event_delete(&event)
+ return count
+
+ cdef object _parse(self):
+ cdef yaml_event_t event
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ event_object = self._event_to_object(&event)
+ yaml_event_delete(&event)
+ return event_object
+
+ cdef object _event_to_object(self, yaml_event_t *event):
+ cdef yaml_tag_directive_t *tag_directive
+ start_mark = Mark(self.stream_name,
+ event.start_mark.index,
+ event.start_mark.line,
+ event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ event.end_mark.index,
+ event.end_mark.line,
+ event.end_mark.column,
+ None, None)
+ if event.type == YAML_NO_EVENT:
+ return None
+ elif event.type == YAML_STREAM_START_EVENT:
+ encoding = None
+ if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartEvent(start_mark, end_mark, encoding)
+ elif event.type == YAML_STREAM_END_EVENT:
+ return StreamEndEvent(start_mark, end_mark)
+ elif event.type == YAML_DOCUMENT_START_EVENT:
+ explicit = False
+ if event.data.document_start.implicit == 0:
+ explicit = True
+ version = None
+ if event.data.document_start.version_directive != NULL:
+ version = (event.data.document_start.version_directive.major,
+ event.data.document_start.version_directive.minor)
+ tags = None
+ if event.data.document_start.tag_directives.start != NULL:
+ tags = {}
+ tag_directive = event.data.document_start.tag_directives.start
+ while tag_directive != event.data.document_start.tag_directives.end:
handle = PyUnicode_FromString(tag_directive.handle)
prefix = PyUnicode_FromString(tag_directive.prefix)
- tags[handle] = prefix
- tag_directive = tag_directive+1
- return DocumentStartEvent(start_mark, end_mark,
- explicit, version, tags)
- elif event.type == YAML_DOCUMENT_END_EVENT:
- explicit = False
- if event.data.document_end.implicit == 0:
- explicit = True
- return DocumentEndEvent(start_mark, end_mark, explicit)
- elif event.type == YAML_ALIAS_EVENT:
- anchor = PyUnicode_FromString(event.data.alias.anchor)
- return AliasEvent(anchor, start_mark, end_mark)
- elif event.type == YAML_SCALAR_EVENT:
- anchor = None
- if event.data.scalar.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.scalar.anchor)
- tag = None
- if event.data.scalar.tag != NULL:
- tag = PyUnicode_FromString(event.data.scalar.tag)
+ tags[handle] = prefix
+ tag_directive = tag_directive+1
+ return DocumentStartEvent(start_mark, end_mark,
+ explicit, version, tags)
+ elif event.type == YAML_DOCUMENT_END_EVENT:
+ explicit = False
+ if event.data.document_end.implicit == 0:
+ explicit = True
+ return DocumentEndEvent(start_mark, end_mark, explicit)
+ elif event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(event.data.alias.anchor)
+ return AliasEvent(anchor, start_mark, end_mark)
+ elif event.type == YAML_SCALAR_EVENT:
+ anchor = None
+ if event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.scalar.anchor)
+ tag = None
+ if event.data.scalar.tag != NULL:
+ tag = PyUnicode_FromString(event.data.scalar.tag)
value = PyUnicode_DecodeUTF8(event.data.scalar.value,
- event.data.scalar.length, 'strict')
- plain_implicit = False
- if event.data.scalar.plain_implicit == 1:
- plain_implicit = True
- quoted_implicit = False
- if event.data.scalar.quoted_implicit == 1:
- quoted_implicit = True
- style = None
- if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- style = u''
- elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- return ScalarEvent(anchor, tag,
- (plain_implicit, quoted_implicit),
- value, start_mark, end_mark, style)
- elif event.type == YAML_SEQUENCE_START_EVENT:
- anchor = None
- if event.data.sequence_start.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
- tag = None
- if event.data.sequence_start.tag != NULL:
- tag = PyUnicode_FromString(event.data.sequence_start.tag)
- implicit = False
- if event.data.sequence_start.implicit == 1:
- implicit = True
- flow_style = None
- if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
- flow_style = True
- elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
- flow_style = False
- return SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style)
- elif event.type == YAML_MAPPING_START_EVENT:
- anchor = None
- if event.data.mapping_start.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
- tag = None
- if event.data.mapping_start.tag != NULL:
- tag = PyUnicode_FromString(event.data.mapping_start.tag)
- implicit = False
- if event.data.mapping_start.implicit == 1:
- implicit = True
- flow_style = None
- if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
- flow_style = True
- elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
- flow_style = False
- return MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style)
- elif event.type == YAML_SEQUENCE_END_EVENT:
- return SequenceEndEvent(start_mark, end_mark)
- elif event.type == YAML_MAPPING_END_EVENT:
- return MappingEndEvent(start_mark, end_mark)
- else:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("unknown event type")
- else:
- raise ValueError(u"unknown event type")
-
- def get_event(self):
- if self.current_event is not None:
- value = self.current_event
- self.current_event = None
- else:
- value = self._parse()
- return value
-
- def peek_event(self):
- if self.current_event is None:
- self.current_event = self._parse()
- return self.current_event
-
- def check_event(self, *choices):
- if self.current_event is None:
- self.current_event = self._parse()
- if self.current_event is None:
- return False
- if not choices:
- return True
- event_class = self.current_event.__class__
- for choice in choices:
- if event_class is choice:
- return True
- return False
-
- def check_node(self):
- self._parse_next_event()
- if self.parsed_event.type == YAML_STREAM_START_EVENT:
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- return True
- return False
-
- def get_node(self):
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- return self._compose_document()
-
- def get_single_node(self):
- self._parse_next_event()
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- document = None
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- document = self._compose_document()
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document", mark)
- else:
- raise ComposerError(u"expected a single document in the stream",
- document.start_mark, u"but found another document", mark)
- return document
-
- cdef object _compose_document(self):
- yaml_event_delete(&self.parsed_event)
- node = self._compose_node(None, None)
- self._parse_next_event()
- yaml_event_delete(&self.parsed_event)
- self.anchors = {}
- return node
-
- cdef object _compose_node(self, object parent, object index):
- self._parse_next_event()
- if self.parsed_event.type == YAML_ALIAS_EVENT:
- anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
- if anchor not in self.anchors:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
- raise ComposerError(None, None, "found undefined alias", mark)
- else:
- raise ComposerError(None, None, u"found undefined alias", mark)
- yaml_event_delete(&self.parsed_event)
- return self.anchors[anchor]
- anchor = None
- if self.parsed_event.type == YAML_SCALAR_EVENT \
- and self.parsed_event.data.scalar.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
- elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
- and self.parsed_event.data.sequence_start.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
- elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
- and self.parsed_event.data.mapping_start.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
- if anchor is not None:
- if anchor in self.anchors:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
+ event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ style = None
+ if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarEvent(anchor, tag,
+ (plain_implicit, quoted_implicit),
+ value, start_mark, end_mark, style)
+ elif event.type == YAML_SEQUENCE_START_EVENT:
+ anchor = None
+ if event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
+ tag = None
+ if event.data.sequence_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.sequence_start.tag)
+ implicit = False
+ if event.data.sequence_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ return SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_MAPPING_START_EVENT:
+ anchor = None
+ if event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
+ tag = None
+ if event.data.mapping_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.mapping_start.tag)
+ implicit = False
+ if event.data.mapping_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ return MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_SEQUENCE_END_EVENT:
+ return SequenceEndEvent(start_mark, end_mark)
+ elif event.type == YAML_MAPPING_END_EVENT:
+ return MappingEndEvent(start_mark, end_mark)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown event type")
+ else:
+ raise ValueError(u"unknown event type")
+
+ def get_event(self):
+ if self.current_event is not None:
+ value = self.current_event
+ self.current_event = None
+ else:
+ value = self._parse()
+ return value
+
+ def peek_event(self):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ return self.current_event
+
+ def check_event(self, *choices):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ if self.current_event is None:
+ return False
+ if not choices:
+ return True
+ event_class = self.current_event.__class__
+ for choice in choices:
+ if event_class is choice:
+ return True
+ return False
+
+ def check_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_STREAM_START_EVENT:
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return True
+ return False
+
+ def get_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return self._compose_document()
+
+ def get_single_node(self):
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ document = None
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ document = self._compose_document()
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document", mark)
+ else:
+ raise ComposerError(u"expected a single document in the stream",
+ document.start_mark, u"but found another document", mark)
+ return document
+
+ cdef object _compose_document(self):
+ yaml_event_delete(&self.parsed_event)
+ node = self._compose_node(None, None)
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self.anchors = {}
+ return node
+
+ cdef object _compose_node(self, object parent, object index):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
+ if anchor not in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError(None, None, "found undefined alias", mark)
+ else:
+ raise ComposerError(None, None, u"found undefined alias", mark)
+ yaml_event_delete(&self.parsed_event)
+ return self.anchors[anchor]
+ anchor = None
+ if self.parsed_event.type == YAML_SCALAR_EVENT \
+ and self.parsed_event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
+ and self.parsed_event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
+ and self.parsed_event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
+ if anchor is not None:
+ if anchor in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
raise ComposerError("found duplicate anchor; first occurrence",
self.anchors[anchor].start_mark, "second occurrence", mark)
- else:
+ else:
raise ComposerError(u"found duplicate anchor; first occurrence",
self.anchors[anchor].start_mark, u"second occurrence", mark)
- self.descend_resolver(parent, index)
- if self.parsed_event.type == YAML_SCALAR_EVENT:
- node = self._compose_scalar_node(anchor)
- elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
- node = self._compose_sequence_node(anchor)
- elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
- node = self._compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- cdef _compose_scalar_node(self, object anchor):
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
+ self.descend_resolver(parent, index)
+ if self.parsed_event.type == YAML_SCALAR_EVENT:
+ node = self._compose_scalar_node(anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
+ node = self._compose_sequence_node(anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
+ node = self._compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ cdef _compose_scalar_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
value = PyUnicode_DecodeUTF8(self.parsed_event.data.scalar.value,
- self.parsed_event.data.scalar.length, 'strict')
- plain_implicit = False
- if self.parsed_event.data.scalar.plain_implicit == 1:
- plain_implicit = True
- quoted_implicit = False
- if self.parsed_event.data.scalar.quoted_implicit == 1:
- quoted_implicit = True
- if self.parsed_event.data.scalar.tag == NULL \
- or (self.parsed_event.data.scalar.tag[0] == c'!'
- and self.parsed_event.data.scalar.tag[1] == c'\0'):
- tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
- style = None
- if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- style = u''
- elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- node = ScalarNode(tag, value, start_mark, end_mark, style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef _compose_sequence_node(self, object anchor):
- cdef int index
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- implicit = False
- if self.parsed_event.data.sequence_start.implicit == 1:
- implicit = True
- if self.parsed_event.data.sequence_start.tag == NULL \
- or (self.parsed_event.data.sequence_start.tag[0] == c'!'
- and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
- tag = self.resolve(SequenceNode, None, implicit)
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
- flow_style = None
- if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
- flow_style = True
- elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
- flow_style = False
- value = []
- node = SequenceNode(tag, value, start_mark, None, flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- index = 0
- self._parse_next_event()
- while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
- value.append(self._compose_node(node, index))
- index = index+1
- self._parse_next_event()
- node.end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef _compose_mapping_node(self, object anchor):
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- implicit = False
- if self.parsed_event.data.mapping_start.implicit == 1:
- implicit = True
- if self.parsed_event.data.mapping_start.tag == NULL \
- or (self.parsed_event.data.mapping_start.tag[0] == c'!'
- and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
- tag = self.resolve(MappingNode, None, implicit)
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
- flow_style = None
- if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
- flow_style = True
- elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
- flow_style = False
- value = []
- node = MappingNode(tag, value, start_mark, None, flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- while self.parsed_event.type != YAML_MAPPING_END_EVENT:
- item_key = self._compose_node(node, None)
- item_value = self._compose_node(node, item_key)
- value.append((item_key, item_value))
- self._parse_next_event()
- node.end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef int _parse_next_event(self) except 0:
- if self.parsed_event.type == YAML_NO_EVENT:
- if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
- error = self._parser_error()
- raise error
- return 1
-
+ self.parsed_event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if self.parsed_event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if self.parsed_event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ if self.parsed_event.data.scalar.tag == NULL \
+ or (self.parsed_event.data.scalar.tag[0] == c'!'
+ and self.parsed_event.data.scalar.tag[1] == c'\0'):
+ tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
+ style = None
+ if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ node = ScalarNode(tag, value, start_mark, end_mark, style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_sequence_node(self, object anchor):
+ cdef int index
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.sequence_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.sequence_start.tag == NULL \
+ or (self.parsed_event.data.sequence_start.tag[0] == c'!'
+ and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
+ tag = self.resolve(SequenceNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
+ flow_style = None
+ if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ value = []
+ node = SequenceNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ index = 0
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
+ value.append(self._compose_node(node, index))
+ index = index+1
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_mapping_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.mapping_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.mapping_start.tag == NULL \
+ or (self.parsed_event.data.mapping_start.tag[0] == c'!'
+ and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
+ tag = self.resolve(MappingNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
+ flow_style = None
+ if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ value = []
+ node = MappingNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_MAPPING_END_EVENT:
+ item_key = self._compose_node(node, None)
+ item_value = self._compose_node(node, item_key)
+ value.append((item_key, item_value))
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef int _parse_next_event(self) except 0:
+ if self.parsed_event.type == YAML_NO_EVENT:
+ if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
+ error = self._parser_error()
+ raise error
+ return 1
+
cdef int input_handler(void *data, char *buffer, size_t size, size_t *read) except 0:
- cdef CParser parser
- parser = <CParser>data
- if parser.stream_cache is None:
- value = parser.stream.read(size)
- if PyUnicode_CheckExact(value) != 0:
- value = PyUnicode_AsUTF8String(value)
- parser.unicode_source = 1
- if PyString_CheckExact(value) == 0:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("a string value is expected")
- else:
- raise TypeError(u"a string value is expected")
- parser.stream_cache = value
- parser.stream_cache_pos = 0
- parser.stream_cache_len = PyString_GET_SIZE(value)
- if (parser.stream_cache_len - parser.stream_cache_pos) < size:
- size = parser.stream_cache_len - parser.stream_cache_pos
- if size > 0:
+ cdef CParser parser
+ parser = <CParser>data
+ if parser.stream_cache is None:
+ value = parser.stream.read(size)
+ if PyUnicode_CheckExact(value) != 0:
+ value = PyUnicode_AsUTF8String(value)
+ parser.unicode_source = 1
+ if PyString_CheckExact(value) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string value is expected")
+ else:
+ raise TypeError(u"a string value is expected")
+ parser.stream_cache = value
+ parser.stream_cache_pos = 0
+ parser.stream_cache_len = PyString_GET_SIZE(value)
+ if (parser.stream_cache_len - parser.stream_cache_pos) < size:
+ size = parser.stream_cache_len - parser.stream_cache_pos
+ if size > 0:
memcpy(buffer, PyString_AS_STRING(parser.stream_cache)
- + parser.stream_cache_pos, size)
- read[0] = size
- parser.stream_cache_pos += size
- if parser.stream_cache_pos == parser.stream_cache_len:
- parser.stream_cache = None
- return 1
-
-cdef class CEmitter:
-
- cdef yaml_emitter_t emitter
-
- cdef object stream
-
- cdef int document_start_implicit
- cdef int document_end_implicit
- cdef object use_version
- cdef object use_tags
-
- cdef object serialized_nodes
- cdef object anchors
- cdef int last_alias_id
- cdef int closed
- cdef int dump_unicode
- cdef object use_encoding
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- if yaml_emitter_initialize(&self.emitter) == 0:
- raise MemoryError
- self.stream = stream
- self.dump_unicode = 0
- if PY_MAJOR_VERSION < 3:
- if getattr3(stream, 'encoding', None):
- self.dump_unicode = 1
- else:
- if hasattr(stream, u'encoding'):
- self.dump_unicode = 1
- self.use_encoding = encoding
+ + parser.stream_cache_pos, size)
+ read[0] = size
+ parser.stream_cache_pos += size
+ if parser.stream_cache_pos == parser.stream_cache_len:
+ parser.stream_cache = None
+ return 1
+
+cdef class CEmitter:
+
+ cdef yaml_emitter_t emitter
+
+ cdef object stream
+
+ cdef int document_start_implicit
+ cdef int document_end_implicit
+ cdef object use_version
+ cdef object use_tags
+
+ cdef object serialized_nodes
+ cdef object anchors
+ cdef int last_alias_id
+ cdef int closed
+ cdef int dump_unicode
+ cdef object use_encoding
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ if yaml_emitter_initialize(&self.emitter) == 0:
+ raise MemoryError
+ self.stream = stream
+ self.dump_unicode = 0
+ if PY_MAJOR_VERSION < 3:
+ if getattr3(stream, 'encoding', None):
+ self.dump_unicode = 1
+ else:
+ if hasattr(stream, u'encoding'):
+ self.dump_unicode = 1
+ self.use_encoding = encoding
yaml_emitter_set_output(&self.emitter, output_handler, <void *>self)
- if canonical:
- yaml_emitter_set_canonical(&self.emitter, 1)
- if indent is not None:
- yaml_emitter_set_indent(&self.emitter, indent)
- if width is not None:
- yaml_emitter_set_width(&self.emitter, width)
- if allow_unicode:
- yaml_emitter_set_unicode(&self.emitter, 1)
- if line_break is not None:
- if line_break == '\r':
- yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
- elif line_break == '\n':
- yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
- elif line_break == '\r\n':
- yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
- self.document_start_implicit = 1
- if explicit_start:
- self.document_start_implicit = 0
- self.document_end_implicit = 1
- if explicit_end:
- self.document_end_implicit = 0
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_alias_id = 0
- self.closed = -1
-
- def __dealloc__(self):
- yaml_emitter_delete(&self.emitter)
-
- def dispose(self):
- pass
-
- cdef object _emitter_error(self):
- if self.emitter.error == YAML_MEMORY_ERROR:
- return MemoryError
- elif self.emitter.error == YAML_EMITTER_ERROR:
- if PY_MAJOR_VERSION < 3:
- problem = self.emitter.problem
- else:
- problem = PyUnicode_FromString(self.emitter.problem)
- return EmitterError(problem)
- if PY_MAJOR_VERSION < 3:
- raise ValueError("no emitter error")
- else:
- raise ValueError(u"no emitter error")
-
- cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
- cdef yaml_encoding_t encoding
- cdef yaml_version_directive_t version_directive_value
- cdef yaml_version_directive_t *version_directive
- cdef yaml_tag_directive_t tag_directives_value[128]
- cdef yaml_tag_directive_t *tag_directives_start
- cdef yaml_tag_directive_t *tag_directives_end
- cdef int implicit
- cdef int plain_implicit
- cdef int quoted_implicit
- cdef char *anchor
- cdef char *tag
- cdef char *value
- cdef int length
- cdef yaml_scalar_style_t scalar_style
- cdef yaml_sequence_style_t sequence_style
- cdef yaml_mapping_style_t mapping_style
- event_class = event_object.__class__
- if event_class is StreamStartEvent:
- encoding = YAML_UTF8_ENCODING
- if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
- encoding = YAML_UTF16LE_ENCODING
- elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
- encoding = YAML_UTF16BE_ENCODING
- if event_object.encoding is None:
- self.dump_unicode = 1
- if self.dump_unicode == 1:
- encoding = YAML_UTF8_ENCODING
- yaml_stream_start_event_initialize(event, encoding)
- elif event_class is StreamEndEvent:
- yaml_stream_end_event_initialize(event)
- elif event_class is DocumentStartEvent:
- version_directive = NULL
- if event_object.version:
- version_directive_value.major = event_object.version[0]
- version_directive_value.minor = event_object.version[1]
- version_directive = &version_directive_value
- tag_directives_start = NULL
- tag_directives_end = NULL
- if event_object.tags:
- if len(event_object.tags) > 128:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("too many tags")
- else:
- raise ValueError(u"too many tags")
- tag_directives_start = tag_directives_value
- tag_directives_end = tag_directives_value
- cache = []
- for handle in event_object.tags:
- prefix = event_object.tags[handle]
- if PyUnicode_CheckExact(handle):
- handle = PyUnicode_AsUTF8String(handle)
- cache.append(handle)
- if not PyString_CheckExact(handle):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag handle must be a string")
- else:
- raise TypeError(u"tag handle must be a string")
+ if canonical:
+ yaml_emitter_set_canonical(&self.emitter, 1)
+ if indent is not None:
+ yaml_emitter_set_indent(&self.emitter, indent)
+ if width is not None:
+ yaml_emitter_set_width(&self.emitter, width)
+ if allow_unicode:
+ yaml_emitter_set_unicode(&self.emitter, 1)
+ if line_break is not None:
+ if line_break == '\r':
+ yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
+ elif line_break == '\n':
+ yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
+ elif line_break == '\r\n':
+ yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
+ self.document_start_implicit = 1
+ if explicit_start:
+ self.document_start_implicit = 0
+ self.document_end_implicit = 1
+ if explicit_end:
+ self.document_end_implicit = 0
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+ self.closed = -1
+
+ def __dealloc__(self):
+ yaml_emitter_delete(&self.emitter)
+
+ def dispose(self):
+ pass
+
+ cdef object _emitter_error(self):
+ if self.emitter.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.emitter.error == YAML_EMITTER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ problem = self.emitter.problem
+ else:
+ problem = PyUnicode_FromString(self.emitter.problem)
+ return EmitterError(problem)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no emitter error")
+ else:
+ raise ValueError(u"no emitter error")
+
+ cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
+ cdef yaml_encoding_t encoding
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ event_class = event_object.__class__
+ if event_class is StreamStartEvent:
+ encoding = YAML_UTF8_ENCODING
+ if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ if event_object.encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(event, encoding)
+ elif event_class is StreamEndEvent:
+ yaml_stream_end_event_initialize(event)
+ elif event_class is DocumentStartEvent:
+ version_directive = NULL
+ if event_object.version:
+ version_directive_value.major = event_object.version[0]
+ version_directive_value.minor = event_object.version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if event_object.tags:
+ if len(event_object.tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ cache = []
+ for handle in event_object.tags:
+ prefix = event_object.tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
tag_directives_end.handle = PyString_AS_STRING(handle)
- if PyUnicode_CheckExact(prefix):
- prefix = PyUnicode_AsUTF8String(prefix)
- cache.append(prefix)
- if not PyString_CheckExact(prefix):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag prefix must be a string")
- else:
- raise TypeError(u"tag prefix must be a string")
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
tag_directives_end.prefix = PyString_AS_STRING(prefix)
- tag_directives_end = tag_directives_end+1
- implicit = 1
- if event_object.explicit:
- implicit = 0
- if yaml_document_start_event_initialize(event, version_directive,
- tag_directives_start, tag_directives_end, implicit) == 0:
- raise MemoryError
- elif event_class is DocumentEndEvent:
- implicit = 1
- if event_object.explicit:
- implicit = 0
- yaml_document_end_event_initialize(event, implicit)
- elif event_class is AliasEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
+ tag_directives_end = tag_directives_end+1
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ if yaml_document_start_event_initialize(event, version_directive,
+ tag_directives_start, tag_directives_end, implicit) == 0:
+ raise MemoryError
+ elif event_class is DocumentEndEvent:
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ yaml_document_end_event_initialize(event, implicit)
+ elif event_class is AliasEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
if yaml_alias_event_initialize(event, anchor) == 0:
- raise MemoryError
- elif event_class is ScalarEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- value_object = event_object.value
- if PyUnicode_CheckExact(value_object):
- value_object = PyUnicode_AsUTF8String(value_object)
- if not PyString_CheckExact(value_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("value must be a string")
- else:
- raise TypeError(u"value must be a string")
- value = PyString_AS_STRING(value_object)
- length = PyString_GET_SIZE(value_object)
- plain_implicit = 0
- quoted_implicit = 0
- if event_object.implicit is not None:
- plain_implicit = event_object.implicit[0]
- quoted_implicit = event_object.implicit[1]
- style_object = event_object.style
- scalar_style = YAML_PLAIN_SCALAR_STYLE
- if style_object == "'" or style_object == u"'":
- scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
- elif style_object == "\"" or style_object == u"\"":
- scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
- elif style_object == "|" or style_object == u"|":
- scalar_style = YAML_LITERAL_SCALAR_STYLE
- elif style_object == ">" or style_object == u">":
- scalar_style = YAML_FOLDED_SCALAR_STYLE
+ raise MemoryError
+ elif event_class is ScalarEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = event_object.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ plain_implicit = 0
+ quoted_implicit = 0
+ if event_object.implicit is not None:
+ plain_implicit = event_object.implicit[0]
+ quoted_implicit = event_object.implicit[1]
+ style_object = event_object.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
if yaml_scalar_event_initialize(event, anchor, tag, value, length,
- plain_implicit, quoted_implicit, scalar_style) == 0:
- raise MemoryError
- elif event_class is SequenceStartEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- implicit = 0
- if event_object.implicit:
- implicit = 1
- sequence_style = YAML_BLOCK_SEQUENCE_STYLE
- if event_object.flow_style:
- sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if event_object.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
if yaml_sequence_start_event_initialize(event, anchor, tag,
- implicit, sequence_style) == 0:
- raise MemoryError
- elif event_class is MappingStartEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- implicit = 0
- if event_object.implicit:
- implicit = 1
- mapping_style = YAML_BLOCK_MAPPING_STYLE
- if event_object.flow_style:
- mapping_style = YAML_FLOW_MAPPING_STYLE
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ elif event_class is MappingStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if event_object.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
if yaml_mapping_start_event_initialize(event, anchor, tag,
- implicit, mapping_style) == 0:
- raise MemoryError
- elif event_class is SequenceEndEvent:
- yaml_sequence_end_event_initialize(event)
- elif event_class is MappingEndEvent:
- yaml_mapping_end_event_initialize(event)
- else:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("invalid event %s" % event_object)
- else:
- raise TypeError(u"invalid event %s" % event_object)
- return 1
-
- def emit(self, event_object):
- cdef yaml_event_t event
- self._object_to_event(event_object, &event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
-
- def open(self):
- cdef yaml_event_t event
- cdef yaml_encoding_t encoding
- if self.closed == -1:
- if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
- encoding = YAML_UTF16LE_ENCODING
- elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
- encoding = YAML_UTF16BE_ENCODING
- else:
- encoding = YAML_UTF8_ENCODING
- if self.use_encoding is None:
- self.dump_unicode = 1
- if self.dump_unicode == 1:
- encoding = YAML_UTF8_ENCODING
- yaml_stream_start_event_initialize(&event, encoding)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.closed = 0
- elif self.closed == 1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError(u"serializer is closed")
- else:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is already opened")
- else:
- raise SerializerError(u"serializer is already opened")
-
- def close(self):
- cdef yaml_event_t event
- if self.closed == -1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is not opened")
- else:
- raise SerializerError(u"serializer is not opened")
- elif self.closed == 0:
- yaml_stream_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.closed = 1
-
- def serialize(self, node):
- cdef yaml_event_t event
- cdef yaml_version_directive_t version_directive_value
- cdef yaml_version_directive_t *version_directive
- cdef yaml_tag_directive_t tag_directives_value[128]
- cdef yaml_tag_directive_t *tag_directives_start
- cdef yaml_tag_directive_t *tag_directives_end
- if self.closed == -1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is not opened")
- else:
- raise SerializerError(u"serializer is not opened")
- elif self.closed == 1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError(u"serializer is closed")
- cache = []
- version_directive = NULL
- if self.use_version:
- version_directive_value.major = self.use_version[0]
- version_directive_value.minor = self.use_version[1]
- version_directive = &version_directive_value
- tag_directives_start = NULL
- tag_directives_end = NULL
- if self.use_tags:
- if len(self.use_tags) > 128:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("too many tags")
- else:
- raise ValueError(u"too many tags")
- tag_directives_start = tag_directives_value
- tag_directives_end = tag_directives_value
- for handle in self.use_tags:
- prefix = self.use_tags[handle]
- if PyUnicode_CheckExact(handle):
- handle = PyUnicode_AsUTF8String(handle)
- cache.append(handle)
- if not PyString_CheckExact(handle):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag handle must be a string")
- else:
- raise TypeError(u"tag handle must be a string")
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceEndEvent:
+ yaml_sequence_end_event_initialize(event)
+ elif event_class is MappingEndEvent:
+ yaml_mapping_end_event_initialize(event)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("invalid event %s" % event_object)
+ else:
+ raise TypeError(u"invalid event %s" % event_object)
+ return 1
+
+ def emit(self, event_object):
+ cdef yaml_event_t event
+ self._object_to_event(event_object, &event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+
+ def open(self):
+ cdef yaml_event_t event
+ cdef yaml_encoding_t encoding
+ if self.closed == -1:
+ if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ else:
+ encoding = YAML_UTF8_ENCODING
+ if self.use_encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(&event, encoding)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 0
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is already opened")
+ else:
+ raise SerializerError(u"serializer is already opened")
+
+ def close(self):
+ cdef yaml_event_t event
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 0:
+ yaml_stream_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 1
+
+ def serialize(self, node):
+ cdef yaml_event_t event
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ cache = []
+ version_directive = NULL
+ if self.use_version:
+ version_directive_value.major = self.use_version[0]
+ version_directive_value.minor = self.use_version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if self.use_tags:
+ if len(self.use_tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ for handle in self.use_tags:
+ prefix = self.use_tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
tag_directives_end.handle = PyString_AS_STRING(handle)
- if PyUnicode_CheckExact(prefix):
- prefix = PyUnicode_AsUTF8String(prefix)
- cache.append(prefix)
- if not PyString_CheckExact(prefix):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag prefix must be a string")
- else:
- raise TypeError(u"tag prefix must be a string")
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
tag_directives_end.prefix = PyString_AS_STRING(prefix)
- tag_directives_end = tag_directives_end+1
- if yaml_document_start_event_initialize(&event, version_directive,
- tag_directives_start, tag_directives_end,
- self.document_start_implicit) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self._anchor_node(node)
- self._serialize_node(node, None, None)
- yaml_document_end_event_initialize(&event, self.document_end_implicit)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_alias_id = 0
-
- cdef int _anchor_node(self, object node) except 0:
- if node in self.anchors:
- if self.anchors[node] is None:
- self.last_alias_id = self.last_alias_id+1
- self.anchors[node] = u"id%03d" % self.last_alias_id
- else:
- self.anchors[node] = None
- node_class = node.__class__
- if node_class is SequenceNode:
- for item in node.value:
- self._anchor_node(item)
- elif node_class is MappingNode:
- for key, value in node.value:
- self._anchor_node(key)
- self._anchor_node(value)
- return 1
-
- cdef int _serialize_node(self, object node, object parent, object index) except 0:
- cdef yaml_event_t event
- cdef int implicit
- cdef int plain_implicit
- cdef int quoted_implicit
- cdef char *anchor
- cdef char *tag
- cdef char *value
- cdef int length
- cdef int item_index
- cdef yaml_scalar_style_t scalar_style
- cdef yaml_sequence_style_t sequence_style
- cdef yaml_mapping_style_t mapping_style
- anchor_object = self.anchors[node]
- anchor = NULL
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- if node in self.serialized_nodes:
+ tag_directives_end = tag_directives_end+1
+ if yaml_document_start_event_initialize(&event, version_directive,
+ tag_directives_start, tag_directives_end,
+ self.document_start_implicit) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self._anchor_node(node)
+ self._serialize_node(node, None, None)
+ yaml_document_end_event_initialize(&event, self.document_end_implicit)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
+ cdef int _anchor_node(self, object node) except 0:
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.last_alias_id = self.last_alias_id+1
+ self.anchors[node] = u"id%03d" % self.last_alias_id
+ else:
+ self.anchors[node] = None
+ node_class = node.__class__
+ if node_class is SequenceNode:
+ for item in node.value:
+ self._anchor_node(item)
+ elif node_class is MappingNode:
+ for key, value in node.value:
+ self._anchor_node(key)
+ self._anchor_node(value)
+ return 1
+
+ cdef int _serialize_node(self, object node, object parent, object index) except 0:
+ cdef yaml_event_t event
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef int item_index
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ anchor_object = self.anchors[node]
+ anchor = NULL
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if node in self.serialized_nodes:
if yaml_alias_event_initialize(&event, anchor) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- else:
- node_class = node.__class__
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if node_class is ScalarNode:
- plain_implicit = 0
- quoted_implicit = 0
- tag_object = node.tag
- if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
- plain_implicit = 1
- if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
- quoted_implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- value_object = node.value
- if PyUnicode_CheckExact(value_object):
- value_object = PyUnicode_AsUTF8String(value_object)
- if not PyString_CheckExact(value_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("value must be a string")
- else:
- raise TypeError(u"value must be a string")
- value = PyString_AS_STRING(value_object)
- length = PyString_GET_SIZE(value_object)
- style_object = node.style
- scalar_style = YAML_PLAIN_SCALAR_STYLE
- if style_object == "'" or style_object == u"'":
- scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
- elif style_object == "\"" or style_object == u"\"":
- scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
- elif style_object == "|" or style_object == u"|":
- scalar_style = YAML_LITERAL_SCALAR_STYLE
- elif style_object == ">" or style_object == u">":
- scalar_style = YAML_FOLDED_SCALAR_STYLE
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ else:
+ node_class = node.__class__
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if node_class is ScalarNode:
+ plain_implicit = 0
+ quoted_implicit = 0
+ tag_object = node.tag
+ if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
+ plain_implicit = 1
+ if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
+ quoted_implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = node.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ style_object = node.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
if yaml_scalar_event_initialize(&event, anchor, tag, value, length,
- plain_implicit, quoted_implicit, scalar_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- elif node_class is SequenceNode:
- implicit = 0
- tag_object = node.tag
- if self.resolve(SequenceNode, node.value, True) == tag_object:
- implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- sequence_style = YAML_BLOCK_SEQUENCE_STYLE
- if node.flow_style:
- sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is SequenceNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(SequenceNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if node.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
if yaml_sequence_start_event_initialize(&event, anchor, tag,
- implicit, sequence_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- item_index = 0
- for item in node.value:
- self._serialize_node(item, node, item_index)
- item_index = item_index+1
- yaml_sequence_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- elif node_class is MappingNode:
- implicit = 0
- tag_object = node.tag
- if self.resolve(MappingNode, node.value, True) == tag_object:
- implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- mapping_style = YAML_BLOCK_MAPPING_STYLE
- if node.flow_style:
- mapping_style = YAML_FLOW_MAPPING_STYLE
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ item_index = 0
+ for item in node.value:
+ self._serialize_node(item, node, item_index)
+ item_index = item_index+1
+ yaml_sequence_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is MappingNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(MappingNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if node.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
if yaml_mapping_start_event_initialize(&event, anchor, tag,
- implicit, mapping_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- for item_key, item_value in node.value:
- self._serialize_node(item_key, node, None)
- self._serialize_node(item_value, node, item_key)
- yaml_mapping_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.ascend_resolver()
- return 1
-
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ for item_key, item_value in node.value:
+ self._serialize_node(item_key, node, None)
+ self._serialize_node(item_value, node, item_key)
+ yaml_mapping_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.ascend_resolver()
+ return 1
+
cdef int output_handler(void *data, char *buffer, size_t size) except 0:
- cdef CEmitter emitter
- emitter = <CEmitter>data
- if emitter.dump_unicode == 0:
+ cdef CEmitter emitter
+ emitter = <CEmitter>data
+ if emitter.dump_unicode == 0:
value = PyString_FromStringAndSize(buffer, size)
- else:
+ else:
value = PyUnicode_DecodeUTF8(buffer, size, 'strict')
- emitter.stream.write(value)
- return 1
-
+ emitter.stream.write(value)
+ return 1
+
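
The CParser and CEmitter extension types above reimplement PyYAML's parse/compose
and serialize/emit pipelines on top of libyaml, raising the same ComposerError and
SerializerError exceptions as the pure-Python classes. A minimal usage sketch,
assuming the standard PyYAML fallback-import convention (the CLoader/CDumper names
exist only when this C extension was built):

    import yaml

    try:
        # Prefer the libyaml-backed classes when the C extension is available.
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        # Fall back to the pure-Python implementations.
        from yaml import Loader, Dumper

    data = yaml.load("a: [1, 2, 3]", Loader=Loader)   # {'a': [1, 2, 3]}
    text = yaml.dump(data, Dumper=Dumper, default_flow_style=False)

Selecting the classes once at import time keeps every call site identical whether
or not the extension is installed.
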
diff --git a/contrib/python/PyYAML/py2/yaml/composer.py b/contrib/python/PyYAML/py2/yaml/composer.py
index 8e9c68ae43..df85ef653b 100644
--- a/contrib/python/PyYAML/py2/yaml/composer.py
+++ b/contrib/python/PyYAML/py2/yaml/composer.py
@@ -1,139 +1,139 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from error import MarkedYAMLError
-from events import *
-from nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer(object):
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
-        # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor.encode('utf-8'), event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurrence"
- % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurrence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
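For context, the Composer above turns parser events into a node graph, resolving &anchor/*alias pairs through self.anchors so an alias reuses the already-composed node. A minimal sketch of the resulting behaviour through the public API:

    import yaml

    doc = """
    base: &b {x: 1}
    copy: *b
    """
    data = yaml.safe_load(doc)
    # The alias composes to the same node, so construction yields one shared object.
    print(data['copy'] is data['base'])   # True
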
diff --git a/contrib/python/PyYAML/py2/yaml/constructor.py b/contrib/python/PyYAML/py2/yaml/constructor.py
index f4dd7b0054..ff4e36828e 100644
--- a/contrib/python/PyYAML/py2/yaml/constructor.py
+++ b/contrib/python/PyYAML/py2/yaml/constructor.py
@@ -1,4 +1,4 @@
-
+
__all__ = [
'BaseConstructor',
'SafeConstructor',
@@ -7,17 +7,17 @@ __all__ = [
'Constructor',
'ConstructorError'
]
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
class timezone(datetime.tzinfo):
def __init__(self, offset):
@@ -47,21 +47,21 @@ class timezone(datetime.tzinfo):
__repr__ = __str__ = tzname
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # If there are more documents available?
- return self.check_node()
-
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
def check_state_key(self, key):
"""Block special attributes/methods from being set in a newly created
object, to prevent user-controlled methods from being called during
@@ -70,475 +70,475 @@ class BaseConstructor(object):
raise ConstructorError(None, None,
"blacklisted key '%s' in instance state found" % (key,), None)
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
if tag_prefix is not None and node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = generator.next()
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
- add_constructor = classmethod(add_constructor)
-
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
- add_multi_constructor = classmethod(add_multi_constructor)
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError), exc:
- raise ConstructorError(None, None,
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
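flatten_mapping above implements YAML 1.1 merge keys (<<): merged pairs are prepended to node.value, so keys written out in the mapping itself override merged ones. Observable behaviour, sketched:

    import yaml

    doc = """
    defaults: &d
      retries: 3
      timeout: 10
    job:
      <<: *d
      timeout: 30
    """
    # The explicit timeout wins because merged pairs are prepended, not appended.
    print(yaml.safe_load(doc)['job'])   # {'retries': 3, 'timeout': 30}
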
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
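bool_values gives construct_yaml_bool the full YAML 1.1 boolean vocabulary, not just true/false:

    import yaml

    # yes/no and on/off resolve to booleans under the YAML 1.1 rules used here.
    print(yaml.safe_load('[yes, NO, on, Off]'))   # [True, False, True, False]
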
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
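construct_yaml_int handles sign, underscores, binary/hex/octal prefixes, and base-60 (sexagesimal) notation; for example:

    import yaml

    print(yaml.safe_load('0x1A'))     # 26   (hexadecimal)
    print(yaml.safe_load('0b1010'))   # 10   (binary)
    print(yaml.safe_load('012'))      # 10   (a leading 0 means octal in YAML 1.1)
    print(yaml.safe_load('1:30'))     # 90   (sexagesimal: 1*60 + 30)
    print(yaml.safe_load('1_000'))    # 1000 (underscores are stripped first)
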
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
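The inf_value loop squares 1e300 until multiplication overflows to the platform's float infinity, and nan_value derives a quiet NaN from it; construct_yaml_float returns these for .inf/.nan and applies the same base-60 rule as the integer path:

    import yaml

    print(yaml.safe_load('.inf'))     # inf
    print(yaml.safe_load('-.inf'))    # -inf
    print(yaml.safe_load('.nan'))     # nan
    print(yaml.safe_load('1:30.5'))   # 90.5 (1*60 + 30.5)
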
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- ur'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
tzinfo = None
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
tzinfo = timezone(delta)
elif values['tz']:
tzinfo = timezone(datetime.timedelta(0))
return datetime.datetime(year, month, day, hour, minute, second, fraction,
tzinfo=tzinfo)
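construct_yaml_timestamp pads the fractional part to microseconds and turns a trailing offset into the timezone helper defined at the top of this file; a date without a time component comes back as a plain date:

    import yaml

    print(yaml.safe_load('2002-12-14'))
    # datetime.date(2002, 12, 14)
    print(yaml.safe_load('2001-12-14 21:59:43.10 -5'))
    # an aware datetime: 2001-12-14 21:59:43.100000 with a -5:00 offset
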
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
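These constructors yield an empty container first and fill it afterwards; construct_object records the yielded object before the generator is driven to completion, which is what makes self-referencing documents constructible:

    import yaml

    # A sequence that contains itself via its own anchor.
    data = yaml.safe_load('&a [1, *a]')
    print(data[1] is data)   # True: the list was registered before it was filled
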
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
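add_constructor, defined above as a classmethod, doubles as the public extension point for application tags. A sketch with a made-up !point tag (the tag name and helper are illustrative, not part of PyYAML):

    import yaml

    def construct_point(loader, node):
        # Hypothetical constructor: build a tuple from a two-element sequence.
        x, y = loader.construct_sequence(node)
        return (x, y)

    yaml.SafeLoader.add_constructor(u'!point', construct_point)
    print(yaml.safe_load('!point [1, 2]'))   # (1, 2)
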
class FullConstructor(SafeConstructor):
# 'extend' is blacklisted because it is used by
# construct_python_object_apply to add `listitems` to a newly generated
# python instance
def get_state_keys_blacklist(self):
return ['^extend$', '^__.*__$']
-
+
def get_state_keys_blacklist_regexp(self):
if not hasattr(self, 'state_keys_blacklist_regexp'):
self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
return self.state_keys_blacklist_regexp
- def construct_python_str(self, node):
- return self.construct_scalar(node).encode('utf-8')
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_long(self, node):
- return long(self.construct_yaml_int(node))
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
def find_python_module(self, name, mark, unsafe=False):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
if unsafe:
try:
__import__(name)
@@ -546,19 +546,19 @@ class FullConstructor(SafeConstructor):
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
if name not in sys.modules:
- raise ConstructorError("while constructing a Python module", mark,
+ raise ConstructorError("while constructing a Python module", mark,
"module %r is not imported" % name.encode('utf-8'), mark)
- return sys.modules[name]
-
+ return sys.modules[name]
+
def find_python_name(self, name, mark, unsafe=False):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = '__builtin__'
- object_name = name
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
if unsafe:
try:
__import__(module_name)
@@ -566,168 +566,168 @@ class FullConstructor(SafeConstructor):
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
if module_name not in sys.modules:
- raise ConstructorError("while constructing a Python object", mark,
+ raise ConstructorError("while constructing a Python object", mark,
"module %r is not imported" % module_name.encode('utf-8'), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r" % (object_name.encode('utf-8'),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- class classobj: pass
-
- def make_python_instance(self, suffix, node,
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False, unsafe=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
if not (unsafe or isinstance(cls, type) or isinstance(cls, type(self.classobj))):
raise ConstructorError("while constructing a Python instance", node.start_mark,
"expected a class, but found %r" % type(cls),
node.start_mark)
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
def set_python_instance_state(self, instance, state, unsafe=False):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
if not unsafe and state:
for key in state.keys():
self.check_state_key(key)
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
if not unsafe:
self.check_state_key(key)
setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
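construct_python_object_apply accepts either a bare argument sequence or a mapping with args/kwds/state/listitems/dictitems. Calling arbitrary Python callables this way is only reachable through an explicitly unsafe loader:

    import yaml

    doc = '!!python/object/apply:datetime.date [2020, 1, 2]'
    # UnsafeLoader opts in to arbitrary object construction; never use it on untrusted input.
    print(yaml.load(doc, Loader=yaml.UnsafeLoader))   # datetime.date(2020, 1, 2)
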
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
+ u'tag:yaml.org,2002:python/none',
FullConstructor.construct_yaml_null)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
+ u'tag:yaml.org,2002:python/bool',
FullConstructor.construct_yaml_bool)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
+ u'tag:yaml.org,2002:python/str',
FullConstructor.construct_python_str)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
+ u'tag:yaml.org,2002:python/unicode',
FullConstructor.construct_python_unicode)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
+ u'tag:yaml.org,2002:python/int',
FullConstructor.construct_yaml_int)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
+ u'tag:yaml.org,2002:python/long',
FullConstructor.construct_python_long)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
+ u'tag:yaml.org,2002:python/float',
FullConstructor.construct_yaml_float)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
+ u'tag:yaml.org,2002:python/complex',
FullConstructor.construct_python_complex)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
+ u'tag:yaml.org,2002:python/list',
FullConstructor.construct_yaml_seq)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
+ u'tag:yaml.org,2002:python/tuple',
FullConstructor.construct_python_tuple)
-
+
FullConstructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
+ u'tag:yaml.org,2002:python/dict',
FullConstructor.construct_yaml_map)
-
+
FullConstructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
+ u'tag:yaml.org,2002:python/name:',
FullConstructor.construct_python_name)
-
+
class UnsafeConstructor(FullConstructor):
def find_python_module(self, name, mark):
diff --git a/contrib/python/PyYAML/py2/yaml/cyaml.py b/contrib/python/PyYAML/py2/yaml/cyaml.py
index 3abdfb054f..768b49d6b9 100644
--- a/contrib/python/PyYAML/py2/yaml/cyaml.py
+++ b/contrib/python/PyYAML/py2/yaml/cyaml.py
@@ -1,32 +1,32 @@
-
+
__all__ = [
'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper'
]
-
+
from yaml._yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
class CFullLoader(CParser, FullConstructor, Resolver):
def __init__(self, stream):
@@ -41,61 +41,61 @@ class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
UnsafeConstructor.__init__(self)
Resolver.__init__(self)
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
+ Resolver.__init__(self)
+
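cyaml.py wires the LibYAML-based CParser/CEmitter to the same constructor, representer, and resolver mix-ins as the pure-Python classes, which is why the usual import-with-fallback pattern works:

    import yaml

    try:
        # LibYAML-backed loader, present when PyYAML was built with the C extension.
        from yaml import CSafeLoader as Loader
    except ImportError:
        from yaml import SafeLoader as Loader

    print(yaml.load('a: 1', Loader=Loader))   # {'a': 1}
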
diff --git a/contrib/python/PyYAML/py2/yaml/dumper.py b/contrib/python/PyYAML/py2/yaml/dumper.py
index 42cc2d48b7..f9cd49fda5 100644
--- a/contrib/python/PyYAML/py2/yaml/dumper.py
+++ b/contrib/python/PyYAML/py2/yaml/dumper.py
@@ -1,62 +1,62 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
default_style=None, default_flow_style=False,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
- Resolver.__init__(self)
-
+ Resolver.__init__(self)
+
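Each Dumper variant simply threads its keyword arguments through to Emitter, Serializer, and the chosen representer, which is why the same options are accepted by yaml.dump:

    import yaml

    print(yaml.dump({'b': 2, 'a': 1}, default_flow_style=False, sort_keys=True))
    # a: 1
    # b: 2
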
diff --git a/contrib/python/PyYAML/py2/yaml/emitter.py b/contrib/python/PyYAML/py2/yaml/emitter.py
index d7207037be..23c25ca80a 100644
--- a/contrib/python/PyYAML/py2/yaml/emitter.py
+++ b/contrib/python/PyYAML/py2/yaml/emitter.py
@@ -1,1144 +1,1144 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
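The grammar in the comment above is exactly what yaml.emit consumes; a well-formed event stream for a single scalar document looks like:

    import yaml

    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(),
        yaml.ScalarEvent(anchor=None, tag=None, implicit=(True, True), value=u'hello'),
        yaml.DocumentEndEvent(),
        yaml.StreamEndEvent(),
    ]
    print(yaml.emit(events))   # hello
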
+
import sys
-from error import YAMLError
-from events import *
-
+from error import YAMLError
+from events import *
+
has_ucs4 = sys.maxunicode > 0xffff
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter(object):
-
- DEFAULT_TAG_PREFIXES = {
- u'!' : u'!',
- u'tag:yaml.org,2002:' : u'!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
# Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
-    # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = self.event.tags.keys()
- handles.sort()
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(u':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator(u'-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(u':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (handle.encode('utf-8')))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch.encode('utf-8'), handle.encode('utf-8')))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = self.tag_prefixes.keys()
- prefixes.sort()
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch.encode('utf-8'), anchor.encode('utf-8')))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
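
The formatting knobs above are deliberately clamped: an `indent` outside 1 < indent < 10 silently falls back to 2, and a `width` no larger than twice the indent falls back to 80. A sketch with PyYAML installed:

    import yaml
    doc = {'outer': {'inner': 1}}
    print(yaml.dump(doc, indent=4, default_flow_style=False))
    # outer:
    #     inner: 1
    print(yaml.dump(doc, indent=40, default_flow_style=False))  # 40 rejected -> 2
    # outer:
    #   inner: 1
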
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
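
The lookahead buys the emitter just enough context to commit to a style: one extra event after DOCUMENT-START, two after SEQUENCE-START, three after MAPPING-START, which suffices to spot an empty collection and print it inline even when block style was requested. For example:

    import yaml
    print(yaml.dump({'empty_list': [], 'empty_map': {}}, default_flow_style=False))
    # empty_list: []
    # empty_map: {}
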
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
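
The `implicit` computation above is why a lone plain document needs no `---`: the marker reappears as soon as something forces it, such as explicit_start or a %YAML directive. A sketch (exact line layout may vary slightly by PyYAML version):

    import yaml
    print(yaml.dump({'a': 1}))                       # a: 1
    print(yaml.dump({'a': 1}, explicit_start=True))  # --- then a: 1
    print(yaml.dump({'a': 1}, version=(1, 1)))       # %YAML 1.1, then an explicit ---
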
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
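
The `length < 128` cap above is why very long mapping keys lose their inline form and fall back to the explicit `?` key indicator:

    import yaml
    print(yaml.dump({'k' * 200: 1}))
    # ? kkkkkk...kkk
    # : 1
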
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
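
In other words, the style ladder runs plain -> requested block style -> single-quoted -> double-quoted, with double quotes as the always-available fallback. A few probes through the public API:

    import yaml
    print(yaml.dump('plain words'))  # plain words
    print(yaml.dump('has: colon'))   # 'has: colon'  (block indicator bars plain)
    print(yaml.dump('tab\there'))    # "tab\there"   (special char forces double quotes)
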
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
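
Characters outside the tag-URI alphabet are percent-encoded, and a tag matching no registered prefix is emitted verbatim as `!<...>`. A sketch via the event API, using a non-implicit tag that carries a space:

    import yaml
    events = [
        yaml.StreamStartEvent(), yaml.DocumentStartEvent(),
        yaml.ScalarEvent(anchor=None, tag='tag:example.com,2002:x y',
                         implicit=(False, False), value='v'),
        yaml.DocumentEndEvent(), yaml.StreamEndEvent(),
    ]
    print(yaml.emit(events))  # roughly: !<tag:example.com,2002:x%20y> 'v'
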
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
preceded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
-                # Some indicators cannot appear within a scalar either.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear within a scalar either.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
if ch == u'#' and preceded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
or u'\uE000' <= ch <= u'\uFFFD'
or (u'\U00010000' <= ch < u'\U0010ffff')) and ch != u'\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E'
- or (self.allow_unicode
- and (u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += unicode(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
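
The analysis feeds directly into style choice: leading or trailing whitespace rules out plain scalars, and any real line break rules out a multiline plain rendering. Probing the same rules through yaml.dump:

    import yaml
    print(yaml.dump(' leading'))     # ' leading'   (leading space bars plain)
    print(yaml.dump('trailing '))    # 'trailing '  (trailing space bars plain and block)
    print(yaml.dump('line\nbreak'))  # quoted across lines; never a multiline plain
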
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
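
Note the BOM is only prepended for utf-16 encodings; utf-8 output starts bare. A sketch, keeping in mind dump returns bytes when an encoding is given:

    import yaml
    out = yaml.dump({'a': 1}, encoding='utf-16-le')
    print(out[:2])                                    # b'\xff\xfe' (the BOM)
    print(yaml.dump({'a': 1}, encoding='utf-8')[:2])  # b'a:' -- no BOM
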
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
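+ # Characters that must be written as short escapes inside double-quoted
+ # scalars, mapped to the letter that follows the backslash (so u'\x07'
+ # is emitted as \a, u'\u2028' as \L, and so on).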
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
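+ # A block scalar needs an explicit indentation indicator when its
+ # content starts with a space or a break, and a chomping indicator
+ # when it does not end with exactly one break: '-' strips trailing
+ # breaks, '+' keeps them all. For example, u'\nfoo' yields u'2-'
+ # when best_indent is 2.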
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
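
Taken together, the writers above are easiest to observe through the public
dump API. A minimal sketch (assuming the in-tree PyYAML package imports as
`yaml`; the outputs noted in comments are the usual block-style defaults):

    import yaml

    print(yaml.dump({'plain': 'abc'}))      # plain scalar: "plain: abc"
    print(yaml.dump({'quoted': 'a: b'}))    # ': ' is an indicator, so the value is single-quoted
    print(yaml.dump({'escaped': u'\x07'}))  # non-printable, so double-quoted as "\a"
    print(yaml.dump('line1\nline2\n', default_style='|'))  # literal block scalar
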
diff --git a/contrib/python/PyYAML/py2/yaml/error.py b/contrib/python/PyYAML/py2/yaml/error.py
index 905e265ab1..577686db5f 100644
--- a/contrib/python/PyYAML/py2/yaml/error.py
+++ b/contrib/python/PyYAML/py2/yaml/error.py
@@ -1,75 +1,75 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark(object):
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end].encode('utf-8')
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
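
For reference, these classes surface through the public API as marked parse
errors; a minimal sketch:

    import yaml

    try:
        yaml.safe_load("key: [unclosed")
    except yaml.YAMLError as exc:
        print(exc)  # MarkedYAMLError.__str__: context, marks (with snippets), problem
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            print("%d:%d" % (mark.line + 1, mark.column + 1))  # 1-based, matching Mark.__str__
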
diff --git a/contrib/python/PyYAML/py2/yaml/events.py b/contrib/python/PyYAML/py2/yaml/events.py
index 6325aedb77..f79ad389cb 100644
--- a/contrib/python/PyYAML/py2/yaml/events.py
+++ b/contrib/python/PyYAML/py2/yaml/events.py
@@ -1,86 +1,86 @@
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
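
The event classes above are what `yaml.parse` yields; a short sketch of the
stream produced for a two-item sequence:

    import yaml

    for event in yaml.parse("- a\n- b\n"):
        print(event)
    # StreamStartEvent(), DocumentStartEvent(), SequenceStartEvent(...),
    # ScalarEvent(... value=u'a'), ScalarEvent(... value=u'b'),
    # SequenceEndEvent(), DocumentEndEvent(), StreamEndEvent()
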
diff --git a/contrib/python/PyYAML/py2/yaml/loader.py b/contrib/python/PyYAML/py2/yaml/loader.py
index 5ffedac258..4d773c3cc1 100644
--- a/contrib/python/PyYAML/py2/yaml/loader.py
+++ b/contrib/python/PyYAML/py2/yaml/loader.py
@@ -1,23 +1,23 @@
-
+
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
def __init__(self, stream):
@@ -28,26 +28,26 @@ class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
FullConstructor.__init__(self)
Resolver.__init__(self)
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
# UnsafeLoader is the same as Loader (which is and was always unsafe on
# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
# FullLoader should be able to load almost all YAML safely. Loader is left intact
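
A quick sketch of how the loader variants differ in practice:

    import yaml

    yaml.load("a: 1", Loader=yaml.BaseLoader)  # {'a': '1'}: every scalar stays a string
    yaml.load("a: 1", Loader=yaml.SafeLoader)  # {'a': 1}: standard tags only
    yaml.load("a: 1", Loader=yaml.FullLoader)  # {'a': 1}: most tags, but no arbitrary objects
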
diff --git a/contrib/python/PyYAML/py2/yaml/nodes.py b/contrib/python/PyYAML/py2/yaml/nodes.py
index a7858b88e1..c4f070c41e 100644
--- a/contrib/python/PyYAML/py2/yaml/nodes.py
+++ b/contrib/python/PyYAML/py2/yaml/nodes.py
@@ -1,49 +1,49 @@
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
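
These node classes form the representation graph returned by `yaml.compose`;
a small sketch:

    import yaml

    node = yaml.compose("a: [1, 2]")
    print(node.id)              # 'mapping'
    key, value = node.value[0]  # MappingNode.value is a list of (key, value) node pairs
    print(key.tag)              # tag:yaml.org,2002:str
    print("%s %s" % (value.id, value.tag))  # sequence tag:yaml.org,2002:seq
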
diff --git a/contrib/python/PyYAML/py2/yaml/parser.py b/contrib/python/PyYAML/py2/yaml/parser.py
index 3e7deb45f4..f9e3057f33 100644
--- a/contrib/python/PyYAML/py2/yaml/parser.py
+++ b/contrib/python/PyYAML/py2/yaml/parser.py
@@ -1,589 +1,589 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser(object):
- # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
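+#
+# For example, parsing the document "- a" follows stream ->
+# implicit_document -> block_node -> block_collection -> block_sequence
+# and emits: STREAM-START, DOCUMENT-START, SEQUENCE-START, SCALAR('a'),
+# SEQUENCE-END, DOCUMENT-END, STREAM-END.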
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
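+ # The default tag handles: u'!' stays local, while u'!!' expands into
+ # the standard namespace (e.g. !!str -> u'tag:yaml.org,2002:str').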
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
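The parse_* methods above form a recursive-descent state machine: each method consumes tokens, emits exactly one event, and stores the continuation either in self.state or on the self.states stack. A minimal sketch of the resulting event stream, assuming the vendored package is importable as `yaml` (yaml.parse() is the public entry point that drives this Parser; Python 2, matching the code above):

    import yaml

    document = """
    - first
    - key: value
    """

    # parse() runs the state machine above and yields one event per step:
    # StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent,
    # MappingStartEvent, ..., DocumentEndEvent, StreamEndEvent.
    for event in yaml.parse(document):
        print event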
diff --git a/contrib/python/PyYAML/py2/yaml/reader.py b/contrib/python/PyYAML/py2/yaml/reader.py
index 9ebce06eff..4c42150989 100644
--- a/contrib/python/PyYAML/py2/yaml/reader.py
+++ b/contrib/python/PyYAML/py2/yaml/reader.py
@@ -1,141 +1,141 @@
-# This module contains abstractions for the input stream. You don't have to
-# looks further, there are no pretty code.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - move the current position to `length` characters.
-# reader.index - the number of the current character.
-# reader.line, stream.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
+ # This module contains abstractions for the input stream. You don't have to
+ # look further; there is no pretty code here.
+ #
+ # We define two classes here.
+ #
+ # Mark(source, line, column)
+ # It's just a record, and its only use is producing nice error messages.
+ # The parser does not use it for any other purpose.
+ #
+ # Reader(source, data)
+ # Reader determines the encoding of `data` and converts it to unicode.
+ # Reader provides the following methods and attributes:
+ # reader.peek(length=1) - returns the next `length` characters
+ # reader.forward(length=1) - moves the current position `length` characters forward
+ # reader.index - the index of the current character
+ # reader.line, reader.column - the line and the column of the current character
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
import codecs, re, sys
-
+
has_ucs4 = sys.maxunicode > 0xffff
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, str):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to unicode,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object,
- # - a `unicode` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, unicode):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, str):
- self.name = "<string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = ''
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and len(self.raw_buffer) < 2:
- self.update_raw()
- if not isinstance(self.raw_buffer, unicode):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
if has_ucs4:
NON_PRINTABLE = u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]'
elif sys.platform.startswith('java'):
@@ -145,49 +145,49 @@ class Reader(object):
# Need to use eval here due to the above Jython issue
NON_PRINTABLE = eval(r"u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uFFFD]|(?:^|[^\uD800-\uDBFF])[\uDC00-\uDFFF]|[\uD800-\uDBFF](?:[^\uDC00-\uDFFF]|$)'")
NON_PRINTABLE = re.compile(NON_PRINTABLE)
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError, exc:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=1024):
- data = self.stream.read(size)
- if data:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- else:
- self.eof = True
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
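Reader's encoding detection above only inspects the first bytes of the stream: a UTF-16 BOM selects the matching UTF-16 codec, anything else falls back to UTF-8, and unicode input skips decoding entirely. A small sketch of that behavior, assuming the vendored package is importable (Python 2, matching the code above):

    import codecs
    from yaml.reader import Reader

    # A plain byte string with no BOM is decoded as UTF-8.
    r = Reader('- a: 1\n')
    print r.encoding        # -> utf-8
    print r.prefix(4)       # -> - a:
    r.forward(2)
    print r.line, r.column  # -> 0 2

    # A UTF-16-LE BOM switches the codec before any YAML is scanned.
    r16 = Reader(codecs.BOM_UTF16_LE + u'- a'.encode('utf-16-le'))
    print r16.encoding      # -> utf-16-le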
diff --git a/contrib/python/PyYAML/py2/yaml/representer.py b/contrib/python/PyYAML/py2/yaml/representer.py
index da01788100..93e09b67b3 100644
--- a/contrib/python/PyYAML/py2/yaml/representer.py
+++ b/contrib/python/PyYAML/py2/yaml/representer.py
@@ -1,489 +1,489 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from error import *
-
-from nodes import *
-
-import datetime
-
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+
+from nodes import *
+
+import datetime
+
import copy_reg, types
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
self.sort_keys = sort_keys
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if type(data) is types.InstanceType:
- data_types = self.get_classobj_bases(data.__class__)+list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, unicode(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
- add_representer = classmethod(add_representer)
-
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
- add_multi_representer = classmethod(add_multi_representer)
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = mapping.items()
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
if self.sort_keys:
mapping.sort()
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
if data is None:
- return True
+ return True
if isinstance(data, tuple) and data == ():
return True
- if isinstance(data, (str, unicode, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal part. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
raise RepresenterError("cannot represent an object", data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed by
- # calling cls(**args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never be
- # called and the class instance is created by instantiating a trivial
- # class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary, we
- # produce a !!python/object node . Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:'+class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we calls function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called, and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
raise RepresenterError("cannot represent an object", data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
- Representer.represent_str)
-
-Representer.add_representer(unicode,
- Representer.represent_unicode)
-
-Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
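add_representer() above installs a per-class representer in a copy-on-write class dictionary, and represent_data() then dispatches on type(data).__mro__. A hedged sketch of the public registration path (yaml.add_representer wraps the classmethod shown above; the Point class and the !point tag are illustrative, not part of the library):

    import yaml

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    def represent_point(dumper, data):
        # represent_mapping() records the node under alias_key, so anchors
        # and aliases keep working if the same Point appears twice.
        return dumper.represent_mapping(u'!point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)

    print yaml.dump(Point(1, 2))  # emits a mapping node tagged !point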
diff --git a/contrib/python/PyYAML/py2/yaml/resolver.py b/contrib/python/PyYAML/py2/yaml/resolver.py
index a665d04bcf..ba9aeab21d 100644
--- a/contrib/python/PyYAML/py2/yaml/resolver.py
+++ b/contrib/python/PyYAML/py2/yaml/resolver.py
@@ -1,227 +1,227 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver(object):
-
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
implicit_resolvers = {}
for key in cls.yaml_implicit_resolvers:
implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
cls.yaml_implicit_resolvers = implicit_resolvers
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
- add_implicit_resolver = classmethod(add_implicit_resolver)
-
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key which content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, basestring) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (basestring, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
- add_path_resolver = classmethod(add_path_resolver)
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, basestring):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, basestring):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. Its elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers + wildcard_resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(ur'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(ur'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(ur'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(ur'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(ur'^(?:!|&|\*)$'),
- list(u'!&*'))
-
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
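+# Resolution precedence, sketched: implicit resolvers are consulted first
+# for plain scalars, then path resolvers, then the per-kind defaults.
+# For example, with the standard implicit resolvers registered below and
+# `r` an instance of Resolver:
+#   r.resolve(ScalarNode, u'123', (True, False))
+#       -> u'tag:yaml.org,2002:int'  (first char u'1' selects the int regexp)
+#   r.resolve(ScalarNode, u'123', (False, False))
+#       -> u'tag:yaml.org,2002:str'  (falls through to DEFAULT_SCALAR_TAG)
+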
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
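+# Usage sketch: registering a custom implicit resolver follows the same
+# pattern as the built-ins above. The '!hex' tag and its regexp here are
+# hypothetical, purely for illustration:
+#
+#   import re, yaml
+#   yaml.add_implicit_resolver(
+#       u'!hex', re.compile(ur'^0x[0-9a-fA-F]+$'), list(u'0'))
+#
+# A plain scalar such as `0x1F` would then resolve to '!hex' and needs a
+# matching constructor to be loadable.
+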
diff --git a/contrib/python/PyYAML/py2/yaml/scanner.py b/contrib/python/PyYAML/py2/yaml/scanner.py
index a307d9a55a..587b73d8b4 100644
--- a/contrib/python/PyYAML/py2/yaml/scanner.py
+++ b/contrib/python/PyYAML/py2/yaml/scanner.py
@@ -1,1449 +1,1449 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
import sys
-from error import MarkedYAMLError
-from tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey(object):
- # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
# Return None if no more tokens.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
else:
return None
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % ch.encode('utf-8'), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in self.possible_simple_keys.keys():
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
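+ # Usage sketch for the public methods above, assuming `scanner` is a
+ # loader object with this Scanner mixed in:
+ #   while not scanner.check_token(StreamEndToken):
+ #       token = scanner.get_token()      # consumes one token
+ # peek_token() would return the same token without removing it.
+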
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
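+ # Dispatch sketch: for the input stream `[a, b]`, successive calls to
+ # fetch_more_tokens() queue FLOW-SEQUENCE-START, SCALAR('a'),
+ # FLOW-ENTRY, SCALAR('b') and FLOW-SEQUENCE-END, in between the
+ # STREAM-START and STREAM-END tokens added by the fetchers above.
+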
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
"could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
+
+ del self.possible_simple_keys[self.flow_level]
+
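+ # Bookkeeping sketch: while scanning `foo: bar`, the plain scalar at
+ # 'f' is recorded by save_possible_simple_key(); when fetch_value()
+ # later sees the ':' it finds that record and inserts a KEY token in
+ # front of the already queued SCALAR('foo').
+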
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
# "invalid indentation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
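+ # Indentation sketch: for
+ #   a:
+ #     - 1
+ # add_indent(0) opens the block mapping and add_indent(2) the nested
+ # block sequence; at stream end, unwind_indent(-1) pops both levels,
+ # emitting one BLOCK-END token per popped level.
+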
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
# Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
# Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
# Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there cannot be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
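+ # Sketch: in
+ #   - 1
+ #   - 2
+ # the first '-' both opens the sequence (add_indent() succeeds, so a
+ # BLOCK-SEQUENCE-START is queued) and adds a BLOCK-ENTRY; the second
+ # '-' sits at the same indent and adds only a BLOCK-ENTRY.
+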
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
# Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Does this ':' complete a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Does this ':' complete a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
# (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
- and (ch == u'-' or (not self.flow_level and ch in u'?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
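+ # check_plain() sketch, with the pointer at the first character shown:
+ #   `hello`  -> True   ('h' is not an indicator)
+ #   `-foo`   -> True   ('-' followed by a non-space)
+ #   `- foo`  -> False  ('-' plus space is a block entry, not a scalar)
+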
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
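+ # Sketch: given `  # note\nkey: value`, scan_to_next_token() skips the
+ # spaces and the comment, consumes the line break (re-enabling
+ # allow_simple_key in block context) and leaves the pointer at 'k'.
+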
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
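+ # Directive sketch: scanning the line `%YAML 1.1` produces
+ # DirectiveToken(u'YAML', (1, 1), ...); each number on either side of
+ # the '.' is read by scan_yaml_directive_number() below.
+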
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
# can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
- # Note that we loosen indentation rules for quoted scalars. Quoted
- # scalars don't need to adhere to indentation because " and ' clearly
- # mark the beginning and the end of them. Therefore we are less
- # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'\\': u'\\',
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
u'/': u'/',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k).encode('utf-8')), self.get_mark())
- code = int(self.prefix(length), 16)
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
if code <= sys.maxunicode:
chunks.append(unichr(code))
else:
chunks.append(('\\U%08x' % code).decode('unicode-escape'))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',' or '?'.
- # We also keep track of the `allow_simple_key` flag here.
- # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
or (ch == u':' and
self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029'
+ (u',[]{}' if self.flow_level else u''))\
or (self.flow_level and ch in u',?[]{}'):
- break
- length += 1
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
- # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
- (self.peek(k).encode('utf-8')), self.get_mark())
- bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- value = unicode(''.join(bytes), 'utf-8')
- except UnicodeDecodeError, exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029' : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
+ break
+ length += 1
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
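A quick end-to-end check of the block-scalar logic in this hunk (a sketch, assuming PyYAML is installed and importable as yaml): the chomping indicator parsed by scan_block_scalar_indicators() is the flag scan_block_scalar() consults when chomping the tail, so clip (no indicator) keeps a single trailing break, strip ('-') drops them all, and keep ('+') preserves every trailing break.

import yaml

doc = """
clip: |
  text

strip: |-
  text

keep: |+
  text

"""
data = yaml.safe_load(doc)
print(repr(data["clip"]))   # 'text\n'    -> chomping is None: one line break kept
print(repr(data["strip"]))  # 'text'      -> chomping is False: tail dropped
print(repr(data["keep"]))   # 'text\n\n'  -> chomping is True: all breaks kept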
diff --git a/contrib/python/PyYAML/py2/yaml/serializer.py b/contrib/python/PyYAML/py2/yaml/serializer.py
index fc8321150f..0bf1e96dc1 100644
--- a/contrib/python/PyYAML/py2/yaml/serializer.py
+++ b/contrib/python/PyYAML/py2/yaml/serializer.py
@@ -1,111 +1,111 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer(object):
-
- ANCHOR_TEMPLATE = u'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
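The anchor bookkeeping above is easiest to see from the public API (a sketch, assuming PyYAML is importable as yaml): anchor_node() marks any node visited twice, generate_anchor() names it via ANCHOR_TEMPLATE, and serialize_node() emits an AliasEvent on the second encounter.

import yaml

shared = ["a", "b"]
print(yaml.dump({"first": shared, "second": shared}))
# first: &id001    <- anchor produced by generate_anchor() ('id%03d')
# - a
# - b
# second: *id001   <- AliasEvent for the already-serialized node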
diff --git a/contrib/python/PyYAML/py2/yaml/tokens.py b/contrib/python/PyYAML/py2/yaml/tokens.py
index 34da1025dd..4d0b48a394 100644
--- a/contrib/python/PyYAML/py2/yaml/tokens.py
+++ b/contrib/python/PyYAML/py2/yaml/tokens.py
@@ -1,104 +1,104 @@
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
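These token classes are what the public yaml.scan() API yields, and the generic Token.__repr__ above is what prints them (a sketch, assuming PyYAML is importable as yaml; exact reprs may vary slightly between versions):

import yaml

for token in yaml.scan("- !!str hello"):
    print(token)
# Output (roughly):
#   StreamStartToken(encoding=None)
#   BlockSequenceStartToken()
#   BlockEntryToken()
#   TagToken(value=(u'!!', u'str'))
#   ScalarToken(plain=True, style=None, value=u'hello')
#   BlockEndToken()
#   StreamEndToken()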
diff --git a/contrib/python/PyYAML/py3/LICENSE b/contrib/python/PyYAML/py3/LICENSE
index 63edc20f2f..2f1b8e15e5 100644
--- a/contrib/python/PyYAML/py3/LICENSE
+++ b/contrib/python/PyYAML/py3/LICENSE
@@ -1,20 +1,20 @@
Copyright (c) 2017-2021 Ingy döt Net
Copyright (c) 2006-2016 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/PyYAML/py3/ya.make b/contrib/python/PyYAML/py3/ya.make
index 628977356a..0401c04651 100644
--- a/contrib/python/PyYAML/py3/ya.make
+++ b/contrib/python/PyYAML/py3/ya.make
@@ -1,43 +1,43 @@
PY3_LIBRARY()
-
+
LICENSE(MIT)
VERSION(5.4.1)
OWNER(g:python-contrib borman g:testenv)
-
+
PEERDIR(
contrib/libs/yaml
)
-
+
ADDINCL(
contrib/python/PyYAML/py3/yaml
)
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
_yaml/__init__.py
- yaml/__init__.py
- yaml/composer.py
- yaml/constructor.py
- yaml/cyaml.py
- yaml/dumper.py
- yaml/emitter.py
- yaml/error.py
- yaml/events.py
- yaml/loader.py
- yaml/nodes.py
- yaml/parser.py
- yaml/reader.py
- yaml/representer.py
- yaml/resolver.py
- yaml/scanner.py
- yaml/serializer.py
- yaml/tokens.py
+ yaml/__init__.py
+ yaml/composer.py
+ yaml/constructor.py
+ yaml/cyaml.py
+ yaml/dumper.py
+ yaml/emitter.py
+ yaml/error.py
+ yaml/events.py
+ yaml/loader.py
+ yaml/nodes.py
+ yaml/parser.py
+ yaml/reader.py
+ yaml/representer.py
+ yaml/resolver.py
+ yaml/scanner.py
+ yaml/serializer.py
+ yaml/tokens.py
CYTHON_C
yaml/_yaml.pyx
-)
-
+)
+
RESOURCE_FILES(
PREFIX contrib/python/PyYAML/
.dist-info/METADATA
@@ -48,4 +48,4 @@ NO_LINT()
NO_COMPILER_WARNINGS()
-END()
+END()
diff --git a/contrib/python/PyYAML/py3/yaml/_yaml.h b/contrib/python/PyYAML/py3/yaml/_yaml.h
index 622cfc561a..21fd6a991b 100644
--- a/contrib/python/PyYAML/py3/yaml/_yaml.h
+++ b/contrib/python/PyYAML/py3/yaml/_yaml.h
@@ -1,23 +1,23 @@
-
-#include <yaml.h>
-
-#if PY_MAJOR_VERSION < 3
-
+
+#include <yaml.h>
+
+#if PY_MAJOR_VERSION < 3
+
#define PyUnicode_FromString(s) PyUnicode_DecodeUTF8((s), strlen(s), "strict")
-
-#else
-
-#define PyString_CheckExact PyBytes_CheckExact
-#define PyString_AS_STRING PyBytes_AS_STRING
-#define PyString_GET_SIZE PyBytes_GET_SIZE
-#define PyString_FromStringAndSize PyBytes_FromStringAndSize
-
-#endif
-
-#ifdef _MSC_VER /* MS Visual C++ 6.0 */
-#if _MSC_VER == 1200
-
-#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(i)
-
-#endif
-#endif
+
+#else
+
+#define PyString_CheckExact PyBytes_CheckExact
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+
+#endif
+
+#ifdef _MSC_VER /* MS Visual C++ 6.0 */
+#if _MSC_VER == 1200
+
+#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(i)
+
+#endif
+#endif
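These macros only bridge the Python 2/3 C-API gap for the Cython extension; from Python code, the compiled bindings are reached through the C-prefixed loader/dumper classes (a sketch, assuming the _yaml extension built; yaml.__with_libyaml__ reports whether it did):

import yaml

text = "a: 1"
if getattr(yaml, "__with_libyaml__", False):
    data = yaml.load(text, Loader=yaml.CSafeLoader)  # libyaml-backed scanner/parser
else:
    data = yaml.safe_load(text)  # pure-Python fallback
print(data)  # {'a': 1}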
diff --git a/contrib/python/PyYAML/py3/yaml/_yaml.pxd b/contrib/python/PyYAML/py3/yaml/_yaml.pxd
index b6629e26ae..7937c9db51 100644
--- a/contrib/python/PyYAML/py3/yaml/_yaml.pxd
+++ b/contrib/python/PyYAML/py3/yaml/_yaml.pxd
@@ -1,251 +1,251 @@
-
+
cdef extern from "_yaml.h":
-
- void malloc(int l)
- void memcpy(char *d, char *s, int l)
- int strlen(char *s)
- int PyString_CheckExact(object o)
- int PyUnicode_CheckExact(object o)
- char *PyString_AS_STRING(object o)
- int PyString_GET_SIZE(object o)
- object PyString_FromStringAndSize(char *v, int l)
- object PyUnicode_FromString(char *u)
- object PyUnicode_DecodeUTF8(char *u, int s, char *e)
- object PyUnicode_AsUTF8String(object o)
- int PY_MAJOR_VERSION
-
- ctypedef enum:
- SIZEOF_VOID_P
- ctypedef enum yaml_encoding_t:
- YAML_ANY_ENCODING
- YAML_UTF8_ENCODING
- YAML_UTF16LE_ENCODING
- YAML_UTF16BE_ENCODING
- ctypedef enum yaml_break_t:
- YAML_ANY_BREAK
- YAML_CR_BREAK
- YAML_LN_BREAK
- YAML_CRLN_BREAK
- ctypedef enum yaml_error_type_t:
- YAML_NO_ERROR
- YAML_MEMORY_ERROR
- YAML_READER_ERROR
- YAML_SCANNER_ERROR
- YAML_PARSER_ERROR
- YAML_WRITER_ERROR
- YAML_EMITTER_ERROR
- ctypedef enum yaml_scalar_style_t:
- YAML_ANY_SCALAR_STYLE
- YAML_PLAIN_SCALAR_STYLE
- YAML_SINGLE_QUOTED_SCALAR_STYLE
- YAML_DOUBLE_QUOTED_SCALAR_STYLE
- YAML_LITERAL_SCALAR_STYLE
- YAML_FOLDED_SCALAR_STYLE
- ctypedef enum yaml_sequence_style_t:
- YAML_ANY_SEQUENCE_STYLE
- YAML_BLOCK_SEQUENCE_STYLE
- YAML_FLOW_SEQUENCE_STYLE
- ctypedef enum yaml_mapping_style_t:
- YAML_ANY_MAPPING_STYLE
- YAML_BLOCK_MAPPING_STYLE
- YAML_FLOW_MAPPING_STYLE
- ctypedef enum yaml_token_type_t:
- YAML_NO_TOKEN
- YAML_STREAM_START_TOKEN
- YAML_STREAM_END_TOKEN
- YAML_VERSION_DIRECTIVE_TOKEN
- YAML_TAG_DIRECTIVE_TOKEN
- YAML_DOCUMENT_START_TOKEN
- YAML_DOCUMENT_END_TOKEN
- YAML_BLOCK_SEQUENCE_START_TOKEN
- YAML_BLOCK_MAPPING_START_TOKEN
- YAML_BLOCK_END_TOKEN
- YAML_FLOW_SEQUENCE_START_TOKEN
- YAML_FLOW_SEQUENCE_END_TOKEN
- YAML_FLOW_MAPPING_START_TOKEN
- YAML_FLOW_MAPPING_END_TOKEN
- YAML_BLOCK_ENTRY_TOKEN
- YAML_FLOW_ENTRY_TOKEN
- YAML_KEY_TOKEN
- YAML_VALUE_TOKEN
- YAML_ALIAS_TOKEN
- YAML_ANCHOR_TOKEN
- YAML_TAG_TOKEN
- YAML_SCALAR_TOKEN
- ctypedef enum yaml_event_type_t:
- YAML_NO_EVENT
- YAML_STREAM_START_EVENT
- YAML_STREAM_END_EVENT
- YAML_DOCUMENT_START_EVENT
- YAML_DOCUMENT_END_EVENT
- YAML_ALIAS_EVENT
- YAML_SCALAR_EVENT
- YAML_SEQUENCE_START_EVENT
- YAML_SEQUENCE_END_EVENT
- YAML_MAPPING_START_EVENT
- YAML_MAPPING_END_EVENT
-
+
+ void malloc(int l)
+ void memcpy(char *d, char *s, int l)
+ int strlen(char *s)
+ int PyString_CheckExact(object o)
+ int PyUnicode_CheckExact(object o)
+ char *PyString_AS_STRING(object o)
+ int PyString_GET_SIZE(object o)
+ object PyString_FromStringAndSize(char *v, int l)
+ object PyUnicode_FromString(char *u)
+ object PyUnicode_DecodeUTF8(char *u, int s, char *e)
+ object PyUnicode_AsUTF8String(object o)
+ int PY_MAJOR_VERSION
+
+ ctypedef enum:
+ SIZEOF_VOID_P
+ ctypedef enum yaml_encoding_t:
+ YAML_ANY_ENCODING
+ YAML_UTF8_ENCODING
+ YAML_UTF16LE_ENCODING
+ YAML_UTF16BE_ENCODING
+ ctypedef enum yaml_break_t:
+ YAML_ANY_BREAK
+ YAML_CR_BREAK
+ YAML_LN_BREAK
+ YAML_CRLN_BREAK
+ ctypedef enum yaml_error_type_t:
+ YAML_NO_ERROR
+ YAML_MEMORY_ERROR
+ YAML_READER_ERROR
+ YAML_SCANNER_ERROR
+ YAML_PARSER_ERROR
+ YAML_WRITER_ERROR
+ YAML_EMITTER_ERROR
+ ctypedef enum yaml_scalar_style_t:
+ YAML_ANY_SCALAR_STYLE
+ YAML_PLAIN_SCALAR_STYLE
+ YAML_SINGLE_QUOTED_SCALAR_STYLE
+ YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ YAML_LITERAL_SCALAR_STYLE
+ YAML_FOLDED_SCALAR_STYLE
+ ctypedef enum yaml_sequence_style_t:
+ YAML_ANY_SEQUENCE_STYLE
+ YAML_BLOCK_SEQUENCE_STYLE
+ YAML_FLOW_SEQUENCE_STYLE
+ ctypedef enum yaml_mapping_style_t:
+ YAML_ANY_MAPPING_STYLE
+ YAML_BLOCK_MAPPING_STYLE
+ YAML_FLOW_MAPPING_STYLE
+ ctypedef enum yaml_token_type_t:
+ YAML_NO_TOKEN
+ YAML_STREAM_START_TOKEN
+ YAML_STREAM_END_TOKEN
+ YAML_VERSION_DIRECTIVE_TOKEN
+ YAML_TAG_DIRECTIVE_TOKEN
+ YAML_DOCUMENT_START_TOKEN
+ YAML_DOCUMENT_END_TOKEN
+ YAML_BLOCK_SEQUENCE_START_TOKEN
+ YAML_BLOCK_MAPPING_START_TOKEN
+ YAML_BLOCK_END_TOKEN
+ YAML_FLOW_SEQUENCE_START_TOKEN
+ YAML_FLOW_SEQUENCE_END_TOKEN
+ YAML_FLOW_MAPPING_START_TOKEN
+ YAML_FLOW_MAPPING_END_TOKEN
+ YAML_BLOCK_ENTRY_TOKEN
+ YAML_FLOW_ENTRY_TOKEN
+ YAML_KEY_TOKEN
+ YAML_VALUE_TOKEN
+ YAML_ALIAS_TOKEN
+ YAML_ANCHOR_TOKEN
+ YAML_TAG_TOKEN
+ YAML_SCALAR_TOKEN
+ ctypedef enum yaml_event_type_t:
+ YAML_NO_EVENT
+ YAML_STREAM_START_EVENT
+ YAML_STREAM_END_EVENT
+ YAML_DOCUMENT_START_EVENT
+ YAML_DOCUMENT_END_EVENT
+ YAML_ALIAS_EVENT
+ YAML_SCALAR_EVENT
+ YAML_SEQUENCE_START_EVENT
+ YAML_SEQUENCE_END_EVENT
+ YAML_MAPPING_START_EVENT
+ YAML_MAPPING_END_EVENT
+
ctypedef int yaml_read_handler_t(void *data, char *buffer,
- size_t size, size_t *size_read) except 0
-
+ size_t size, size_t *size_read) except 0
+
ctypedef int yaml_write_handler_t(void *data, char *buffer,
- size_t size) except 0
-
- ctypedef struct yaml_mark_t:
+ size_t size) except 0
+
+ ctypedef struct yaml_mark_t:
size_t index
size_t line
size_t column
- ctypedef struct yaml_version_directive_t:
- int major
- int minor
- ctypedef struct yaml_tag_directive_t:
+ ctypedef struct yaml_version_directive_t:
+ int major
+ int minor
+ ctypedef struct yaml_tag_directive_t:
char *handle
char *prefix
-
- ctypedef struct _yaml_token_stream_start_data_t:
- yaml_encoding_t encoding
- ctypedef struct _yaml_token_alias_data_t:
- char *value
- ctypedef struct _yaml_token_anchor_data_t:
- char *value
- ctypedef struct _yaml_token_tag_data_t:
+
+ ctypedef struct _yaml_token_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_token_alias_data_t:
+ char *value
+ ctypedef struct _yaml_token_anchor_data_t:
+ char *value
+ ctypedef struct _yaml_token_tag_data_t:
char *handle
char *suffix
- ctypedef struct _yaml_token_scalar_data_t:
- char *value
+ ctypedef struct _yaml_token_scalar_data_t:
+ char *value
size_t length
- yaml_scalar_style_t style
- ctypedef struct _yaml_token_version_directive_data_t:
- int major
- int minor
- ctypedef struct _yaml_token_tag_directive_data_t:
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_token_version_directive_data_t:
+ int major
+ int minor
+ ctypedef struct _yaml_token_tag_directive_data_t:
char *handle
char *prefix
- ctypedef union _yaml_token_data_t:
- _yaml_token_stream_start_data_t stream_start
- _yaml_token_alias_data_t alias
- _yaml_token_anchor_data_t anchor
- _yaml_token_tag_data_t tag
- _yaml_token_scalar_data_t scalar
- _yaml_token_version_directive_data_t version_directive
- _yaml_token_tag_directive_data_t tag_directive
- ctypedef struct yaml_token_t:
- yaml_token_type_t type
- _yaml_token_data_t data
- yaml_mark_t start_mark
- yaml_mark_t end_mark
-
- ctypedef struct _yaml_event_stream_start_data_t:
- yaml_encoding_t encoding
- ctypedef struct _yaml_event_document_start_data_tag_directives_t:
- yaml_tag_directive_t *start
- yaml_tag_directive_t *end
- ctypedef struct _yaml_event_document_start_data_t:
- yaml_version_directive_t *version_directive
- _yaml_event_document_start_data_tag_directives_t tag_directives
- int implicit
- ctypedef struct _yaml_event_document_end_data_t:
- int implicit
- ctypedef struct _yaml_event_alias_data_t:
- char *anchor
- ctypedef struct _yaml_event_scalar_data_t:
- char *anchor
- char *tag
- char *value
+ ctypedef union _yaml_token_data_t:
+ _yaml_token_stream_start_data_t stream_start
+ _yaml_token_alias_data_t alias
+ _yaml_token_anchor_data_t anchor
+ _yaml_token_tag_data_t tag
+ _yaml_token_scalar_data_t scalar
+ _yaml_token_version_directive_data_t version_directive
+ _yaml_token_tag_directive_data_t tag_directive
+ ctypedef struct yaml_token_t:
+ yaml_token_type_t type
+ _yaml_token_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct _yaml_event_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_event_document_start_data_tag_directives_t:
+ yaml_tag_directive_t *start
+ yaml_tag_directive_t *end
+ ctypedef struct _yaml_event_document_start_data_t:
+ yaml_version_directive_t *version_directive
+ _yaml_event_document_start_data_tag_directives_t tag_directives
+ int implicit
+ ctypedef struct _yaml_event_document_end_data_t:
+ int implicit
+ ctypedef struct _yaml_event_alias_data_t:
+ char *anchor
+ ctypedef struct _yaml_event_scalar_data_t:
+ char *anchor
+ char *tag
+ char *value
size_t length
- int plain_implicit
- int quoted_implicit
- yaml_scalar_style_t style
- ctypedef struct _yaml_event_sequence_start_data_t:
- char *anchor
- char *tag
- int implicit
- yaml_sequence_style_t style
- ctypedef struct _yaml_event_mapping_start_data_t:
- char *anchor
- char *tag
- int implicit
- yaml_mapping_style_t style
- ctypedef union _yaml_event_data_t:
- _yaml_event_stream_start_data_t stream_start
- _yaml_event_document_start_data_t document_start
- _yaml_event_document_end_data_t document_end
- _yaml_event_alias_data_t alias
- _yaml_event_scalar_data_t scalar
- _yaml_event_sequence_start_data_t sequence_start
- _yaml_event_mapping_start_data_t mapping_start
- ctypedef struct yaml_event_t:
- yaml_event_type_t type
- _yaml_event_data_t data
- yaml_mark_t start_mark
- yaml_mark_t end_mark
-
- ctypedef struct yaml_parser_t:
- yaml_error_type_t error
- char *problem
+ int plain_implicit
+ int quoted_implicit
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_event_sequence_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_sequence_style_t style
+ ctypedef struct _yaml_event_mapping_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_mapping_style_t style
+ ctypedef union _yaml_event_data_t:
+ _yaml_event_stream_start_data_t stream_start
+ _yaml_event_document_start_data_t document_start
+ _yaml_event_document_end_data_t document_end
+ _yaml_event_alias_data_t alias
+ _yaml_event_scalar_data_t scalar
+ _yaml_event_sequence_start_data_t sequence_start
+ _yaml_event_mapping_start_data_t mapping_start
+ ctypedef struct yaml_event_t:
+ yaml_event_type_t type
+ _yaml_event_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct yaml_parser_t:
+ yaml_error_type_t error
+ char *problem
size_t problem_offset
- int problem_value
- yaml_mark_t problem_mark
- char *context
- yaml_mark_t context_mark
-
- ctypedef struct yaml_emitter_t:
- yaml_error_type_t error
- char *problem
-
- char *yaml_get_version_string()
- void yaml_get_version(int *major, int *minor, int *patch)
-
- void yaml_token_delete(yaml_token_t *token)
-
- int yaml_stream_start_event_initialize(yaml_event_t *event,
- yaml_encoding_t encoding)
- int yaml_stream_end_event_initialize(yaml_event_t *event)
- int yaml_document_start_event_initialize(yaml_event_t *event,
- yaml_version_directive_t *version_directive,
- yaml_tag_directive_t *tag_directives_start,
- yaml_tag_directive_t *tag_directives_end,
- int implicit)
- int yaml_document_end_event_initialize(yaml_event_t *event,
- int implicit)
+ int problem_value
+ yaml_mark_t problem_mark
+ char *context
+ yaml_mark_t context_mark
+
+ ctypedef struct yaml_emitter_t:
+ yaml_error_type_t error
+ char *problem
+
+ char *yaml_get_version_string()
+ void yaml_get_version(int *major, int *minor, int *patch)
+
+ void yaml_token_delete(yaml_token_t *token)
+
+ int yaml_stream_start_event_initialize(yaml_event_t *event,
+ yaml_encoding_t encoding)
+ int yaml_stream_end_event_initialize(yaml_event_t *event)
+ int yaml_document_start_event_initialize(yaml_event_t *event,
+ yaml_version_directive_t *version_directive,
+ yaml_tag_directive_t *tag_directives_start,
+ yaml_tag_directive_t *tag_directives_end,
+ int implicit)
+ int yaml_document_end_event_initialize(yaml_event_t *event,
+ int implicit)
int yaml_alias_event_initialize(yaml_event_t *event, char *anchor)
- int yaml_scalar_event_initialize(yaml_event_t *event,
+ int yaml_scalar_event_initialize(yaml_event_t *event,
char *anchor, char *tag, char *value, size_t length,
- int plain_implicit, int quoted_implicit,
- yaml_scalar_style_t style)
- int yaml_sequence_start_event_initialize(yaml_event_t *event,
+ int plain_implicit, int quoted_implicit,
+ yaml_scalar_style_t style)
+ int yaml_sequence_start_event_initialize(yaml_event_t *event,
char *anchor, char *tag, int implicit, yaml_sequence_style_t style)
- int yaml_sequence_end_event_initialize(yaml_event_t *event)
- int yaml_mapping_start_event_initialize(yaml_event_t *event,
+ int yaml_sequence_end_event_initialize(yaml_event_t *event)
+ int yaml_mapping_start_event_initialize(yaml_event_t *event,
char *anchor, char *tag, int implicit, yaml_mapping_style_t style)
- int yaml_mapping_end_event_initialize(yaml_event_t *event)
- void yaml_event_delete(yaml_event_t *event)
-
- int yaml_parser_initialize(yaml_parser_t *parser)
- void yaml_parser_delete(yaml_parser_t *parser)
- void yaml_parser_set_input_string(yaml_parser_t *parser,
+ int yaml_mapping_end_event_initialize(yaml_event_t *event)
+ void yaml_event_delete(yaml_event_t *event)
+
+ int yaml_parser_initialize(yaml_parser_t *parser)
+ void yaml_parser_delete(yaml_parser_t *parser)
+ void yaml_parser_set_input_string(yaml_parser_t *parser,
char *input, size_t size)
- void yaml_parser_set_input(yaml_parser_t *parser,
- yaml_read_handler_t *handler, void *data)
- void yaml_parser_set_encoding(yaml_parser_t *parser,
- yaml_encoding_t encoding)
- int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
- int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
-
- int yaml_emitter_initialize(yaml_emitter_t *emitter)
- void yaml_emitter_delete(yaml_emitter_t *emitter)
- void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
+ void yaml_parser_set_input(yaml_parser_t *parser,
+ yaml_read_handler_t *handler, void *data)
+ void yaml_parser_set_encoding(yaml_parser_t *parser,
+ yaml_encoding_t encoding)
+ int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
+ int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
+
+ int yaml_emitter_initialize(yaml_emitter_t *emitter)
+ void yaml_emitter_delete(yaml_emitter_t *emitter)
+ void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
char *output, size_t size, size_t *size_written)
- void yaml_emitter_set_output(yaml_emitter_t *emitter,
- yaml_write_handler_t *handler, void *data)
- void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
- yaml_encoding_t encoding)
- void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
- void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
- void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
- void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
- void yaml_emitter_set_break(yaml_emitter_t *emitter,
- yaml_break_t line_break)
- int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
- int yaml_emitter_flush(yaml_emitter_t *emitter)
-
+ void yaml_emitter_set_output(yaml_emitter_t *emitter,
+ yaml_write_handler_t *handler, void *data)
+ void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
+ yaml_encoding_t encoding)
+ void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
+ void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
+ void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
+ void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
+ void yaml_emitter_set_break(yaml_emitter_t *emitter,
+ yaml_break_t line_break)
+ int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
+ int yaml_emitter_flush(yaml_emitter_t *emitter)
+
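# A minimal sketch of the event loop the declarations above support, following
# the same calls CParser makes in _yaml.pyx (Cython; error handling
# abbreviated, `data` assumed to be a bytes object):
#
# cdef yaml_parser_t parser
# cdef yaml_event_t event
# if yaml_parser_initialize(&parser) == 0:
#     raise MemoryError
# yaml_parser_set_input_string(&parser,
#         PyString_AS_STRING(data), PyString_GET_SIZE(data))
# done = 0
# while done == 0:
#     if yaml_parser_parse(&parser, &event) == 0:
#         break                       # parser.error/parser.problem describe it
#     done = (event.type == YAML_STREAM_END_EVENT)
#     yaml_event_delete(&event)
# yaml_parser_delete(&parser)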
diff --git a/contrib/python/PyYAML/py3/yaml/_yaml.pyx b/contrib/python/PyYAML/py3/yaml/_yaml.pyx
index efdb347aff..ff4efe80b5 100644
--- a/contrib/python/PyYAML/py3/yaml/_yaml.pyx
+++ b/contrib/python/PyYAML/py3/yaml/_yaml.pyx
@@ -1,1527 +1,1527 @@
-
-import yaml
-
-def get_version_string():
+
+import yaml
+
+def get_version_string():
cdef char *value
- value = yaml_get_version_string()
- if PY_MAJOR_VERSION < 3:
- return value
- else:
- return PyUnicode_FromString(value)
-
-def get_version():
- cdef int major, minor, patch
- yaml_get_version(&major, &minor, &patch)
- return (major, minor, patch)
-
-#Mark = yaml.error.Mark
-YAMLError = yaml.error.YAMLError
-ReaderError = yaml.reader.ReaderError
-ScannerError = yaml.scanner.ScannerError
-ParserError = yaml.parser.ParserError
-ComposerError = yaml.composer.ComposerError
-ConstructorError = yaml.constructor.ConstructorError
-EmitterError = yaml.emitter.EmitterError
-SerializerError = yaml.serializer.SerializerError
-RepresenterError = yaml.representer.RepresenterError
-
-StreamStartToken = yaml.tokens.StreamStartToken
-StreamEndToken = yaml.tokens.StreamEndToken
-DirectiveToken = yaml.tokens.DirectiveToken
-DocumentStartToken = yaml.tokens.DocumentStartToken
-DocumentEndToken = yaml.tokens.DocumentEndToken
-BlockSequenceStartToken = yaml.tokens.BlockSequenceStartToken
-BlockMappingStartToken = yaml.tokens.BlockMappingStartToken
-BlockEndToken = yaml.tokens.BlockEndToken
-FlowSequenceStartToken = yaml.tokens.FlowSequenceStartToken
-FlowMappingStartToken = yaml.tokens.FlowMappingStartToken
-FlowSequenceEndToken = yaml.tokens.FlowSequenceEndToken
-FlowMappingEndToken = yaml.tokens.FlowMappingEndToken
-KeyToken = yaml.tokens.KeyToken
-ValueToken = yaml.tokens.ValueToken
-BlockEntryToken = yaml.tokens.BlockEntryToken
-FlowEntryToken = yaml.tokens.FlowEntryToken
-AliasToken = yaml.tokens.AliasToken
-AnchorToken = yaml.tokens.AnchorToken
-TagToken = yaml.tokens.TagToken
-ScalarToken = yaml.tokens.ScalarToken
-
-StreamStartEvent = yaml.events.StreamStartEvent
-StreamEndEvent = yaml.events.StreamEndEvent
-DocumentStartEvent = yaml.events.DocumentStartEvent
-DocumentEndEvent = yaml.events.DocumentEndEvent
-AliasEvent = yaml.events.AliasEvent
-ScalarEvent = yaml.events.ScalarEvent
-SequenceStartEvent = yaml.events.SequenceStartEvent
-SequenceEndEvent = yaml.events.SequenceEndEvent
-MappingStartEvent = yaml.events.MappingStartEvent
-MappingEndEvent = yaml.events.MappingEndEvent
-
-ScalarNode = yaml.nodes.ScalarNode
-SequenceNode = yaml.nodes.SequenceNode
-MappingNode = yaml.nodes.MappingNode
-
-cdef class Mark:
- cdef readonly object name
+ value = yaml_get_version_string()
+ if PY_MAJOR_VERSION < 3:
+ return value
+ else:
+ return PyUnicode_FromString(value)
+
+def get_version():
+ cdef int major, minor, patch
+ yaml_get_version(&major, &minor, &patch)
+ return (major, minor, patch)
+
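# Both helpers are reachable through the yaml package when the extension is
# compiled in; a short usage sketch (assumes a libyaml-enabled build, version
# numbers are illustrative):
#
# import yaml
# if yaml.__with_libyaml__:
#     from yaml import _yaml
#     _yaml.get_version_string()      # e.g. '0.2.5'
#     _yaml.get_version()             # e.g. (0, 2, 5)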
+#Mark = yaml.error.Mark
+YAMLError = yaml.error.YAMLError
+ReaderError = yaml.reader.ReaderError
+ScannerError = yaml.scanner.ScannerError
+ParserError = yaml.parser.ParserError
+ComposerError = yaml.composer.ComposerError
+ConstructorError = yaml.constructor.ConstructorError
+EmitterError = yaml.emitter.EmitterError
+SerializerError = yaml.serializer.SerializerError
+RepresenterError = yaml.representer.RepresenterError
+
+StreamStartToken = yaml.tokens.StreamStartToken
+StreamEndToken = yaml.tokens.StreamEndToken
+DirectiveToken = yaml.tokens.DirectiveToken
+DocumentStartToken = yaml.tokens.DocumentStartToken
+DocumentEndToken = yaml.tokens.DocumentEndToken
+BlockSequenceStartToken = yaml.tokens.BlockSequenceStartToken
+BlockMappingStartToken = yaml.tokens.BlockMappingStartToken
+BlockEndToken = yaml.tokens.BlockEndToken
+FlowSequenceStartToken = yaml.tokens.FlowSequenceStartToken
+FlowMappingStartToken = yaml.tokens.FlowMappingStartToken
+FlowSequenceEndToken = yaml.tokens.FlowSequenceEndToken
+FlowMappingEndToken = yaml.tokens.FlowMappingEndToken
+KeyToken = yaml.tokens.KeyToken
+ValueToken = yaml.tokens.ValueToken
+BlockEntryToken = yaml.tokens.BlockEntryToken
+FlowEntryToken = yaml.tokens.FlowEntryToken
+AliasToken = yaml.tokens.AliasToken
+AnchorToken = yaml.tokens.AnchorToken
+TagToken = yaml.tokens.TagToken
+ScalarToken = yaml.tokens.ScalarToken
+
+StreamStartEvent = yaml.events.StreamStartEvent
+StreamEndEvent = yaml.events.StreamEndEvent
+DocumentStartEvent = yaml.events.DocumentStartEvent
+DocumentEndEvent = yaml.events.DocumentEndEvent
+AliasEvent = yaml.events.AliasEvent
+ScalarEvent = yaml.events.ScalarEvent
+SequenceStartEvent = yaml.events.SequenceStartEvent
+SequenceEndEvent = yaml.events.SequenceEndEvent
+MappingStartEvent = yaml.events.MappingStartEvent
+MappingEndEvent = yaml.events.MappingEndEvent
+
+ScalarNode = yaml.nodes.ScalarNode
+SequenceNode = yaml.nodes.SequenceNode
+MappingNode = yaml.nodes.MappingNode
+
+cdef class Mark:
+ cdef readonly object name
cdef readonly size_t index
cdef readonly size_t line
cdef readonly size_t column
- cdef readonly buffer
- cdef readonly pointer
-
+ cdef readonly buffer
+ cdef readonly pointer
+
def __init__(self, object name, size_t index, size_t line, size_t column,
- object buffer, object pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self):
- return None
-
- def __str__(self):
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- return where
-
-#class YAMLError(Exception):
-# pass
-#
-#class MarkedYAMLError(YAMLError):
-#
-# def __init__(self, context=None, context_mark=None,
-# problem=None, problem_mark=None, note=None):
-# self.context = context
-# self.context_mark = context_mark
-# self.problem = problem
-# self.problem_mark = problem_mark
-# self.note = note
-#
-# def __str__(self):
-# lines = []
-# if self.context is not None:
-# lines.append(self.context)
-# if self.context_mark is not None \
-# and (self.problem is None or self.problem_mark is None
-# or self.context_mark.name != self.problem_mark.name
-# or self.context_mark.line != self.problem_mark.line
-# or self.context_mark.column != self.problem_mark.column):
-# lines.append(str(self.context_mark))
-# if self.problem is not None:
-# lines.append(self.problem)
-# if self.problem_mark is not None:
-# lines.append(str(self.problem_mark))
-# if self.note is not None:
-# lines.append(self.note)
-# return '\n'.join(lines)
-#
-#class ReaderError(YAMLError):
-#
-# def __init__(self, name, position, character, encoding, reason):
-# self.name = name
-# self.character = character
-# self.position = position
-# self.encoding = encoding
-# self.reason = reason
-#
-# def __str__(self):
-# if isinstance(self.character, str):
-# return "'%s' codec can't decode byte #x%02x: %s\n" \
-# " in \"%s\", position %d" \
-# % (self.encoding, ord(self.character), self.reason,
-# self.name, self.position)
-# else:
-# return "unacceptable character #x%04x: %s\n" \
-# " in \"%s\", position %d" \
-# % (ord(self.character), self.reason,
-# self.name, self.position)
-#
-#class ScannerError(MarkedYAMLError):
-# pass
-#
-#class ParserError(MarkedYAMLError):
-# pass
-#
-#class EmitterError(YAMLError):
-# pass
-#
-#cdef class Token:
-# cdef readonly Mark start_mark
-# cdef readonly Mark end_mark
-# def __init__(self, Mark start_mark, Mark end_mark):
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class StreamStartToken(Token):
-# cdef readonly object encoding
-# def __init__(self, Mark start_mark, Mark end_mark, encoding):
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-# self.encoding = encoding
-#
-#cdef class StreamEndToken(Token):
-# pass
-#
-#cdef class DirectiveToken(Token):
-# cdef readonly object name
-# cdef readonly object value
-# def __init__(self, name, value, Mark start_mark, Mark end_mark):
-# self.name = name
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class DocumentStartToken(Token):
-# pass
-#
-#cdef class DocumentEndToken(Token):
-# pass
-#
-#cdef class BlockSequenceStartToken(Token):
-# pass
-#
-#cdef class BlockMappingStartToken(Token):
-# pass
-#
-#cdef class BlockEndToken(Token):
-# pass
-#
-#cdef class FlowSequenceStartToken(Token):
-# pass
-#
-#cdef class FlowMappingStartToken(Token):
-# pass
-#
-#cdef class FlowSequenceEndToken(Token):
-# pass
-#
-#cdef class FlowMappingEndToken(Token):
-# pass
-#
-#cdef class KeyToken(Token):
-# pass
-#
-#cdef class ValueToken(Token):
-# pass
-#
-#cdef class BlockEntryToken(Token):
-# pass
-#
-#cdef class FlowEntryToken(Token):
-# pass
-#
-#cdef class AliasToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class AnchorToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class TagToken(Token):
-# cdef readonly object value
-# def __init__(self, value, Mark start_mark, Mark end_mark):
-# self.value = value
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-#
-#cdef class ScalarToken(Token):
-# cdef readonly object value
-# cdef readonly object plain
-# cdef readonly object style
-# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
-# self.value = value
-# self.plain = plain
-# self.start_mark = start_mark
-# self.end_mark = end_mark
-# self.style = style
-
-cdef class CParser:
-
- cdef yaml_parser_t parser
- cdef yaml_event_t parsed_event
-
- cdef object stream
- cdef object stream_name
- cdef object current_token
- cdef object current_event
- cdef object anchors
- cdef object stream_cache
+ object buffer, object pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self):
+ return None
+
+ def __str__(self):
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ return where
+
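# Marks of this kind surface on the exceptions raised below; a sketch of how a
# caller typically reaches them (assumes yaml.CLoader, i.e. this extension):
#
# import yaml
# try:
#     yaml.load("key: [1, 2", Loader=yaml.CLoader)
# except yaml.YAMLError as exc:
#     # scanner/parser errors carry marks built by the Mark class above
#     print(exc.problem_mark.line, exc.problem_mark.column)   # 0-based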
+#class YAMLError(Exception):
+# pass
+#
+#class MarkedYAMLError(YAMLError):
+#
+# def __init__(self, context=None, context_mark=None,
+# problem=None, problem_mark=None, note=None):
+# self.context = context
+# self.context_mark = context_mark
+# self.problem = problem
+# self.problem_mark = problem_mark
+# self.note = note
+#
+# def __str__(self):
+# lines = []
+# if self.context is not None:
+# lines.append(self.context)
+# if self.context_mark is not None \
+# and (self.problem is None or self.problem_mark is None
+# or self.context_mark.name != self.problem_mark.name
+# or self.context_mark.line != self.problem_mark.line
+# or self.context_mark.column != self.problem_mark.column):
+# lines.append(str(self.context_mark))
+# if self.problem is not None:
+# lines.append(self.problem)
+# if self.problem_mark is not None:
+# lines.append(str(self.problem_mark))
+# if self.note is not None:
+# lines.append(self.note)
+# return '\n'.join(lines)
+#
+#class ReaderError(YAMLError):
+#
+# def __init__(self, name, position, character, encoding, reason):
+# self.name = name
+# self.character = character
+# self.position = position
+# self.encoding = encoding
+# self.reason = reason
+#
+# def __str__(self):
+# if isinstance(self.character, str):
+# return "'%s' codec can't decode byte #x%02x: %s\n" \
+# " in \"%s\", position %d" \
+# % (self.encoding, ord(self.character), self.reason,
+# self.name, self.position)
+# else:
+# return "unacceptable character #x%04x: %s\n" \
+# " in \"%s\", position %d" \
+# % (ord(self.character), self.reason,
+# self.name, self.position)
+#
+#class ScannerError(MarkedYAMLError):
+# pass
+#
+#class ParserError(MarkedYAMLError):
+# pass
+#
+#class EmitterError(YAMLError):
+# pass
+#
+#cdef class Token:
+# cdef readonly Mark start_mark
+# cdef readonly Mark end_mark
+# def __init__(self, Mark start_mark, Mark end_mark):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class StreamStartToken(Token):
+# cdef readonly object encoding
+# def __init__(self, Mark start_mark, Mark end_mark, encoding):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.encoding = encoding
+#
+#cdef class StreamEndToken(Token):
+# pass
+#
+#cdef class DirectiveToken(Token):
+# cdef readonly object name
+# cdef readonly object value
+# def __init__(self, name, value, Mark start_mark, Mark end_mark):
+# self.name = name
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class DocumentStartToken(Token):
+# pass
+#
+#cdef class DocumentEndToken(Token):
+# pass
+#
+#cdef class BlockSequenceStartToken(Token):
+# pass
+#
+#cdef class BlockMappingStartToken(Token):
+# pass
+#
+#cdef class BlockEndToken(Token):
+# pass
+#
+#cdef class FlowSequenceStartToken(Token):
+# pass
+#
+#cdef class FlowMappingStartToken(Token):
+# pass
+#
+#cdef class FlowSequenceEndToken(Token):
+# pass
+#
+#cdef class FlowMappingEndToken(Token):
+# pass
+#
+#cdef class KeyToken(Token):
+# pass
+#
+#cdef class ValueToken(Token):
+# pass
+#
+#cdef class BlockEntryToken(Token):
+# pass
+#
+#cdef class FlowEntryToken(Token):
+# pass
+#
+#cdef class AliasToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class AnchorToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class TagToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class ScalarToken(Token):
+# cdef readonly object value
+# cdef readonly object plain
+# cdef readonly object style
+# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
+# self.value = value
+# self.plain = plain
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.style = style
+
+cdef class CParser:
+
+ cdef yaml_parser_t parser
+ cdef yaml_event_t parsed_event
+
+ cdef object stream
+ cdef object stream_name
+ cdef object current_token
+ cdef object current_event
+ cdef object anchors
+ cdef object stream_cache
cdef int stream_cache_len
cdef int stream_cache_pos
- cdef int unicode_source
-
- def __init__(self, stream):
- cdef is_readable
- if yaml_parser_initialize(&self.parser) == 0:
- raise MemoryError
- self.parsed_event.type = YAML_NO_EVENT
- is_readable = 1
- try:
- stream.read
- except AttributeError:
- is_readable = 0
- self.unicode_source = 0
- if is_readable:
- self.stream = stream
- try:
- self.stream_name = stream.name
- except AttributeError:
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<file>'
- else:
- self.stream_name = u'<file>'
- self.stream_cache = None
- self.stream_cache_len = 0
- self.stream_cache_pos = 0
- yaml_parser_set_input(&self.parser, input_handler, <void *>self)
- else:
- if PyUnicode_CheckExact(stream) != 0:
- stream = PyUnicode_AsUTF8String(stream)
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<unicode string>'
- else:
- self.stream_name = u'<unicode string>'
- self.unicode_source = 1
- else:
- if PY_MAJOR_VERSION < 3:
- self.stream_name = '<byte string>'
- else:
- self.stream_name = u'<byte string>'
- if PyString_CheckExact(stream) == 0:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("a string or stream input is required")
- else:
- raise TypeError(u"a string or stream input is required")
- self.stream = stream
+ cdef int unicode_source
+
+ def __init__(self, stream):
+ cdef is_readable
+ if yaml_parser_initialize(&self.parser) == 0:
+ raise MemoryError
+ self.parsed_event.type = YAML_NO_EVENT
+ is_readable = 1
+ try:
+ stream.read
+ except AttributeError:
+ is_readable = 0
+ self.unicode_source = 0
+ if is_readable:
+ self.stream = stream
+ try:
+ self.stream_name = stream.name
+ except AttributeError:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<file>'
+ else:
+ self.stream_name = u'<file>'
+ self.stream_cache = None
+ self.stream_cache_len = 0
+ self.stream_cache_pos = 0
+ yaml_parser_set_input(&self.parser, input_handler, <void *>self)
+ else:
+ if PyUnicode_CheckExact(stream) != 0:
+ stream = PyUnicode_AsUTF8String(stream)
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<unicode string>'
+ else:
+ self.stream_name = u'<unicode string>'
+ self.unicode_source = 1
+ else:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<byte string>'
+ else:
+ self.stream_name = u'<byte string>'
+ if PyString_CheckExact(stream) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string or stream input is required")
+ else:
+ raise TypeError(u"a string or stream input is required")
+ self.stream = stream
yaml_parser_set_input_string(&self.parser, PyString_AS_STRING(stream), PyString_GET_SIZE(stream))
- self.current_token = None
- self.current_event = None
- self.anchors = {}
-
- def __dealloc__(self):
- yaml_parser_delete(&self.parser)
- yaml_event_delete(&self.parsed_event)
-
- def dispose(self):
- pass
-
- cdef object _parser_error(self):
- if self.parser.error == YAML_MEMORY_ERROR:
- return MemoryError
- elif self.parser.error == YAML_READER_ERROR:
- if PY_MAJOR_VERSION < 3:
- return ReaderError(self.stream_name, self.parser.problem_offset,
- self.parser.problem_value, '?', self.parser.problem)
- else:
- return ReaderError(self.stream_name, self.parser.problem_offset,
- self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
- elif self.parser.error == YAML_SCANNER_ERROR \
- or self.parser.error == YAML_PARSER_ERROR:
- context_mark = None
- problem_mark = None
- if self.parser.context != NULL:
- context_mark = Mark(self.stream_name,
- self.parser.context_mark.index,
- self.parser.context_mark.line,
- self.parser.context_mark.column, None, None)
- if self.parser.problem != NULL:
- problem_mark = Mark(self.stream_name,
- self.parser.problem_mark.index,
- self.parser.problem_mark.line,
- self.parser.problem_mark.column, None, None)
- context = None
- if self.parser.context != NULL:
- if PY_MAJOR_VERSION < 3:
- context = self.parser.context
- else:
- context = PyUnicode_FromString(self.parser.context)
- if PY_MAJOR_VERSION < 3:
- problem = self.parser.problem
- else:
- problem = PyUnicode_FromString(self.parser.problem)
- if self.parser.error == YAML_SCANNER_ERROR:
- return ScannerError(context, context_mark, problem, problem_mark)
- else:
- return ParserError(context, context_mark, problem, problem_mark)
- if PY_MAJOR_VERSION < 3:
- raise ValueError("no parser error")
- else:
- raise ValueError(u"no parser error")
-
- def raw_scan(self):
- cdef yaml_token_t token
- cdef int done
- cdef int count
- count = 0
- done = 0
- while done == 0:
- if yaml_parser_scan(&self.parser, &token) == 0:
- error = self._parser_error()
- raise error
- if token.type == YAML_NO_TOKEN:
- done = 1
- else:
- count = count+1
- yaml_token_delete(&token)
- return count
-
- cdef object _scan(self):
- cdef yaml_token_t token
- if yaml_parser_scan(&self.parser, &token) == 0:
- error = self._parser_error()
- raise error
- token_object = self._token_to_object(&token)
- yaml_token_delete(&token)
- return token_object
-
- cdef object _token_to_object(self, yaml_token_t *token):
- start_mark = Mark(self.stream_name,
- token.start_mark.index,
- token.start_mark.line,
- token.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- token.end_mark.index,
- token.end_mark.line,
- token.end_mark.column,
- None, None)
- if token.type == YAML_NO_TOKEN:
- return None
- elif token.type == YAML_STREAM_START_TOKEN:
- encoding = None
- if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
- if self.unicode_source == 0:
- encoding = u"utf-8"
- elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
- encoding = u"utf-16-le"
- elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
- encoding = u"utf-16-be"
- return StreamStartToken(start_mark, end_mark, encoding)
- elif token.type == YAML_STREAM_END_TOKEN:
- return StreamEndToken(start_mark, end_mark)
- elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
- return DirectiveToken(u"YAML",
- (token.data.version_directive.major,
- token.data.version_directive.minor),
- start_mark, end_mark)
- elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
+ self.current_token = None
+ self.current_event = None
+ self.anchors = {}
+
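# __init__ duck-types its argument: anything with a .read attribute is treated
# as a stream, otherwise a byte or unicode string is expected. A sketch of the
# two accepted call forms (CParser is a base of yaml.CLoader; the file name is
# hypothetical):
#
# from yaml import CLoader
# CLoader(u"a: 1")                    # in-memory string input
# CLoader(open("doc.yaml", "rb"))     # readable stream input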
+ def __dealloc__(self):
+ yaml_parser_delete(&self.parser)
+ yaml_event_delete(&self.parsed_event)
+
+ def dispose(self):
+ pass
+
+ cdef object _parser_error(self):
+ if self.parser.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.parser.error == YAML_READER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, '?', self.parser.problem)
+ else:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
+ elif self.parser.error == YAML_SCANNER_ERROR \
+ or self.parser.error == YAML_PARSER_ERROR:
+ context_mark = None
+ problem_mark = None
+ if self.parser.context != NULL:
+ context_mark = Mark(self.stream_name,
+ self.parser.context_mark.index,
+ self.parser.context_mark.line,
+ self.parser.context_mark.column, None, None)
+ if self.parser.problem != NULL:
+ problem_mark = Mark(self.stream_name,
+ self.parser.problem_mark.index,
+ self.parser.problem_mark.line,
+ self.parser.problem_mark.column, None, None)
+ context = None
+ if self.parser.context != NULL:
+ if PY_MAJOR_VERSION < 3:
+ context = self.parser.context
+ else:
+ context = PyUnicode_FromString(self.parser.context)
+ if PY_MAJOR_VERSION < 3:
+ problem = self.parser.problem
+ else:
+ problem = PyUnicode_FromString(self.parser.problem)
+ if self.parser.error == YAML_SCANNER_ERROR:
+ return ScannerError(context, context_mark, problem, problem_mark)
+ else:
+ return ParserError(context, context_mark, problem, problem_mark)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no parser error")
+ else:
+ raise ValueError(u"no parser error")
+
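# _parser_error only builds the exception object; its callers raise it. A
# sketch of what reaches Python code (assumes yaml.CLoader):
#
# import yaml
# try:
#     list(yaml.parse("a: b: c", Loader=yaml.CLoader))
# except yaml.YAMLError as exc:
#     print(exc)          # message and marks assembled by _parser_error()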
+ def raw_scan(self):
+ cdef yaml_token_t token
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ if token.type == YAML_NO_TOKEN:
+ done = 1
+ else:
+ count = count+1
+ yaml_token_delete(&token)
+ return count
+
+ cdef object _scan(self):
+ cdef yaml_token_t token
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ token_object = self._token_to_object(&token)
+ yaml_token_delete(&token)
+ return token_object
+
+ cdef object _token_to_object(self, yaml_token_t *token):
+ start_mark = Mark(self.stream_name,
+ token.start_mark.index,
+ token.start_mark.line,
+ token.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ token.end_mark.index,
+ token.end_mark.line,
+ token.end_mark.column,
+ None, None)
+ if token.type == YAML_NO_TOKEN:
+ return None
+ elif token.type == YAML_STREAM_START_TOKEN:
+ encoding = None
+ if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartToken(start_mark, end_mark, encoding)
+ elif token.type == YAML_STREAM_END_TOKEN:
+ return StreamEndToken(start_mark, end_mark)
+ elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
+ return DirectiveToken(u"YAML",
+ (token.data.version_directive.major,
+ token.data.version_directive.minor),
+ start_mark, end_mark)
+ elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
handle = PyUnicode_FromString(token.data.tag_directive.handle)
prefix = PyUnicode_FromString(token.data.tag_directive.prefix)
- return DirectiveToken(u"TAG", (handle, prefix),
- start_mark, end_mark)
- elif token.type == YAML_DOCUMENT_START_TOKEN:
- return DocumentStartToken(start_mark, end_mark)
- elif token.type == YAML_DOCUMENT_END_TOKEN:
- return DocumentEndToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
- return BlockSequenceStartToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
- return BlockMappingStartToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_END_TOKEN:
- return BlockEndToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
- return FlowSequenceStartToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
- return FlowSequenceEndToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
- return FlowMappingStartToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
- return FlowMappingEndToken(start_mark, end_mark)
- elif token.type == YAML_BLOCK_ENTRY_TOKEN:
- return BlockEntryToken(start_mark, end_mark)
- elif token.type == YAML_FLOW_ENTRY_TOKEN:
- return FlowEntryToken(start_mark, end_mark)
- elif token.type == YAML_KEY_TOKEN:
- return KeyToken(start_mark, end_mark)
- elif token.type == YAML_VALUE_TOKEN:
- return ValueToken(start_mark, end_mark)
- elif token.type == YAML_ALIAS_TOKEN:
- value = PyUnicode_FromString(token.data.alias.value)
- return AliasToken(value, start_mark, end_mark)
- elif token.type == YAML_ANCHOR_TOKEN:
- value = PyUnicode_FromString(token.data.anchor.value)
- return AnchorToken(value, start_mark, end_mark)
- elif token.type == YAML_TAG_TOKEN:
+ return DirectiveToken(u"TAG", (handle, prefix),
+ start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_START_TOKEN:
+ return DocumentStartToken(start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_END_TOKEN:
+ return DocumentEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
+ return BlockSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
+ return BlockMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_END_TOKEN:
+ return BlockEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
+ return FlowSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
+ return FlowSequenceEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
+ return FlowMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
+ return FlowMappingEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_ENTRY_TOKEN:
+ return BlockEntryToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_ENTRY_TOKEN:
+ return FlowEntryToken(start_mark, end_mark)
+ elif token.type == YAML_KEY_TOKEN:
+ return KeyToken(start_mark, end_mark)
+ elif token.type == YAML_VALUE_TOKEN:
+ return ValueToken(start_mark, end_mark)
+ elif token.type == YAML_ALIAS_TOKEN:
+ value = PyUnicode_FromString(token.data.alias.value)
+ return AliasToken(value, start_mark, end_mark)
+ elif token.type == YAML_ANCHOR_TOKEN:
+ value = PyUnicode_FromString(token.data.anchor.value)
+ return AnchorToken(value, start_mark, end_mark)
+ elif token.type == YAML_TAG_TOKEN:
handle = PyUnicode_FromString(token.data.tag.handle)
suffix = PyUnicode_FromString(token.data.tag.suffix)
- if not handle:
- handle = None
- return TagToken((handle, suffix), start_mark, end_mark)
- elif token.type == YAML_SCALAR_TOKEN:
+ if not handle:
+ handle = None
+ return TagToken((handle, suffix), start_mark, end_mark)
+ elif token.type == YAML_SCALAR_TOKEN:
value = PyUnicode_DecodeUTF8(token.data.scalar.value,
- token.data.scalar.length, 'strict')
- plain = False
- style = None
- if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- plain = True
- style = u''
- elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- return ScalarToken(value, plain,
- start_mark, end_mark, style)
- else:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("unknown token type")
- else:
- raise ValueError(u"unknown token type")
-
- def get_token(self):
- if self.current_token is not None:
- value = self.current_token
- self.current_token = None
- else:
- value = self._scan()
- return value
-
- def peek_token(self):
- if self.current_token is None:
- self.current_token = self._scan()
- return self.current_token
-
- def check_token(self, *choices):
- if self.current_token is None:
- self.current_token = self._scan()
- if self.current_token is None:
- return False
- if not choices:
- return True
- token_class = self.current_token.__class__
- for choice in choices:
- if token_class is choice:
- return True
- return False
-
- def raw_parse(self):
- cdef yaml_event_t event
- cdef int done
- cdef int count
- count = 0
- done = 0
- while done == 0:
- if yaml_parser_parse(&self.parser, &event) == 0:
- error = self._parser_error()
- raise error
- if event.type == YAML_NO_EVENT:
- done = 1
- else:
- count = count+1
- yaml_event_delete(&event)
- return count
-
- cdef object _parse(self):
- cdef yaml_event_t event
- if yaml_parser_parse(&self.parser, &event) == 0:
- error = self._parser_error()
- raise error
- event_object = self._event_to_object(&event)
- yaml_event_delete(&event)
- return event_object
-
- cdef object _event_to_object(self, yaml_event_t *event):
- cdef yaml_tag_directive_t *tag_directive
- start_mark = Mark(self.stream_name,
- event.start_mark.index,
- event.start_mark.line,
- event.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- event.end_mark.index,
- event.end_mark.line,
- event.end_mark.column,
- None, None)
- if event.type == YAML_NO_EVENT:
- return None
- elif event.type == YAML_STREAM_START_EVENT:
- encoding = None
- if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
- if self.unicode_source == 0:
- encoding = u"utf-8"
- elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
- encoding = u"utf-16-le"
- elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
- encoding = u"utf-16-be"
- return StreamStartEvent(start_mark, end_mark, encoding)
- elif event.type == YAML_STREAM_END_EVENT:
- return StreamEndEvent(start_mark, end_mark)
- elif event.type == YAML_DOCUMENT_START_EVENT:
- explicit = False
- if event.data.document_start.implicit == 0:
- explicit = True
- version = None
- if event.data.document_start.version_directive != NULL:
- version = (event.data.document_start.version_directive.major,
- event.data.document_start.version_directive.minor)
- tags = None
- if event.data.document_start.tag_directives.start != NULL:
- tags = {}
- tag_directive = event.data.document_start.tag_directives.start
- while tag_directive != event.data.document_start.tag_directives.end:
+ token.data.scalar.length, 'strict')
+ plain = False
+ style = None
+ if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ plain = True
+ style = u''
+ elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarToken(value, plain,
+ start_mark, end_mark, style)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown token type")
+ else:
+ raise ValueError(u"unknown token type")
+
+ def get_token(self):
+ if self.current_token is not None:
+ value = self.current_token
+ self.current_token = None
+ else:
+ value = self._scan()
+ return value
+
+ def peek_token(self):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ return self.current_token
+
+ def check_token(self, *choices):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ if self.current_token is None:
+ return False
+ if not choices:
+ return True
+ token_class = self.current_token.__class__
+ for choice in choices:
+ if token_class is choice:
+ return True
+ return False
+
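# get_token/peek_token/check_token implement the scanner interface that
# yaml.scan() drives; a usage sketch (assumes yaml.CLoader is available):
#
# import yaml
# for token in yaml.scan("key: value", Loader=yaml.CLoader):
#     print(type(token).__name__, token.start_mark)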
+ def raw_parse(self):
+ cdef yaml_event_t event
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ if event.type == YAML_NO_EVENT:
+ done = 1
+ else:
+ count = count+1
+ yaml_event_delete(&event)
+ return count
+
+ cdef object _parse(self):
+ cdef yaml_event_t event
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ event_object = self._event_to_object(&event)
+ yaml_event_delete(&event)
+ return event_object
+
+ cdef object _event_to_object(self, yaml_event_t *event):
+ cdef yaml_tag_directive_t *tag_directive
+ start_mark = Mark(self.stream_name,
+ event.start_mark.index,
+ event.start_mark.line,
+ event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ event.end_mark.index,
+ event.end_mark.line,
+ event.end_mark.column,
+ None, None)
+ if event.type == YAML_NO_EVENT:
+ return None
+ elif event.type == YAML_STREAM_START_EVENT:
+ encoding = None
+ if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartEvent(start_mark, end_mark, encoding)
+ elif event.type == YAML_STREAM_END_EVENT:
+ return StreamEndEvent(start_mark, end_mark)
+ elif event.type == YAML_DOCUMENT_START_EVENT:
+ explicit = False
+ if event.data.document_start.implicit == 0:
+ explicit = True
+ version = None
+ if event.data.document_start.version_directive != NULL:
+ version = (event.data.document_start.version_directive.major,
+ event.data.document_start.version_directive.minor)
+ tags = None
+ if event.data.document_start.tag_directives.start != NULL:
+ tags = {}
+ tag_directive = event.data.document_start.tag_directives.start
+ while tag_directive != event.data.document_start.tag_directives.end:
handle = PyUnicode_FromString(tag_directive.handle)
prefix = PyUnicode_FromString(tag_directive.prefix)
- tags[handle] = prefix
- tag_directive = tag_directive+1
- return DocumentStartEvent(start_mark, end_mark,
- explicit, version, tags)
- elif event.type == YAML_DOCUMENT_END_EVENT:
- explicit = False
- if event.data.document_end.implicit == 0:
- explicit = True
- return DocumentEndEvent(start_mark, end_mark, explicit)
- elif event.type == YAML_ALIAS_EVENT:
- anchor = PyUnicode_FromString(event.data.alias.anchor)
- return AliasEvent(anchor, start_mark, end_mark)
- elif event.type == YAML_SCALAR_EVENT:
- anchor = None
- if event.data.scalar.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.scalar.anchor)
- tag = None
- if event.data.scalar.tag != NULL:
- tag = PyUnicode_FromString(event.data.scalar.tag)
+ tags[handle] = prefix
+ tag_directive = tag_directive+1
+ return DocumentStartEvent(start_mark, end_mark,
+ explicit, version, tags)
+ elif event.type == YAML_DOCUMENT_END_EVENT:
+ explicit = False
+ if event.data.document_end.implicit == 0:
+ explicit = True
+ return DocumentEndEvent(start_mark, end_mark, explicit)
+ elif event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(event.data.alias.anchor)
+ return AliasEvent(anchor, start_mark, end_mark)
+ elif event.type == YAML_SCALAR_EVENT:
+ anchor = None
+ if event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.scalar.anchor)
+ tag = None
+ if event.data.scalar.tag != NULL:
+ tag = PyUnicode_FromString(event.data.scalar.tag)
value = PyUnicode_DecodeUTF8(event.data.scalar.value,
- event.data.scalar.length, 'strict')
- plain_implicit = False
- if event.data.scalar.plain_implicit == 1:
- plain_implicit = True
- quoted_implicit = False
- if event.data.scalar.quoted_implicit == 1:
- quoted_implicit = True
- style = None
- if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- style = u''
- elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- return ScalarEvent(anchor, tag,
- (plain_implicit, quoted_implicit),
- value, start_mark, end_mark, style)
- elif event.type == YAML_SEQUENCE_START_EVENT:
- anchor = None
- if event.data.sequence_start.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
- tag = None
- if event.data.sequence_start.tag != NULL:
- tag = PyUnicode_FromString(event.data.sequence_start.tag)
- implicit = False
- if event.data.sequence_start.implicit == 1:
- implicit = True
- flow_style = None
- if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
- flow_style = True
- elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
- flow_style = False
- return SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style)
- elif event.type == YAML_MAPPING_START_EVENT:
- anchor = None
- if event.data.mapping_start.anchor != NULL:
- anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
- tag = None
- if event.data.mapping_start.tag != NULL:
- tag = PyUnicode_FromString(event.data.mapping_start.tag)
- implicit = False
- if event.data.mapping_start.implicit == 1:
- implicit = True
- flow_style = None
- if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
- flow_style = True
- elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
- flow_style = False
- return MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style)
- elif event.type == YAML_SEQUENCE_END_EVENT:
- return SequenceEndEvent(start_mark, end_mark)
- elif event.type == YAML_MAPPING_END_EVENT:
- return MappingEndEvent(start_mark, end_mark)
- else:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("unknown event type")
- else:
- raise ValueError(u"unknown event type")
-
- def get_event(self):
- if self.current_event is not None:
- value = self.current_event
- self.current_event = None
- else:
- value = self._parse()
- return value
-
- def peek_event(self):
- if self.current_event is None:
- self.current_event = self._parse()
- return self.current_event
-
- def check_event(self, *choices):
- if self.current_event is None:
- self.current_event = self._parse()
- if self.current_event is None:
- return False
- if not choices:
- return True
- event_class = self.current_event.__class__
- for choice in choices:
- if event_class is choice:
- return True
- return False
-
- def check_node(self):
- self._parse_next_event()
- if self.parsed_event.type == YAML_STREAM_START_EVENT:
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- return True
- return False
-
- def get_node(self):
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- return self._compose_document()
-
- def get_single_node(self):
- self._parse_next_event()
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- document = None
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- document = self._compose_document()
- self._parse_next_event()
- if self.parsed_event.type != YAML_STREAM_END_EVENT:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document", mark)
- else:
- raise ComposerError(u"expected a single document in the stream",
- document.start_mark, u"but found another document", mark)
- return document
-
- cdef object _compose_document(self):
- yaml_event_delete(&self.parsed_event)
- node = self._compose_node(None, None)
- self._parse_next_event()
- yaml_event_delete(&self.parsed_event)
- self.anchors = {}
- return node
-
- cdef object _compose_node(self, object parent, object index):
- self._parse_next_event()
- if self.parsed_event.type == YAML_ALIAS_EVENT:
- anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
- if anchor not in self.anchors:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
- raise ComposerError(None, None, "found undefined alias", mark)
- else:
- raise ComposerError(None, None, u"found undefined alias", mark)
- yaml_event_delete(&self.parsed_event)
- return self.anchors[anchor]
- anchor = None
- if self.parsed_event.type == YAML_SCALAR_EVENT \
- and self.parsed_event.data.scalar.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
- elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
- and self.parsed_event.data.sequence_start.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
- elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
- and self.parsed_event.data.mapping_start.anchor != NULL:
- anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
- if anchor is not None:
- if anchor in self.anchors:
- mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- if PY_MAJOR_VERSION < 3:
+ event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ style = None
+ if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarEvent(anchor, tag,
+ (plain_implicit, quoted_implicit),
+ value, start_mark, end_mark, style)
+ elif event.type == YAML_SEQUENCE_START_EVENT:
+ anchor = None
+ if event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
+ tag = None
+ if event.data.sequence_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.sequence_start.tag)
+ implicit = False
+ if event.data.sequence_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ return SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_MAPPING_START_EVENT:
+ anchor = None
+ if event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
+ tag = None
+ if event.data.mapping_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.mapping_start.tag)
+ implicit = False
+ if event.data.mapping_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ return MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_SEQUENCE_END_EVENT:
+ return SequenceEndEvent(start_mark, end_mark)
+ elif event.type == YAML_MAPPING_END_EVENT:
+ return MappingEndEvent(start_mark, end_mark)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown event type")
+ else:
+ raise ValueError(u"unknown event type")
+
+ def get_event(self):
+ if self.current_event is not None:
+ value = self.current_event
+ self.current_event = None
+ else:
+ value = self._parse()
+ return value
+
+ def peek_event(self):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ return self.current_event
+
+ def check_event(self, *choices):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ if self.current_event is None:
+ return False
+ if not choices:
+ return True
+ event_class = self.current_event.__class__
+ for choice in choices:
+ if event_class is choice:
+ return True
+ return False
+
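# The same three-method pattern serves the event interface used by
# yaml.parse(); a usage sketch (assumes yaml.CLoader):
#
# import yaml
# for event in yaml.parse("- a\n- b\n", Loader=yaml.CLoader):
#     print(event)        # StreamStartEvent, ..., StreamEndEvent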
+ def check_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_STREAM_START_EVENT:
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return True
+ return False
+
+ def get_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return self._compose_document()
+
+ def get_single_node(self):
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ document = None
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ document = self._compose_document()
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document", mark)
+ else:
+ raise ComposerError(u"expected a single document in the stream",
+ document.start_mark, u"but found another document", mark)
+ return document
+
+ cdef object _compose_document(self):
+ yaml_event_delete(&self.parsed_event)
+ node = self._compose_node(None, None)
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self.anchors = {}
+ return node
+
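# check_node/get_node/get_single_node implement the composer interface behind
# yaml.compose(); a usage sketch (assumes yaml.CLoader):
#
# import yaml
# node = yaml.compose("a: [1, 2]", Loader=yaml.CLoader)
# node.tag                # u'tag:yaml.org,2002:map'
# node.value              # list of (key_node, value_node) pairs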
+ cdef object _compose_node(self, object parent, object index):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
+ if anchor not in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError(None, None, "found undefined alias", mark)
+ else:
+ raise ComposerError(None, None, u"found undefined alias", mark)
+ yaml_event_delete(&self.parsed_event)
+ return self.anchors[anchor]
+ anchor = None
+ if self.parsed_event.type == YAML_SCALAR_EVENT \
+ and self.parsed_event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
+ and self.parsed_event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
+ and self.parsed_event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
+ if anchor is not None:
+ if anchor in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
raise ComposerError("found duplicate anchor; first occurrence",
self.anchors[anchor].start_mark, "second occurrence", mark)
- else:
+ else:
raise ComposerError(u"found duplicate anchor; first occurrence",
self.anchors[anchor].start_mark, u"second occurrence", mark)
- self.descend_resolver(parent, index)
- if self.parsed_event.type == YAML_SCALAR_EVENT:
- node = self._compose_scalar_node(anchor)
- elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
- node = self._compose_sequence_node(anchor)
- elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
- node = self._compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- cdef _compose_scalar_node(self, object anchor):
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
+ self.descend_resolver(parent, index)
+ if self.parsed_event.type == YAML_SCALAR_EVENT:
+ node = self._compose_scalar_node(anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
+ node = self._compose_sequence_node(anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
+ node = self._compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ cdef _compose_scalar_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
value = PyUnicode_DecodeUTF8(self.parsed_event.data.scalar.value,
- self.parsed_event.data.scalar.length, 'strict')
- plain_implicit = False
- if self.parsed_event.data.scalar.plain_implicit == 1:
- plain_implicit = True
- quoted_implicit = False
- if self.parsed_event.data.scalar.quoted_implicit == 1:
- quoted_implicit = True
- if self.parsed_event.data.scalar.tag == NULL \
- or (self.parsed_event.data.scalar.tag[0] == c'!'
- and self.parsed_event.data.scalar.tag[1] == c'\0'):
- tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
- style = None
- if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
- style = u''
- elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
- style = u'\''
- elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
- style = u'"'
- elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
- style = u'|'
- elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
- style = u'>'
- node = ScalarNode(tag, value, start_mark, end_mark, style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef _compose_sequence_node(self, object anchor):
- cdef int index
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- implicit = False
- if self.parsed_event.data.sequence_start.implicit == 1:
- implicit = True
- if self.parsed_event.data.sequence_start.tag == NULL \
- or (self.parsed_event.data.sequence_start.tag[0] == c'!'
- and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
- tag = self.resolve(SequenceNode, None, implicit)
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
- flow_style = None
- if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
- flow_style = True
- elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
- flow_style = False
- value = []
- node = SequenceNode(tag, value, start_mark, None, flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- index = 0
- self._parse_next_event()
- while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
- value.append(self._compose_node(node, index))
- index = index+1
- self._parse_next_event()
- node.end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef _compose_mapping_node(self, object anchor):
- start_mark = Mark(self.stream_name,
- self.parsed_event.start_mark.index,
- self.parsed_event.start_mark.line,
- self.parsed_event.start_mark.column,
- None, None)
- implicit = False
- if self.parsed_event.data.mapping_start.implicit == 1:
- implicit = True
- if self.parsed_event.data.mapping_start.tag == NULL \
- or (self.parsed_event.data.mapping_start.tag[0] == c'!'
- and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
- tag = self.resolve(MappingNode, None, implicit)
- else:
- tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
- flow_style = None
- if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
- flow_style = True
- elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
- flow_style = False
- value = []
- node = MappingNode(tag, value, start_mark, None, flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- yaml_event_delete(&self.parsed_event)
- self._parse_next_event()
- while self.parsed_event.type != YAML_MAPPING_END_EVENT:
- item_key = self._compose_node(node, None)
- item_value = self._compose_node(node, item_key)
- value.append((item_key, item_value))
- self._parse_next_event()
- node.end_mark = Mark(self.stream_name,
- self.parsed_event.end_mark.index,
- self.parsed_event.end_mark.line,
- self.parsed_event.end_mark.column,
- None, None)
- yaml_event_delete(&self.parsed_event)
- return node
-
- cdef int _parse_next_event(self) except 0:
- if self.parsed_event.type == YAML_NO_EVENT:
- if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
- error = self._parser_error()
- raise error
- return 1
-
+ self.parsed_event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if self.parsed_event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if self.parsed_event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ if self.parsed_event.data.scalar.tag == NULL \
+ or (self.parsed_event.data.scalar.tag[0] == c'!'
+ and self.parsed_event.data.scalar.tag[1] == c'\0'):
+ tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
+ style = None
+ if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ node = ScalarNode(tag, value, start_mark, end_mark, style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ return node
+
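The style branch above maps libyaml's scalar-style enum onto the one-character style markers PyYAML nodes carry (plain '', single/double quoted, literal '|', folded '>'). A minimal sketch of the same mapping as seen through the public pure-Python API (stock PyYAML assumed; not part of this patch):

    import yaml

    # Composing a literal block scalar records style '|' on the node,
    # matching the YAML_LITERAL_SCALAR_STYLE branch above.
    node = yaml.compose('msg: |\n  line1\n  line2\n')
    scalar = node.value[0][1]          # value node of the first key/value pair
    print(scalar.style)                # '|'
    print(repr(scalar.value))          # 'line1\nline2\n'
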
+ cdef _compose_sequence_node(self, object anchor):
+ cdef int index
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.sequence_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.sequence_start.tag == NULL \
+ or (self.parsed_event.data.sequence_start.tag[0] == c'!'
+ and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
+ tag = self.resolve(SequenceNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
+ flow_style = None
+ if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ value = []
+ node = SequenceNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ index = 0
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
+ value.append(self._compose_node(node, index))
+ index = index+1
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_mapping_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.mapping_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.mapping_start.tag == NULL \
+ or (self.parsed_event.data.mapping_start.tag[0] == c'!'
+ and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
+ tag = self.resolve(MappingNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
+ flow_style = None
+ if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ value = []
+ node = MappingNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_MAPPING_END_EVENT:
+ item_key = self._compose_node(node, None)
+ item_value = self._compose_node(node, item_key)
+ value.append((item_key, item_value))
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef int _parse_next_event(self) except 0:
+ if self.parsed_event.type == YAML_NO_EVENT:
+ if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
+ error = self._parser_error()
+ raise error
+ return 1
+
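_parse_next_event pulls a fresh event from libyaml only once the previous one has been consumed, and the _compose_* methods above turn that event stream into nodes, falling back to the resolver whenever libyaml reports no tag (or only the non-specific '!'). The public entry point exercises the same pipeline; with the C extension built, yaml.CLoader routes through this CParser:

    import yaml

    node = yaml.compose('- 1\n- two\n')
    print(node.tag)                        # tag:yaml.org,2002:seq (resolved)
    print([c.tag for c in node.value])     # ...:int then ...:str
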
cdef int input_handler(void *data, char *buffer, size_t size, size_t *read) except 0:
- cdef CParser parser
- parser = <CParser>data
- if parser.stream_cache is None:
- value = parser.stream.read(size)
- if PyUnicode_CheckExact(value) != 0:
- value = PyUnicode_AsUTF8String(value)
- parser.unicode_source = 1
- if PyString_CheckExact(value) == 0:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("a string value is expected")
- else:
- raise TypeError(u"a string value is expected")
- parser.stream_cache = value
- parser.stream_cache_pos = 0
- parser.stream_cache_len = PyString_GET_SIZE(value)
- if (parser.stream_cache_len - parser.stream_cache_pos) < size:
- size = parser.stream_cache_len - parser.stream_cache_pos
- if size > 0:
+ cdef CParser parser
+ parser = <CParser>data
+ if parser.stream_cache is None:
+ value = parser.stream.read(size)
+ if PyUnicode_CheckExact(value) != 0:
+ value = PyUnicode_AsUTF8String(value)
+ parser.unicode_source = 1
+ if PyString_CheckExact(value) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string value is expected")
+ else:
+ raise TypeError(u"a string value is expected")
+ parser.stream_cache = value
+ parser.stream_cache_pos = 0
+ parser.stream_cache_len = PyString_GET_SIZE(value)
+ if (parser.stream_cache_len - parser.stream_cache_pos) < size:
+ size = parser.stream_cache_len - parser.stream_cache_pos
+ if size > 0:
memcpy(buffer, PyString_AS_STRING(parser.stream_cache)
- + parser.stream_cache_pos, size)
- read[0] = size
- parser.stream_cache_pos += size
- if parser.stream_cache_pos == parser.stream_cache_len:
- parser.stream_cache = None
- return 1
-
-cdef class CEmitter:
-
- cdef yaml_emitter_t emitter
-
- cdef object stream
-
- cdef int document_start_implicit
- cdef int document_end_implicit
- cdef object use_version
- cdef object use_tags
-
- cdef object serialized_nodes
- cdef object anchors
- cdef int last_alias_id
- cdef int closed
- cdef int dump_unicode
- cdef object use_encoding
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- if yaml_emitter_initialize(&self.emitter) == 0:
- raise MemoryError
- self.stream = stream
- self.dump_unicode = 0
- if PY_MAJOR_VERSION < 3:
- if getattr3(stream, 'encoding', None):
- self.dump_unicode = 1
- else:
- if hasattr(stream, u'encoding'):
- self.dump_unicode = 1
- self.use_encoding = encoding
+ + parser.stream_cache_pos, size)
+ read[0] = size
+ parser.stream_cache_pos += size
+ if parser.stream_cache_pos == parser.stream_cache_len:
+ parser.stream_cache = None
+ return 1
+
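input_handler adapts a Python stream to libyaml's pull model: it caches one read() result (encoding unicode sources to UTF-8), copies at most `size` bytes per callback, and drops the cache once exhausted. A rough pure-Python sketch of that chunking (read_chunk and the cache dict are illustrative names, not part of the module):

    import io

    def read_chunk(stream, cache, size):
        # Refill the cache from the stream, encoding str to UTF-8,
        # then hand back at most `size` bytes and advance the cursor.
        if cache['data'] is None:
            value = stream.read(size)
            if isinstance(value, str):
                value = value.encode('utf-8')
            cache['data'], cache['pos'] = value, 0
        chunk = cache['data'][cache['pos']:cache['pos'] + size]
        cache['pos'] += len(chunk)
        if cache['pos'] >= len(cache['data']):
            cache['data'] = None       # cache spent; next call refills
        return chunk

    cache = {'data': None, 'pos': 0}
    print(read_chunk(io.StringIO('key: value\n'), cache, 4))  # b'key:'
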
+cdef class CEmitter:
+
+ cdef yaml_emitter_t emitter
+
+ cdef object stream
+
+ cdef int document_start_implicit
+ cdef int document_end_implicit
+ cdef object use_version
+ cdef object use_tags
+
+ cdef object serialized_nodes
+ cdef object anchors
+ cdef int last_alias_id
+ cdef int closed
+ cdef int dump_unicode
+ cdef object use_encoding
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ if yaml_emitter_initialize(&self.emitter) == 0:
+ raise MemoryError
+ self.stream = stream
+ self.dump_unicode = 0
+ if PY_MAJOR_VERSION < 3:
+ if getattr3(stream, 'encoding', None):
+ self.dump_unicode = 1
+ else:
+ if hasattr(stream, u'encoding'):
+ self.dump_unicode = 1
+ self.use_encoding = encoding
yaml_emitter_set_output(&self.emitter, output_handler, <void *>self)
- if canonical:
- yaml_emitter_set_canonical(&self.emitter, 1)
- if indent is not None:
- yaml_emitter_set_indent(&self.emitter, indent)
- if width is not None:
- yaml_emitter_set_width(&self.emitter, width)
- if allow_unicode:
- yaml_emitter_set_unicode(&self.emitter, 1)
- if line_break is not None:
- if line_break == '\r':
- yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
- elif line_break == '\n':
- yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
- elif line_break == '\r\n':
- yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
- self.document_start_implicit = 1
- if explicit_start:
- self.document_start_implicit = 0
- self.document_end_implicit = 1
- if explicit_end:
- self.document_end_implicit = 0
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_alias_id = 0
- self.closed = -1
-
- def __dealloc__(self):
- yaml_emitter_delete(&self.emitter)
-
- def dispose(self):
- pass
-
- cdef object _emitter_error(self):
- if self.emitter.error == YAML_MEMORY_ERROR:
- return MemoryError
- elif self.emitter.error == YAML_EMITTER_ERROR:
- if PY_MAJOR_VERSION < 3:
- problem = self.emitter.problem
- else:
- problem = PyUnicode_FromString(self.emitter.problem)
- return EmitterError(problem)
- if PY_MAJOR_VERSION < 3:
- raise ValueError("no emitter error")
- else:
- raise ValueError(u"no emitter error")
-
- cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
- cdef yaml_encoding_t encoding
- cdef yaml_version_directive_t version_directive_value
- cdef yaml_version_directive_t *version_directive
- cdef yaml_tag_directive_t tag_directives_value[128]
- cdef yaml_tag_directive_t *tag_directives_start
- cdef yaml_tag_directive_t *tag_directives_end
- cdef int implicit
- cdef int plain_implicit
- cdef int quoted_implicit
- cdef char *anchor
- cdef char *tag
- cdef char *value
- cdef int length
- cdef yaml_scalar_style_t scalar_style
- cdef yaml_sequence_style_t sequence_style
- cdef yaml_mapping_style_t mapping_style
- event_class = event_object.__class__
- if event_class is StreamStartEvent:
- encoding = YAML_UTF8_ENCODING
- if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
- encoding = YAML_UTF16LE_ENCODING
- elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
- encoding = YAML_UTF16BE_ENCODING
- if event_object.encoding is None:
- self.dump_unicode = 1
- if self.dump_unicode == 1:
- encoding = YAML_UTF8_ENCODING
- yaml_stream_start_event_initialize(event, encoding)
- elif event_class is StreamEndEvent:
- yaml_stream_end_event_initialize(event)
- elif event_class is DocumentStartEvent:
- version_directive = NULL
- if event_object.version:
- version_directive_value.major = event_object.version[0]
- version_directive_value.minor = event_object.version[1]
- version_directive = &version_directive_value
- tag_directives_start = NULL
- tag_directives_end = NULL
- if event_object.tags:
- if len(event_object.tags) > 128:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("too many tags")
- else:
- raise ValueError(u"too many tags")
- tag_directives_start = tag_directives_value
- tag_directives_end = tag_directives_value
- cache = []
- for handle in event_object.tags:
- prefix = event_object.tags[handle]
- if PyUnicode_CheckExact(handle):
- handle = PyUnicode_AsUTF8String(handle)
- cache.append(handle)
- if not PyString_CheckExact(handle):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag handle must be a string")
- else:
- raise TypeError(u"tag handle must be a string")
+ if canonical:
+ yaml_emitter_set_canonical(&self.emitter, 1)
+ if indent is not None:
+ yaml_emitter_set_indent(&self.emitter, indent)
+ if width is not None:
+ yaml_emitter_set_width(&self.emitter, width)
+ if allow_unicode:
+ yaml_emitter_set_unicode(&self.emitter, 1)
+ if line_break is not None:
+ if line_break == '\r':
+ yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
+ elif line_break == '\n':
+ yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
+ elif line_break == '\r\n':
+ yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
+ self.document_start_implicit = 1
+ if explicit_start:
+ self.document_start_implicit = 0
+ self.document_end_implicit = 1
+ if explicit_end:
+ self.document_end_implicit = 0
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+ self.closed = -1
+
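The constructor above is where dump-time keyword options become yaml_emitter_set_* calls; the same keywords are accepted by the top-level dump API and reach this CEmitter when Dumper=yaml.CDumper is used:

    import yaml

    text = yaml.dump({'items': ['a', 'b']},
                     canonical=True, indent=4, width=60,
                     allow_unicode=True, line_break='\n',
                     explicit_start=True, explicit_end=True)
    print(text)   # canonical form with explicit '---' / '...' markers
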
+ def __dealloc__(self):
+ yaml_emitter_delete(&self.emitter)
+
+ def dispose(self):
+ pass
+
+ cdef object _emitter_error(self):
+ if self.emitter.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.emitter.error == YAML_EMITTER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ problem = self.emitter.problem
+ else:
+ problem = PyUnicode_FromString(self.emitter.problem)
+ return EmitterError(problem)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no emitter error")
+ else:
+ raise ValueError(u"no emitter error")
+
+ cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
+ cdef yaml_encoding_t encoding
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ event_class = event_object.__class__
+ if event_class is StreamStartEvent:
+ encoding = YAML_UTF8_ENCODING
+ if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ if event_object.encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(event, encoding)
+ elif event_class is StreamEndEvent:
+ yaml_stream_end_event_initialize(event)
+ elif event_class is DocumentStartEvent:
+ version_directive = NULL
+ if event_object.version:
+ version_directive_value.major = event_object.version[0]
+ version_directive_value.minor = event_object.version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if event_object.tags:
+ if len(event_object.tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ cache = []
+ for handle in event_object.tags:
+ prefix = event_object.tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
tag_directives_end.handle = PyString_AS_STRING(handle)
- if PyUnicode_CheckExact(prefix):
- prefix = PyUnicode_AsUTF8String(prefix)
- cache.append(prefix)
- if not PyString_CheckExact(prefix):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag prefix must be a string")
- else:
- raise TypeError(u"tag prefix must be a string")
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
tag_directives_end.prefix = PyString_AS_STRING(prefix)
- tag_directives_end = tag_directives_end+1
- implicit = 1
- if event_object.explicit:
- implicit = 0
- if yaml_document_start_event_initialize(event, version_directive,
- tag_directives_start, tag_directives_end, implicit) == 0:
- raise MemoryError
- elif event_class is DocumentEndEvent:
- implicit = 1
- if event_object.explicit:
- implicit = 0
- yaml_document_end_event_initialize(event, implicit)
- elif event_class is AliasEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
+ tag_directives_end = tag_directives_end+1
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ if yaml_document_start_event_initialize(event, version_directive,
+ tag_directives_start, tag_directives_end, implicit) == 0:
+ raise MemoryError
+ elif event_class is DocumentEndEvent:
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ yaml_document_end_event_initialize(event, implicit)
+ elif event_class is AliasEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
if yaml_alias_event_initialize(event, anchor) == 0:
- raise MemoryError
- elif event_class is ScalarEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- value_object = event_object.value
- if PyUnicode_CheckExact(value_object):
- value_object = PyUnicode_AsUTF8String(value_object)
- if not PyString_CheckExact(value_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("value must be a string")
- else:
- raise TypeError(u"value must be a string")
- value = PyString_AS_STRING(value_object)
- length = PyString_GET_SIZE(value_object)
- plain_implicit = 0
- quoted_implicit = 0
- if event_object.implicit is not None:
- plain_implicit = event_object.implicit[0]
- quoted_implicit = event_object.implicit[1]
- style_object = event_object.style
- scalar_style = YAML_PLAIN_SCALAR_STYLE
- if style_object == "'" or style_object == u"'":
- scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
- elif style_object == "\"" or style_object == u"\"":
- scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
- elif style_object == "|" or style_object == u"|":
- scalar_style = YAML_LITERAL_SCALAR_STYLE
- elif style_object == ">" or style_object == u">":
- scalar_style = YAML_FOLDED_SCALAR_STYLE
+ raise MemoryError
+ elif event_class is ScalarEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = event_object.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ plain_implicit = 0
+ quoted_implicit = 0
+ if event_object.implicit is not None:
+ plain_implicit = event_object.implicit[0]
+ quoted_implicit = event_object.implicit[1]
+ style_object = event_object.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
if yaml_scalar_event_initialize(event, anchor, tag, value, length,
- plain_implicit, quoted_implicit, scalar_style) == 0:
- raise MemoryError
- elif event_class is SequenceStartEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- implicit = 0
- if event_object.implicit:
- implicit = 1
- sequence_style = YAML_BLOCK_SEQUENCE_STYLE
- if event_object.flow_style:
- sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if event_object.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
if yaml_sequence_start_event_initialize(event, anchor, tag,
- implicit, sequence_style) == 0:
- raise MemoryError
- elif event_class is MappingStartEvent:
- anchor = NULL
- anchor_object = event_object.anchor
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- tag = NULL
- tag_object = event_object.tag
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- implicit = 0
- if event_object.implicit:
- implicit = 1
- mapping_style = YAML_BLOCK_MAPPING_STYLE
- if event_object.flow_style:
- mapping_style = YAML_FLOW_MAPPING_STYLE
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ elif event_class is MappingStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if event_object.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
if yaml_mapping_start_event_initialize(event, anchor, tag,
- implicit, mapping_style) == 0:
- raise MemoryError
- elif event_class is SequenceEndEvent:
- yaml_sequence_end_event_initialize(event)
- elif event_class is MappingEndEvent:
- yaml_mapping_end_event_initialize(event)
- else:
- if PY_MAJOR_VERSION < 3:
- raise TypeError("invalid event %s" % event_object)
- else:
- raise TypeError(u"invalid event %s" % event_object)
- return 1
-
- def emit(self, event_object):
- cdef yaml_event_t event
- self._object_to_event(event_object, &event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
-
- def open(self):
- cdef yaml_event_t event
- cdef yaml_encoding_t encoding
- if self.closed == -1:
- if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
- encoding = YAML_UTF16LE_ENCODING
- elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
- encoding = YAML_UTF16BE_ENCODING
- else:
- encoding = YAML_UTF8_ENCODING
- if self.use_encoding is None:
- self.dump_unicode = 1
- if self.dump_unicode == 1:
- encoding = YAML_UTF8_ENCODING
- yaml_stream_start_event_initialize(&event, encoding)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.closed = 0
- elif self.closed == 1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError(u"serializer is closed")
- else:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is already opened")
- else:
- raise SerializerError(u"serializer is already opened")
-
- def close(self):
- cdef yaml_event_t event
- if self.closed == -1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is not opened")
- else:
- raise SerializerError(u"serializer is not opened")
- elif self.closed == 0:
- yaml_stream_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.closed = 1
-
- def serialize(self, node):
- cdef yaml_event_t event
- cdef yaml_version_directive_t version_directive_value
- cdef yaml_version_directive_t *version_directive
- cdef yaml_tag_directive_t tag_directives_value[128]
- cdef yaml_tag_directive_t *tag_directives_start
- cdef yaml_tag_directive_t *tag_directives_end
- if self.closed == -1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is not opened")
- else:
- raise SerializerError(u"serializer is not opened")
- elif self.closed == 1:
- if PY_MAJOR_VERSION < 3:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError(u"serializer is closed")
- cache = []
- version_directive = NULL
- if self.use_version:
- version_directive_value.major = self.use_version[0]
- version_directive_value.minor = self.use_version[1]
- version_directive = &version_directive_value
- tag_directives_start = NULL
- tag_directives_end = NULL
- if self.use_tags:
- if len(self.use_tags) > 128:
- if PY_MAJOR_VERSION < 3:
- raise ValueError("too many tags")
- else:
- raise ValueError(u"too many tags")
- tag_directives_start = tag_directives_value
- tag_directives_end = tag_directives_value
- for handle in self.use_tags:
- prefix = self.use_tags[handle]
- if PyUnicode_CheckExact(handle):
- handle = PyUnicode_AsUTF8String(handle)
- cache.append(handle)
- if not PyString_CheckExact(handle):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag handle must be a string")
- else:
- raise TypeError(u"tag handle must be a string")
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceEndEvent:
+ yaml_sequence_end_event_initialize(event)
+ elif event_class is MappingEndEvent:
+ yaml_mapping_end_event_initialize(event)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("invalid event %s" % event_object)
+ else:
+ raise TypeError(u"invalid event %s" % event_object)
+ return 1
+
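_object_to_event translates each PyYAML event object into the corresponding yaml_event_t so that emit() can feed hand-built event streams straight to libyaml. A sketch of the same stream through the public API (yaml.CDumper routes it through CEmitter.emit when the extension is built):

    import yaml

    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(explicit=True),
        yaml.ScalarEvent(anchor=None, tag=None,
                         implicit=(True, False), value='hello'),
        yaml.DocumentEndEvent(explicit=False),
        yaml.StreamEndEvent(),
    ]
    print(yaml.emit(events))   # '--- hello\n'
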
+ def emit(self, event_object):
+ cdef yaml_event_t event
+ self._object_to_event(event_object, &event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+
+ def open(self):
+ cdef yaml_event_t event
+ cdef yaml_encoding_t encoding
+ if self.closed == -1:
+ if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ else:
+ encoding = YAML_UTF8_ENCODING
+ if self.use_encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(&event, encoding)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 0
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is already opened")
+ else:
+ raise SerializerError(u"serializer is already opened")
+
+ def close(self):
+ cdef yaml_event_t event
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 0:
+ yaml_stream_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 1
+
+ def serialize(self, node):
+ cdef yaml_event_t event
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ cache = []
+ version_directive = NULL
+ if self.use_version:
+ version_directive_value.major = self.use_version[0]
+ version_directive_value.minor = self.use_version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if self.use_tags:
+ if len(self.use_tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ for handle in self.use_tags:
+ prefix = self.use_tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
tag_directives_end.handle = PyString_AS_STRING(handle)
- if PyUnicode_CheckExact(prefix):
- prefix = PyUnicode_AsUTF8String(prefix)
- cache.append(prefix)
- if not PyString_CheckExact(prefix):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag prefix must be a string")
- else:
- raise TypeError(u"tag prefix must be a string")
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
tag_directives_end.prefix = PyString_AS_STRING(prefix)
- tag_directives_end = tag_directives_end+1
- if yaml_document_start_event_initialize(&event, version_directive,
- tag_directives_start, tag_directives_end,
- self.document_start_implicit) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self._anchor_node(node)
- self._serialize_node(node, None, None)
- yaml_document_end_event_initialize(&event, self.document_end_implicit)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_alias_id = 0
-
- cdef int _anchor_node(self, object node) except 0:
- if node in self.anchors:
- if self.anchors[node] is None:
- self.last_alias_id = self.last_alias_id+1
- self.anchors[node] = u"id%03d" % self.last_alias_id
- else:
- self.anchors[node] = None
- node_class = node.__class__
- if node_class is SequenceNode:
- for item in node.value:
- self._anchor_node(item)
- elif node_class is MappingNode:
- for key, value in node.value:
- self._anchor_node(key)
- self._anchor_node(value)
- return 1
-
- cdef int _serialize_node(self, object node, object parent, object index) except 0:
- cdef yaml_event_t event
- cdef int implicit
- cdef int plain_implicit
- cdef int quoted_implicit
- cdef char *anchor
- cdef char *tag
- cdef char *value
- cdef int length
- cdef int item_index
- cdef yaml_scalar_style_t scalar_style
- cdef yaml_sequence_style_t sequence_style
- cdef yaml_mapping_style_t mapping_style
- anchor_object = self.anchors[node]
- anchor = NULL
- if anchor_object is not None:
- if PyUnicode_CheckExact(anchor_object):
- anchor_object = PyUnicode_AsUTF8String(anchor_object)
- if not PyString_CheckExact(anchor_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("anchor must be a string")
- else:
- raise TypeError(u"anchor must be a string")
- anchor = PyString_AS_STRING(anchor_object)
- if node in self.serialized_nodes:
+ tag_directives_end = tag_directives_end+1
+ if yaml_document_start_event_initialize(&event, version_directive,
+ tag_directives_start, tag_directives_end,
+ self.document_start_implicit) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self._anchor_node(node)
+ self._serialize_node(node, None, None)
+ yaml_document_end_event_initialize(&event, self.document_end_implicit)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
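serialize() brackets the node with document start/end events and then recurses; _anchor_node (below) pre-assigns u"id%03d" anchors to any node reached twice, so _serialize_node can emit an alias on the second visit. A roundtrip sketch through the public API:

    import yaml

    node = yaml.compose('a: [1, 2]\n')
    print(yaml.serialize(node))    # 'a: [1, 2]\n' (flow style preserved)

    # An aliased node is composed to one shared object, so it comes
    # back as an &id001 / *id001 pair on serialization.
    shared = yaml.compose('x: &v [1]\ny: *v\n')
    print(yaml.serialize(shared))
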
+ cdef int _anchor_node(self, object node) except 0:
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.last_alias_id = self.last_alias_id+1
+ self.anchors[node] = u"id%03d" % self.last_alias_id
+ else:
+ self.anchors[node] = None
+ node_class = node.__class__
+ if node_class is SequenceNode:
+ for item in node.value:
+ self._anchor_node(item)
+ elif node_class is MappingNode:
+ for key, value in node.value:
+ self._anchor_node(key)
+ self._anchor_node(value)
+ return 1
+
+ cdef int _serialize_node(self, object node, object parent, object index) except 0:
+ cdef yaml_event_t event
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef int item_index
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ anchor_object = self.anchors[node]
+ anchor = NULL
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if node in self.serialized_nodes:
if yaml_alias_event_initialize(&event, anchor) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- else:
- node_class = node.__class__
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if node_class is ScalarNode:
- plain_implicit = 0
- quoted_implicit = 0
- tag_object = node.tag
- if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
- plain_implicit = 1
- if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
- quoted_implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- value_object = node.value
- if PyUnicode_CheckExact(value_object):
- value_object = PyUnicode_AsUTF8String(value_object)
- if not PyString_CheckExact(value_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("value must be a string")
- else:
- raise TypeError(u"value must be a string")
- value = PyString_AS_STRING(value_object)
- length = PyString_GET_SIZE(value_object)
- style_object = node.style
- scalar_style = YAML_PLAIN_SCALAR_STYLE
- if style_object == "'" or style_object == u"'":
- scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
- elif style_object == "\"" or style_object == u"\"":
- scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
- elif style_object == "|" or style_object == u"|":
- scalar_style = YAML_LITERAL_SCALAR_STYLE
- elif style_object == ">" or style_object == u">":
- scalar_style = YAML_FOLDED_SCALAR_STYLE
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ else:
+ node_class = node.__class__
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if node_class is ScalarNode:
+ plain_implicit = 0
+ quoted_implicit = 0
+ tag_object = node.tag
+ if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
+ plain_implicit = 1
+ if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
+ quoted_implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = node.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ style_object = node.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
if yaml_scalar_event_initialize(&event, anchor, tag, value, length,
- plain_implicit, quoted_implicit, scalar_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- elif node_class is SequenceNode:
- implicit = 0
- tag_object = node.tag
- if self.resolve(SequenceNode, node.value, True) == tag_object:
- implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- sequence_style = YAML_BLOCK_SEQUENCE_STYLE
- if node.flow_style:
- sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is SequenceNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(SequenceNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if node.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
if yaml_sequence_start_event_initialize(&event, anchor, tag,
- implicit, sequence_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- item_index = 0
- for item in node.value:
- self._serialize_node(item, node, item_index)
- item_index = item_index+1
- yaml_sequence_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- elif node_class is MappingNode:
- implicit = 0
- tag_object = node.tag
- if self.resolve(MappingNode, node.value, True) == tag_object:
- implicit = 1
- tag = NULL
- if tag_object is not None:
- if PyUnicode_CheckExact(tag_object):
- tag_object = PyUnicode_AsUTF8String(tag_object)
- if not PyString_CheckExact(tag_object):
- if PY_MAJOR_VERSION < 3:
- raise TypeError("tag must be a string")
- else:
- raise TypeError(u"tag must be a string")
- tag = PyString_AS_STRING(tag_object)
- mapping_style = YAML_BLOCK_MAPPING_STYLE
- if node.flow_style:
- mapping_style = YAML_FLOW_MAPPING_STYLE
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ item_index = 0
+ for item in node.value:
+ self._serialize_node(item, node, item_index)
+ item_index = item_index+1
+ yaml_sequence_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is MappingNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(MappingNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if node.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
if yaml_mapping_start_event_initialize(&event, anchor, tag,
- implicit, mapping_style) == 0:
- raise MemoryError
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- for item_key, item_value in node.value:
- self._serialize_node(item_key, node, None)
- self._serialize_node(item_value, node, item_key)
- yaml_mapping_end_event_initialize(&event)
- if yaml_emitter_emit(&self.emitter, &event) == 0:
- error = self._emitter_error()
- raise error
- self.ascend_resolver()
- return 1
-
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ for item_key, item_value in node.value:
+ self._serialize_node(item_key, node, None)
+ self._serialize_node(item_value, node, item_key)
+ yaml_mapping_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.ascend_resolver()
+ return 1
+
cdef int output_handler(void *data, char *buffer, size_t size) except 0:
- cdef CEmitter emitter
- emitter = <CEmitter>data
- if emitter.dump_unicode == 0:
+ cdef CEmitter emitter
+ emitter = <CEmitter>data
+ if emitter.dump_unicode == 0:
value = PyString_FromStringAndSize(buffer, size)
- else:
+ else:
value = PyUnicode_DecodeUTF8(buffer, size, 'strict')
- emitter.stream.write(value)
- return 1
-
+ emitter.stream.write(value)
+ return 1
+
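output_handler is the write-side twin of input_handler: libyaml hands back byte buffers, and dump_unicode decides whether stream.write() receives decoded text or raw bytes. Both paths are reachable from the public API (sketch, stock PyYAML assumed):

    import io
    import yaml

    text_out = io.StringIO()
    yaml.dump({'a': 1}, stream=text_out)                    # text chunks
    byte_out = io.BytesIO()
    yaml.dump({'a': 1}, stream=byte_out, encoding='utf-8')  # byte chunks
    print(repr(text_out.getvalue()), repr(byte_out.getvalue()))
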
diff --git a/contrib/python/PyYAML/ya.make b/contrib/python/PyYAML/ya.make
index ae05873a52..a604fce51f 100644
--- a/contrib/python/PyYAML/ya.make
+++ b/contrib/python/PyYAML/ya.make
@@ -1,5 +1,5 @@
PY23_LIBRARY()
-
+
LICENSE(MIT)
OWNER(g:python-contrib)
@@ -13,7 +13,7 @@ ELSE()
contrib/python/PyYAML/py3
)
ENDIF()
-
+
NO_LINT()
END()
diff --git a/contrib/python/cffi/ya.make b/contrib/python/cffi/ya.make
index 2c9e58e0b4..7bff9477f9 100644
--- a/contrib/python/cffi/ya.make
+++ b/contrib/python/cffi/ya.make
@@ -7,12 +7,12 @@ OWNER(g:python-contrib)
VERSION(1.15.0)
PEERDIR(
- contrib/restricted/libffi
+ contrib/restricted/libffi
contrib/python/pycparser
)
ADDINCL(
- contrib/restricted/libffi/include
+ contrib/restricted/libffi/include
)
NO_COMPILER_WARNINGS()
diff --git a/contrib/python/decorator/LICENSE.txt b/contrib/python/decorator/LICENSE.txt
index 9d8e5641c5..b0ade0487e 100644
--- a/contrib/python/decorator/LICENSE.txt
+++ b/contrib/python/decorator/LICENSE.txt
@@ -1,26 +1,26 @@
Copyright (c) 2005-2018, Michele Simionato
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- Redistributions in bytecode form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ Redistributions in bytecode form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/contrib/python/decorator/decorator.py b/contrib/python/decorator/decorator.py
index 5f595c7014..b1f8b567e9 100644
--- a/contrib/python/decorator/decorator.py
+++ b/contrib/python/decorator/decorator.py
@@ -1,64 +1,64 @@
-# ######################### LICENSE ############################ #
-
+# ######################### LICENSE ############################ #
+
# Copyright (c) 2005-2018, Michele Simionato
-# All rights reserved.
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-
-# Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# Redistributions in bytecode form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
-
-"""
-Decorator module, see http://pypi.python.org/pypi/decorator
-for the documentation.
-"""
-from __future__ import print_function
-
-import re
-import sys
-import inspect
-import operator
-import itertools
-import collections
-
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+# Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# Redistributions in bytecode form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see http://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+from __future__ import print_function
+
+import re
+import sys
+import inspect
+import operator
+import itertools
+import collections
+
__version__ = '4.4.2'
-
+
if sys.version_info >= (3,):
- from inspect import getfullargspec
-
- def get_init(cls):
- return cls.__init__
-else:
+ from inspect import getfullargspec
+
+ def get_init(cls):
+ return cls.__init__
+else:
FullArgSpec = collections.namedtuple(
'FullArgSpec', 'args varargs varkw defaults '
'kwonlyargs kwonlydefaults annotations')
def getfullargspec(f):
- "A quick and dirty replacement for getfullargspec for Python 2.X"
+ "A quick and dirty replacement for getfullargspec for Python 2.X"
return FullArgSpec._make(inspect.getargspec(f) + ([], None, {}))
-
- def get_init(cls):
- return cls.__init__.__func__
-
+
+ def get_init(cls):
+ return cls.__init__.__func__
+
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
@@ -71,43 +71,43 @@ except ImportError:
# assume no generator function in old Python versions
def isgeneratorfunction(caller):
return False
-
-
+
+
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
-
-
-# basic functionality
-class FunctionMaker(object):
- """
- An object with the ability to create functions with a given signature.
- It has attributes name, doc, module, signature, defaults, dict and
- methods update and make.
- """
-
- # Atomic get-and-increment provided by the GIL
- _compile_count = itertools.count()
-
+
+
+# basic functionality
+class FunctionMaker(object):
+ """
+ An object with the ability to create functions with a given signature.
+ It has attributes name, doc, module, signature, defaults, dict and
+ methods update and make.
+ """
+
+ # Atomic get-and-increment provided by the GIL
+ _compile_count = itertools.count()
+
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
- def __init__(self, func=None, name=None, signature=None,
- defaults=None, doc=None, module=None, funcdict=None):
- self.shortsignature = signature
- if func:
- # func can be a class or a callable, but not an instance method
- self.name = func.__name__
- if self.name == '<lambda>': # small hack for lambda functions
- self.name = '_lambda_'
- self.doc = func.__doc__
- self.module = func.__module__
- if inspect.isfunction(func):
- argspec = getfullargspec(func)
- self.annotations = getattr(func, '__annotations__', {})
- for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
- 'kwonlydefaults'):
- setattr(self, a, getattr(argspec, a))
- for i, arg in enumerate(self.args):
- setattr(self, 'arg%d' % i, arg)
+ def __init__(self, func=None, name=None, signature=None,
+ defaults=None, doc=None, module=None, funcdict=None):
+ self.shortsignature = signature
+ if func:
+ # func can be a class or a callable, but not an instance method
+ self.name = func.__name__
+ if self.name == '<lambda>': # small hack for lambda functions
+ self.name = '_lambda_'
+ self.doc = func.__doc__
+ self.module = func.__module__
+ if inspect.isfunction(func):
+ argspec = getfullargspec(func)
+ self.annotations = getattr(func, '__annotations__', {})
+ for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+ 'kwonlydefaults'):
+ setattr(self, a, getattr(argspec, a))
+ for i, arg in enumerate(self.args):
+ setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
@@ -123,95 +123,95 @@ class FunctionMaker(object):
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
- self.dict = func.__dict__.copy()
- # func=None happens when decorating a caller
- if name:
- self.name = name
- if signature is not None:
- self.signature = signature
- if defaults:
- self.defaults = defaults
- if doc:
- self.doc = doc
- if module:
- self.module = module
- if funcdict:
- self.dict = funcdict
- # check existence required attributes
- assert hasattr(self, 'name')
- if not hasattr(self, 'signature'):
- raise TypeError('You are decorating a non-function: %s' % func)
-
- def update(self, func, **kw):
- "Update the signature of func with the data in self"
- func.__name__ = self.name
- func.__doc__ = getattr(self, 'doc', None)
- func.__dict__ = getattr(self, 'dict', {})
+ self.dict = func.__dict__.copy()
+ # func=None happens when decorating a caller
+ if name:
+ self.name = name
+ if signature is not None:
+ self.signature = signature
+ if defaults:
+ self.defaults = defaults
+ if doc:
+ self.doc = doc
+ if module:
+ self.module = module
+ if funcdict:
+ self.dict = funcdict
+ # check existence of required attributes
+ assert hasattr(self, 'name')
+ if not hasattr(self, 'signature'):
+ raise TypeError('You are decorating a non-function: %s' % func)
+
+ def update(self, func, **kw):
+ "Update the signature of func with the data in self"
+ func.__name__ = self.name
+ func.__doc__ = getattr(self, 'doc', None)
+ func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
- func.__annotations__ = getattr(self, 'annotations', None)
- try:
- frame = sys._getframe(3)
- except AttributeError: # for IronPython and similar implementations
- callermodule = '?'
- else:
- callermodule = frame.f_globals.get('__name__', '?')
- func.__module__ = getattr(self, 'module', callermodule)
- func.__dict__.update(kw)
-
- def make(self, src_templ, evaldict=None, addsource=False, **attrs):
- "Make a new function from a given template and update the signature"
- src = src_templ % vars(self) # expand name and signature
- evaldict = evaldict or {}
+ func.__annotations__ = getattr(self, 'annotations', None)
+ try:
+ frame = sys._getframe(3)
+ except AttributeError: # for IronPython and similar implementations
+ callermodule = '?'
+ else:
+ callermodule = frame.f_globals.get('__name__', '?')
+ func.__module__ = getattr(self, 'module', callermodule)
+ func.__dict__.update(kw)
+
+ def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+ "Make a new function from a given template and update the signature"
+ src = src_templ % vars(self) # expand name and signature
+ evaldict = evaldict or {}
mo = DEF.search(src)
- if mo is None:
- raise SyntaxError('not a valid function template\n%s' % src)
- name = mo.group(1) # extract the function name
- names = set([name] + [arg.strip(' *') for arg in
- self.shortsignature.split(',')])
- for n in names:
- if n in ('_func_', '_call_'):
- raise NameError('%s is overridden in\n%s' % (n, src))
-
+ if mo is None:
+ raise SyntaxError('not a valid function template\n%s' % src)
+ name = mo.group(1) # extract the function name
+ names = set([name] + [arg.strip(' *') for arg in
+ self.shortsignature.split(',')])
+ for n in names:
+ if n in ('_func_', '_call_'):
+ raise NameError('%s is overridden in\n%s' % (n, src))
+
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
- # Ensure each generated function has a unique filename for profilers
- # (such as cProfile) that depend on the tuple of (<filename>,
- # <definition line>, <function name>) being unique.
+ # Ensure each generated function has a unique filename for profilers
+ # (such as cProfile) that depend on the tuple of (<filename>,
+ # <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % next(self._compile_count)
- try:
- code = compile(src, filename, 'single')
- exec(code, evaldict)
+ try:
+ code = compile(src, filename, 'single')
+ exec(code, evaldict)
except Exception:
- print('Error in generated code:', file=sys.stderr)
- print(src, file=sys.stderr)
- raise
- func = evaldict[name]
- if addsource:
- attrs['__source__'] = src
- self.update(func, **attrs)
- return func
-
- @classmethod
- def create(cls, obj, body, evaldict, defaults=None,
- doc=None, module=None, addsource=True, **attrs):
- """
- Create a function from the strings name, signature and body.
- evaldict is the evaluation dictionary. If addsource is true an
- attribute __source__ is added to the result. The attributes attrs
- are added, if any.
- """
- if isinstance(obj, str): # "name(signature)"
- name, rest = obj.strip().split('(', 1)
- signature = rest[:-1] # strip the closing parenthesis
- func = None
- else: # a function
- name = None
- signature = None
- func = obj
- self = cls(func, name, signature, defaults, doc, module)
- ibody = '\n'.join(' ' + line for line in body.splitlines())
+ print('Error in generated code:', file=sys.stderr)
+ print(src, file=sys.stderr)
+ raise
+ func = evaldict[name]
+ if addsource:
+ attrs['__source__'] = src
+ self.update(func, **attrs)
+ return func
+
+ @classmethod
+ def create(cls, obj, body, evaldict, defaults=None,
+ doc=None, module=None, addsource=True, **attrs):
+ """
+ Create a function from the strings name, signature and body.
+ evaldict is the evaluation dictionary. If addsource is true an
+ attribute __source__ is added to the result. The attributes attrs
+ are added, if any.
+ """
+ if isinstance(obj, str): # "name(signature)"
+ name, rest = obj.strip().split('(', 1)
+ signature = rest[:-1] # strip the closing parenthesis
+ func = None
+ else: # a function
+ name = None
+ signature = None
+ func = obj
+ self = cls(func, name, signature, defaults, doc, module)
+ ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
@@ -219,15 +219,15 @@ class FunctionMaker(object):
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
-
-
+
+
def decorate(func, caller, extras=()):
- """
- decorate(func, caller) decorates a function using a caller.
+ """
+ decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
- """
- evaldict = dict(_call_=caller, _func_=func)
+ """
+ evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
@@ -250,37 +250,37 @@ def decorate(func, caller, extras=()):
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
- if hasattr(func, '__qualname__'):
- fun.__qualname__ = func.__qualname__
- return fun
-
-
-def decorator(caller, _func=None):
- """decorator(caller) converts a caller function into a decorator"""
- if _func is not None: # return a decorated function
- # this is obsolete behavior; you should use decorate instead
- return decorate(_func, caller)
- # else return a decorator function
+ if hasattr(func, '__qualname__'):
+ fun.__qualname__ = func.__qualname__
+ return fun
+
+
+def decorator(caller, _func=None):
+ """decorator(caller) converts a caller function into a decorator"""
+ if _func is not None: # return a decorated function
+ # this is obsolete behavior; you should use decorate instead
+ return decorate(_func, caller)
+ # else return a decorator function
defaultargs, defaults = '', ()
- if inspect.isclass(caller):
- name = caller.__name__.lower()
- doc = 'decorator(%s) converts functions/generators into ' \
- 'factories of %s objects' % (caller.__name__, caller.__name__)
- elif inspect.isfunction(caller):
- if caller.__name__ == '<lambda>':
- name = '_lambda_'
- else:
- name = caller.__name__
- doc = caller.__doc__
+ if inspect.isclass(caller):
+ name = caller.__name__.lower()
+ doc = 'decorator(%s) converts functions/generators into ' \
+ 'factories of %s objects' % (caller.__name__, caller.__name__)
+ elif inspect.isfunction(caller):
+ if caller.__name__ == '<lambda>':
+ name = '_lambda_'
+ else:
+ name = caller.__name__
+ doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
- else: # assume caller is an object with a __call__ method
- name = caller.__class__.__name__.lower()
- doc = caller.__call__.__doc__
+ else: # assume caller is an object with a __call__ method
+ name = caller.__class__.__name__.lower()
+ doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(func, %s)' % (name, defaultargs),
@@ -290,165 +290,165 @@ def decorator(caller, _func=None):
if defaults:
dec.__defaults__ = (None,) + defaults
return dec
-
-
-# ####################### contextmanager ####################### #
-
-try: # Python >= 3.2
- from contextlib import _GeneratorContextManager
-except ImportError: # Python >= 2.5
- from contextlib import GeneratorContextManager as _GeneratorContextManager
-
-
-class ContextManager(_GeneratorContextManager):
- def __call__(self, func):
- """Context manager decorator"""
- return FunctionMaker.create(
- func, "with _self_: return _func_(%(shortsignature)s)",
- dict(_self_=self, _func_=func), __wrapped__=func)
-
-
-init = getfullargspec(_GeneratorContextManager.__init__)
-n_args = len(init.args)
-if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
- def __init__(self, g, *a, **k):
- return _GeneratorContextManager.__init__(self, g(*a, **k))
- ContextManager.__init__ = __init__
-elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
- pass
-elif n_args == 4: # (self, gen, args, kwds) Python 3.5
- def __init__(self, g, *a, **k):
- return _GeneratorContextManager.__init__(self, g, a, k)
- ContextManager.__init__ = __init__
-
+
+
+# ####################### contextmanager ####################### #
+
+try: # Python >= 3.2
+ from contextlib import _GeneratorContextManager
+except ImportError: # Python >= 2.5
+ from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
+class ContextManager(_GeneratorContextManager):
+ def __call__(self, func):
+ """Context manager decorator"""
+ return FunctionMaker.create(
+ func, "with _self_: return _func_(%(shortsignature)s)",
+ dict(_self_=self, _func_=func), __wrapped__=func)
+
+
+init = getfullargspec(_GeneratorContextManager.__init__)
+n_args = len(init.args)
+if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
+ def __init__(self, g, *a, **k):
+ return _GeneratorContextManager.__init__(self, g(*a, **k))
+ ContextManager.__init__ = __init__
+elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
+ pass
+elif n_args == 4: # (self, gen, args, kwds) Python 3.5
+ def __init__(self, g, *a, **k):
+ return _GeneratorContextManager.__init__(self, g, a, k)
+ ContextManager.__init__ = __init__
+
_contextmanager = decorator(ContextManager)
-
-
+
+
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
-# ############################ dispatch_on ############################ #
-
-def append(a, vancestors):
- """
- Append ``a`` to the list of the virtual ancestors, unless it is already
- included.
- """
- add = True
- for j, va in enumerate(vancestors):
- if issubclass(va, a):
- add = False
- break
- if issubclass(a, va):
- vancestors[j] = a
- add = False
- if add:
- vancestors.append(a)
-
-
-# inspired by P.J. Eby's simplegeneric and functools.singledispatch
-def dispatch_on(*dispatch_args):
- """
- Factory of decorators turning a function into a generic function
- dispatching on the given arguments.
- """
- assert dispatch_args, 'No dispatch args passed'
- dispatch_str = '(%s,)' % ', '.join(dispatch_args)
-
- def check(arguments, wrong=operator.ne, msg=''):
- """Make sure one passes the expected number of arguments"""
- if wrong(len(arguments), len(dispatch_args)):
- raise TypeError('Expected %d arguments, got %d%s' %
- (len(dispatch_args), len(arguments), msg))
-
- def gen_func_dec(func):
- """Decorator turning a function into a generic function"""
-
- # first check the dispatch arguments
- argset = set(getfullargspec(func).args)
- if not set(dispatch_args) <= argset:
- raise NameError('Unknown dispatch arguments %s' % dispatch_str)
-
- typemap = {}
-
- def vancestors(*types):
- """
- Get a list of sets of virtual ancestors for the given types
- """
- check(types)
- ras = [[] for _ in range(len(dispatch_args))]
- for types_ in typemap:
- for t, type_, ra in zip(types, types_, ras):
+# ############################ dispatch_on ############################ #
+
+def append(a, vancestors):
+ """
+ Append ``a`` to the list of the virtual ancestors, unless it is already
+ included.
+ """
+ add = True
+ for j, va in enumerate(vancestors):
+ if issubclass(va, a):
+ add = False
+ break
+ if issubclass(a, va):
+ vancestors[j] = a
+ add = False
+ if add:
+ vancestors.append(a)
+
+
+# inspired by P.J. Eby's simplegeneric and functools.singledispatch
+def dispatch_on(*dispatch_args):
+ """
+ Factory of decorators turning a function into a generic function
+ dispatching on the given arguments.
+ """
+ assert dispatch_args, 'No dispatch args passed'
+ dispatch_str = '(%s,)' % ', '.join(dispatch_args)
+
+ def check(arguments, wrong=operator.ne, msg=''):
+ """Make sure one passes the expected number of arguments"""
+ if wrong(len(arguments), len(dispatch_args)):
+ raise TypeError('Expected %d arguments, got %d%s' %
+ (len(dispatch_args), len(arguments), msg))
+
+ def gen_func_dec(func):
+ """Decorator turning a function into a generic function"""
+
+ # first check the dispatch arguments
+ argset = set(getfullargspec(func).args)
+ if not set(dispatch_args) <= argset:
+ raise NameError('Unknown dispatch arguments %s' % dispatch_str)
+
+ typemap = {}
+
+ def vancestors(*types):
+ """
+ Get a list of sets of virtual ancestors for the given types
+ """
+ check(types)
+ ras = [[] for _ in range(len(dispatch_args))]
+ for types_ in typemap:
+ for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
- append(type_, ra)
- return [set(ra) for ra in ras]
-
- def ancestors(*types):
- """
- Get a list of virtual MROs, one for each type
- """
- check(types)
- lists = []
- for t, vas in zip(types, vancestors(*types)):
- n_vas = len(vas)
- if n_vas > 1:
- raise RuntimeError(
- 'Ambiguous dispatch for %s: %s' % (t, vas))
- elif n_vas == 1:
- va, = vas
+ append(type_, ra)
+ return [set(ra) for ra in ras]
+
+ def ancestors(*types):
+ """
+ Get a list of virtual MROs, one for each type
+ """
+ check(types)
+ lists = []
+ for t, vas in zip(types, vancestors(*types)):
+ n_vas = len(vas)
+ if n_vas > 1:
+ raise RuntimeError(
+ 'Ambiguous dispatch for %s: %s' % (t, vas))
+ elif n_vas == 1:
+ va, = vas
mro = type('t', (t, va), {}).mro()[1:]
- else:
+ else:
mro = t.mro()
- lists.append(mro[:-1]) # discard t and object
- return lists
-
- def register(*types):
- """
- Decorator to register an implementation for the given types
- """
- check(types)
-
- def dec(f):
- check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
- typemap[types] = f
- return f
- return dec
-
- def dispatch_info(*types):
- """
- A utility to introspect the dispatch algorithm
- """
- check(types)
- lst = []
- for anc in itertools.product(*ancestors(*types)):
- lst.append(tuple(a.__name__ for a in anc))
- return lst
-
- def _dispatch(dispatch_args, *args, **kw):
- types = tuple(type(arg) for arg in dispatch_args)
- try: # fast path
- f = typemap[types]
- except KeyError:
- pass
- else:
- return f(*args, **kw)
- combinations = itertools.product(*ancestors(*types))
- next(combinations) # the first one has already been tried
- for types_ in combinations:
- f = typemap.get(types_)
- if f is not None:
- return f(*args, **kw)
-
- # else call the default implementation
- return func(*args, **kw)
-
- return FunctionMaker.create(
- func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
- dict(_f_=_dispatch), register=register, default=func,
- typemap=typemap, vancestors=vancestors, ancestors=ancestors,
- dispatch_info=dispatch_info, __wrapped__=func)
-
- gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
- return gen_func_dec
+ lists.append(mro[:-1]) # discard t and object
+ return lists
+
+ def register(*types):
+ """
+ Decorator to register an implementation for the given types
+ """
+ check(types)
+
+ def dec(f):
+ check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
+ typemap[types] = f
+ return f
+ return dec
+
+ def dispatch_info(*types):
+ """
+ A utility to introspect the dispatch algorithm
+ """
+ check(types)
+ lst = []
+ for anc in itertools.product(*ancestors(*types)):
+ lst.append(tuple(a.__name__ for a in anc))
+ return lst
+
+ def _dispatch(dispatch_args, *args, **kw):
+ types = tuple(type(arg) for arg in dispatch_args)
+ try: # fast path
+ f = typemap[types]
+ except KeyError:
+ pass
+ else:
+ return f(*args, **kw)
+ combinations = itertools.product(*ancestors(*types))
+ next(combinations) # the first one has already been tried
+ for types_ in combinations:
+ f = typemap.get(types_)
+ if f is not None:
+ return f(*args, **kw)
+
+ # else call the default implementation
+ return func(*args, **kw)
+
+ return FunctionMaker.create(
+ func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
+ dict(_f_=_dispatch), register=register, default=func,
+ typemap=typemap, vancestors=vancestors, ancestors=ancestors,
+ dispatch_info=dispatch_info, __wrapped__=func)
+
+ gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
+ return gen_func_dec
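
The decorator.py sources above center on FunctionMaker, which compiles a wrapper whose signature matches the wrapped function exactly, so introspection keeps working. A minimal usage sketch of the decorator() factory defined above (the trace caller and add function are illustrative, not part of the vendored sources):

    import inspect
    from decorator import decorator

    @decorator
    def trace(f, *args, **kw):
        # caller convention used throughout decorator.py: the wrapped
        # function arrives first, followed by the actual call arguments
        print('calling %s with args %s, %s' % (f.__name__, args, kw))
        return f(*args, **kw)

    @trace
    def add(x, y=0):
        return x + y

    add(1, 2)  # prints the call, returns 3
    # unlike a plain closure, the generated wrapper keeps the real signature:
    print(inspect.getfullargspec(add).args)  # ['x', 'y'] on Python 3

Under Python 2, inspect.getargspec reports the same ['x', 'y'], which is the point of shipping this library for both interpreters.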
diff --git a/contrib/python/decorator/ya.make b/contrib/python/decorator/ya.make
index 6eccf23147..fbb7ebf626 100644
--- a/contrib/python/decorator/ya.make
+++ b/contrib/python/decorator/ya.make
@@ -3,23 +3,23 @@ PY23_LIBRARY()
LICENSE(BSD-3-Clause)
OWNER(g:python-contrib borman)
-
+
VERSION(4.4.2)
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- decorator.py
-)
-
+PY_SRCS(
+ TOP_LEVEL
+ decorator.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/decorator/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
RECURSE_FOR_TESTS(
tests
diff --git a/contrib/python/ipython/py2/COPYING.rst b/contrib/python/ipython/py2/COPYING.rst
index 2a973f9dcb..59674acdc8 100644
--- a/contrib/python/ipython/py2/COPYING.rst
+++ b/contrib/python/ipython/py2/COPYING.rst
@@ -1,74 +1,74 @@
-=============================
- The IPython licensing terms
-=============================
-
-IPython is licensed under the terms of the Modified BSD License (also known as
-New or Revised or 3-Clause BSD), as follows:
-
-- Copyright (c) 2008-2014, IPython Development Team
-- Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
-- Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
-- Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-Neither the name of the IPython Development Team nor the names of its
-contributors may be used to endorse or promote products derived from this
-software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-About the IPython Development Team
-----------------------------------
-
-Fernando Perez began IPython in 2001 based on code from Janko Hauser
-<jhauser@zscout.de> and Nathaniel Gray <n8gray@caltech.edu>. Fernando is still
-the project lead.
-
-The IPython Development Team is the set of all contributors to the IPython
-project. This includes all of the IPython subprojects. A full list with
-details is kept in the documentation directory, in the file
-``about/credits.txt``.
-
-The core team that coordinates development on GitHub can be found here:
-https://github.com/ipython/.
-
-Our Copyright Policy
---------------------
-
-IPython uses a shared copyright model. Each contributor maintains copyright
-over their contributions to IPython. But, it is important to note that these
-contributions are typically only changes to the repositories. Thus, the IPython
-source code, in its entirety is not the copyright of any single person or
-institution. Instead, it is the collective copyright of the entire IPython
-Development Team. If individual contributors want to maintain a record of what
-changes/contributions they have specific copyright on, they should indicate
-their copyright in the commit message of the change, when they commit the
-change to one of the IPython repositories.
-
-With this in mind, the following banner should be used in any source code file
-to indicate the copyright and license terms:
-
-::
-
- # Copyright (c) IPython Development Team.
- # Distributed under the terms of the Modified BSD License.
+=============================
+ The IPython licensing terms
+=============================
+
+IPython is licensed under the terms of the Modified BSD License (also known as
+New or Revised or 3-Clause BSD), as follows:
+
+- Copyright (c) 2008-2014, IPython Development Team
+- Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
+- Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+- Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the IPython Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the IPython Development Team
+----------------------------------
+
+Fernando Perez began IPython in 2001 based on code from Janko Hauser
+<jhauser@zscout.de> and Nathaniel Gray <n8gray@caltech.edu>. Fernando is still
+the project lead.
+
+The IPython Development Team is the set of all contributors to the IPython
+project. This includes all of the IPython subprojects. A full list with
+details is kept in the documentation directory, in the file
+``about/credits.txt``.
+
+The core team that coordinates development on GitHub can be found here:
+https://github.com/ipython/.
+
+Our Copyright Policy
+--------------------
+
+IPython uses a shared copyright model. Each contributor maintains copyright
+over their contributions to IPython. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety is not the copyright of any single person or
+institution. Instead, it is the collective copyright of the entire IPython
+Development Team. If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the IPython repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+::
+
+ # Copyright (c) IPython Development Team.
+ # Distributed under the terms of the Modified BSD License.
diff --git a/contrib/python/ipython/py2/IPython/__init__.py b/contrib/python/ipython/py2/IPython/__init__.py
index aa5121da24..9b450da6a0 100644
--- a/contrib/python/ipython/py2/IPython/__init__.py
+++ b/contrib/python/ipython/py2/IPython/__init__.py
@@ -1,146 +1,146 @@
-# encoding: utf-8
-"""
-IPython: tools for interactive and parallel computing in Python.
-
-http://ipython.org
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2008-2011, IPython Development Team.
-# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
-# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
-# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import absolute_import
-
-import os
-import sys
-import warnings
-
-#-----------------------------------------------------------------------------
-# Setup everything
-#-----------------------------------------------------------------------------
-
-# Don't forget to also update setup.py when this changes!
-v = sys.version_info
-if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
- raise ImportError('IPython requires Python version 2.7 or 3.3 or above.')
-del v
-
-# Make it easy to import extensions - they are always directly on pythonpath.
-# Therefore, non-IPython modules can be added to extensions directory.
-# This should probably be in ipapp.py.
-sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
-
-#-----------------------------------------------------------------------------
-# Setup the top level names
-#-----------------------------------------------------------------------------
-
-from .core.getipython import get_ipython
-from .core import release
-from .core.application import Application
-from .terminal.embed import embed
-
-from .core.interactiveshell import InteractiveShell
-from .testing import test
-from .utils.sysinfo import sys_info
-from .utils.frame import extract_module_locals
-
-# Release data
-__author__ = '%s <%s>' % (release.author, release.author_email)
-__license__ = release.license
-__version__ = release.version
-version_info = release.version_info
-
-def embed_kernel(module=None, local_ns=None, **kwargs):
- """Embed and start an IPython kernel in a given scope.
-
- If you don't want the kernel to initialize the namespace
- from the scope of the surrounding function,
- and/or you want to load full IPython configuration,
- you probably want `IPython.start_kernel()` instead.
-
- Parameters
- ----------
- module : ModuleType, optional
- The module to load into IPython globals (default: caller)
- local_ns : dict, optional
- The namespace to load into IPython user namespace (default: caller)
-
- kwargs : various, optional
- Further keyword args are relayed to the IPKernelApp constructor,
- allowing configuration of the Kernel. Will only have an effect
- on the first embed_kernel call for a given process.
- """
-
- (caller_module, caller_locals) = extract_module_locals(1)
- if module is None:
- module = caller_module
- if local_ns is None:
- local_ns = caller_locals
-
- # Only import .zmq when we really need it
- from ipykernel.embed import embed_kernel as real_embed_kernel
- real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
-
-def start_ipython(argv=None, **kwargs):
- """Launch a normal IPython instance (as opposed to embedded)
-
- `IPython.embed()` puts a shell in a particular calling scope,
- such as a function or method for debugging purposes,
- which is often not desirable.
-
- `start_ipython()` does full, regular IPython initialization,
- including loading startup files, configuration, etc.
- much of which is skipped by `embed()`.
-
- This is a public API method, and will survive implementation changes.
-
- Parameters
- ----------
-
- argv : list or None, optional
- If unspecified or None, IPython will parse command-line options from sys.argv.
- To prevent any command-line parsing, pass an empty list: `argv=[]`.
- user_ns : dict, optional
- specify this dictionary to initialize the IPython user namespace with particular values.
- kwargs : various, optional
- Any other kwargs will be passed to the Application constructor,
- such as `config`.
- """
- from IPython.terminal.ipapp import launch_new_instance
- return launch_new_instance(argv=argv, **kwargs)
-
-def start_kernel(argv=None, **kwargs):
- """Launch a normal IPython kernel instance (as opposed to embedded)
-
- `IPython.embed_kernel()` puts a shell in a particular calling scope,
- such as a function or method for debugging purposes,
- which is often not desirable.
-
- `start_kernel()` does full, regular IPython initialization,
- including loading startup files, configuration, etc.
- much of which is skipped by `embed()`.
-
- Parameters
- ----------
-
- argv : list or None, optional
- If unspecified or None, IPython will parse command-line options from sys.argv.
- To prevent any command-line parsing, pass an empty list: `argv=[]`.
- user_ns : dict, optional
- specify this dictionary to initialize the IPython user namespace with particular values.
- kwargs : various, optional
- Any other kwargs will be passed to the Application constructor,
- such as `config`.
- """
- from IPython.kernel.zmq.kernelapp import launch_new_instance
- return launch_new_instance(argv=argv, **kwargs)
-
+# encoding: utf-8
+"""
+IPython: tools for interactive and parallel computing in Python.
+
+http://ipython.org
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2008-2011, IPython Development Team.
+# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
+# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import absolute_import
+
+import os
+import sys
+import warnings
+
+#-----------------------------------------------------------------------------
+# Setup everything
+#-----------------------------------------------------------------------------
+
+# Don't forget to also update setup.py when this changes!
+v = sys.version_info
+if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
+ raise ImportError('IPython requires Python version 2.7 or 3.3 or above.')
+del v
+
+# Make it easy to import extensions - they are always directly on pythonpath.
+# Therefore, non-IPython modules can be added to extensions directory.
+# This should probably be in ipapp.py.
+sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
+
+#-----------------------------------------------------------------------------
+# Setup the top level names
+#-----------------------------------------------------------------------------
+
+from .core.getipython import get_ipython
+from .core import release
+from .core.application import Application
+from .terminal.embed import embed
+
+from .core.interactiveshell import InteractiveShell
+from .testing import test
+from .utils.sysinfo import sys_info
+from .utils.frame import extract_module_locals
+
+# Release data
+__author__ = '%s <%s>' % (release.author, release.author_email)
+__license__ = release.license
+__version__ = release.version
+version_info = release.version_info
+
+def embed_kernel(module=None, local_ns=None, **kwargs):
+ """Embed and start an IPython kernel in a given scope.
+
+ If you don't want the kernel to initialize the namespace
+ from the scope of the surrounding function,
+ and/or you want to load full IPython configuration,
+ you probably want `IPython.start_kernel()` instead.
+
+ Parameters
+ ----------
+ module : ModuleType, optional
+ The module to load into IPython globals (default: caller)
+ local_ns : dict, optional
+ The namespace to load into IPython user namespace (default: caller)
+
+ kwargs : various, optional
+ Further keyword args are relayed to the IPKernelApp constructor,
+ allowing configuration of the Kernel. Will only have an effect
+ on the first embed_kernel call for a given process.
+ """
+
+ (caller_module, caller_locals) = extract_module_locals(1)
+ if module is None:
+ module = caller_module
+ if local_ns is None:
+ local_ns = caller_locals
+
+ # Only import .zmq when we really need it
+ from ipykernel.embed import embed_kernel as real_embed_kernel
+ real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
+
+def start_ipython(argv=None, **kwargs):
+ """Launch a normal IPython instance (as opposed to embedded)
+
+ `IPython.embed()` puts a shell in a particular calling scope,
+ such as a function or method for debugging purposes,
+ which is often not desirable.
+
+ `start_ipython()` does full, regular IPython initialization,
+ including loading startup files, configuration, etc.
+ much of which is skipped by `embed()`.
+
+ This is a public API method, and will survive implementation changes.
+
+ Parameters
+ ----------
+
+ argv : list or None, optional
+ If unspecified or None, IPython will parse command-line options from sys.argv.
+ To prevent any command-line parsing, pass an empty list: `argv=[]`.
+ user_ns : dict, optional
+ specify this dictionary to initialize the IPython user namespace with particular values.
+ kwargs : various, optional
+ Any other kwargs will be passed to the Application constructor,
+ such as `config`.
+ """
+ from IPython.terminal.ipapp import launch_new_instance
+ return launch_new_instance(argv=argv, **kwargs)
+
+def start_kernel(argv=None, **kwargs):
+ """Launch a normal IPython kernel instance (as opposed to embedded)
+
+ `IPython.embed_kernel()` puts a shell in a particular calling scope,
+ such as a function or method for debugging purposes,
+ which is often not desirable.
+
+ `start_kernel()` does full, regular IPython initialization,
+ including loading startup files, configuration, etc.
+ much of which is skipped by `embed()`.
+
+ Parameters
+ ----------
+
+ argv : list or None, optional
+ If unspecified or None, IPython will parse command-line options from sys.argv.
+ To prevent any command-line parsing, pass an empty list: `argv=[]`.
+ user_ns : dict, optional
+ specify this dictionary to initialize the IPython user namespace with particular values.
+ kwargs : various, optional
+ Any other kwargs will be passed to the Application constructor,
+ such as `config`.
+ """
+ from IPython.kernel.zmq.kernelapp import launch_new_instance
+ return launch_new_instance(argv=argv, **kwargs)
+
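
As the docstrings above emphasize, start_ipython() performs full, regular initialization, unlike embed(), which only drops a shell into the calling scope. A minimal sketch of the documented call (the seeded namespace value is illustrative):

    import IPython

    # argv=[] skips command-line parsing; user_ns seeds the user
    # namespace, exactly as the docstring above describes
    IPython.start_ipython(argv=[], user_ns={'answer': 42})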
diff --git a/contrib/python/ipython/py2/IPython/__main__.py b/contrib/python/ipython/py2/IPython/__main__.py
index 2e142249b5..d5123f33a2 100644
--- a/contrib/python/ipython/py2/IPython/__main__.py
+++ b/contrib/python/ipython/py2/IPython/__main__.py
@@ -1,14 +1,14 @@
-# encoding: utf-8
-"""Terminal-based IPython entry point.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012, IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from IPython import start_ipython
-
-start_ipython()
+# encoding: utf-8
+"""Terminal-based IPython entry point.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from IPython import start_ipython
+
+start_ipython()
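
Because this module does nothing but call start_ipython(), the two invocations below are effectively equivalent ways to launch the terminal app (the ipython console script is likewise a thin wrapper over start_ipython()):

    python -m IPython
    ipython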
diff --git a/contrib/python/ipython/py2/IPython/config.py b/contrib/python/ipython/py2/IPython/config.py
index ac8d0aa4bd..cf2bacafad 100644
--- a/contrib/python/ipython/py2/IPython/config.py
+++ b/contrib/python/ipython/py2/IPython/config.py
@@ -1,19 +1,19 @@
-"""
-Shim to maintain backwards compatibility with old IPython.config imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.config imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.config` package has been deprecated since IPython 4.0. "
- "You should import from traitlets.config instead.", ShimWarning)
-
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.config'] = ShimModule(src='IPython.config', mirror='traitlets.config')
+ "You should import from traitlets.config instead.", ShimWarning)
+
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.config'] = ShimModule(src='IPython.config', mirror='traitlets.config')
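
The net effect is that legacy imports keep working but resolve to traitlets. A short sketch of the documented behavior (assuming traitlets is installed and that ShimModule forwards attribute access to its mirror, as the comments above state):

    import IPython.config                 # first import emits the ShimWarning
    from traitlets.config import Config

    # the shim mirrors traitlets.config, so attributes are the same objects
    assert IPython.config.Config is Config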
diff --git a/contrib/python/ipython/py2/IPython/consoleapp.py b/contrib/python/ipython/py2/IPython/consoleapp.py
index e2ffbbf664..14903bdc74 100644
--- a/contrib/python/ipython/py2/IPython/consoleapp.py
+++ b/contrib/python/ipython/py2/IPython/consoleapp.py
@@ -1,12 +1,12 @@
-"""
-Shim to maintain backwards compatibility with old IPython.consoleapp imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from warnings import warn
-
-warn("The `IPython.consoleapp` package has been deprecated. "
- "You should import from jupyter_client.consoleapp instead.")
-
-from jupyter_client.consoleapp import *
+"""
+Shim to maintain backwards compatibility with old IPython.consoleapp imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from warnings import warn
+
+warn("The `IPython.consoleapp` package has been deprecated. "
+ "You should import from jupyter_client.consoleapp instead.")
+
+from jupyter_client.consoleapp import *
diff --git a/contrib/python/ipython/py2/IPython/core/alias.py b/contrib/python/ipython/py2/IPython/core/alias.py
index 66ba986b40..28a9ccb00d 100644
--- a/contrib/python/ipython/py2/IPython/core/alias.py
+++ b/contrib/python/ipython/py2/IPython/core/alias.py
@@ -1,257 +1,257 @@
-# encoding: utf-8
-"""
-System command aliases.
-
-Authors:
-
-* Fernando Perez
-* Brian Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-import re
-import sys
-
-from traitlets.config.configurable import Configurable
-from IPython.core.error import UsageError
-
-from IPython.utils.py3compat import string_types
-from traitlets import List, Instance
+# encoding: utf-8
+"""
+System command aliases.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+
+from traitlets.config.configurable import Configurable
+from IPython.core.error import UsageError
+
+from IPython.utils.py3compat import string_types
+from traitlets import List, Instance
from logging import error
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
-# This is used as the pattern for calls to split_user_input.
-shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
-
-def default_aliases():
- """Return list of shell aliases to auto-define.
- """
- # Note: the aliases defined here should be safe to use on a kernel
- # regardless of what frontend it is attached to. Frontends that use a
- # kernel in-process can define additional aliases that will only work in
- # their case. For example, things like 'less' or 'clear' that manipulate
- # the terminal should NOT be declared here, as they will only work if the
- # kernel is running inside a true terminal, and not over the network.
-
- if os.name == 'posix':
- default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
- ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
- ('cat', 'cat'),
- ]
- # Useful set of ls aliases. The GNU and BSD options are a little
- # different, so we make aliases that provide as similar as possible
- # behavior in ipython, by passing the right flags for each platform
- if sys.platform.startswith('linux'):
- ls_aliases = [('ls', 'ls -F --color'),
- # long ls
- ('ll', 'ls -F -o --color'),
- # ls normal files only
- ('lf', 'ls -F -o --color %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -o --color %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -o --color %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -o --color %l | grep ^-..x'),
- ]
- elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
- # OpenBSD, NetBSD. The ls implementation on these platforms does not support
- # the -G switch and lacks the ability to use colorized output.
- ls_aliases = [('ls', 'ls -F'),
- # long ls
- ('ll', 'ls -F -l'),
- # ls normal files only
- ('lf', 'ls -F -l %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -l %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -l %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -l %l | grep ^-..x'),
- ]
- else:
- # BSD, OSX, etc.
- ls_aliases = [('ls', 'ls -F -G'),
- # long ls
- ('ll', 'ls -F -l -G'),
- # ls normal files only
- ('lf', 'ls -F -l -G %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -l -G %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -G -l %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -l -G %l | grep ^-..x'),
- ]
- default_aliases = default_aliases + ls_aliases
- elif os.name in ['nt', 'dos']:
- default_aliases = [('ls', 'dir /on'),
- ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
- ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
- ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
- ]
- else:
- default_aliases = []
-
- return default_aliases
-
-
-class AliasError(Exception):
- pass
-
-
-class InvalidAliasError(AliasError):
- pass
-
-class Alias(object):
- """Callable object storing the details of one alias.
-
- Instances are registered as magic functions to allow use of aliases.
- """
-
- # Prepare blacklist
- blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
-
- def __init__(self, shell, name, cmd):
- self.shell = shell
- self.name = name
- self.cmd = cmd
- self.__doc__ = "Alias for `!{}`".format(cmd)
- self.nargs = self.validate()
-
- def validate(self):
- """Validate the alias, and return the number of arguments."""
- if self.name in self.blacklist:
- raise InvalidAliasError("The name %s can't be aliased "
- "because it is a keyword or builtin." % self.name)
- try:
- caller = self.shell.magics_manager.magics['line'][self.name]
- except KeyError:
- pass
- else:
- if not isinstance(caller, Alias):
- raise InvalidAliasError("The name %s can't be aliased "
- "because it is another magic command." % self.name)
-
- if not (isinstance(self.cmd, string_types)):
- raise InvalidAliasError("An alias command must be a string, "
- "got: %r" % self.cmd)
-
- nargs = self.cmd.count('%s') - self.cmd.count('%%s')
-
- if (nargs > 0) and (self.cmd.find('%l') >= 0):
- raise InvalidAliasError('The %s and %l specifiers are mutually '
- 'exclusive in alias definitions.')
-
- return nargs
-
- def __repr__(self):
- return "<alias {} for {!r}>".format(self.name, self.cmd)
-
- def __call__(self, rest=''):
- cmd = self.cmd
- nargs = self.nargs
- # Expand the %l special to be the user's input line
- if cmd.find('%l') >= 0:
- cmd = cmd.replace('%l', rest)
- rest = ''
-
- if nargs==0:
- if cmd.find('%%s') >= 1:
- cmd = cmd.replace('%%s', '%s')
- # Simple, argument-less aliases
- cmd = '%s %s' % (cmd, rest)
- else:
- # Handle aliases with positional arguments
- args = rest.split(None, nargs)
- if len(args) < nargs:
- raise UsageError('Alias <%s> requires %s arguments, %s given.' %
- (self.name, nargs, len(args)))
- cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
-
- self.shell.system(cmd)
-
-#-----------------------------------------------------------------------------
-# Main AliasManager class
-#-----------------------------------------------------------------------------
-
-class AliasManager(Configurable):
-
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# This is used as the pattern for calls to split_user_input.
+shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
+
+def default_aliases():
+ """Return list of shell aliases to auto-define.
+ """
+ # Note: the aliases defined here should be safe to use on a kernel
+ # regardless of what frontend it is attached to. Frontends that use a
+ # kernel in-process can define additional aliases that will only work in
+ # their case. For example, things like 'less' or 'clear' that manipulate
+ # the terminal should NOT be declared here, as they will only work if the
+ # kernel is running inside a true terminal, and not over the network.
+
+ if os.name == 'posix':
+ default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
+ ('cat', 'cat'),
+ ]
+ # Useful set of ls aliases. The GNU and BSD options are a little
+ # different, so we make aliases that provide as similar as possible
+ # behavior in ipython, by passing the right flags for each platform
+ if sys.platform.startswith('linux'):
+ ls_aliases = [('ls', 'ls -F --color'),
+ # long ls
+ ('ll', 'ls -F -o --color'),
+ # ls normal files only
+ ('lf', 'ls -F -o --color %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -o --color %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -o --color %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -o --color %l | grep ^-..x'),
+ ]
+ elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
+ # OpenBSD, NetBSD. The ls implementation on these platforms does not support
+ # the -G switch and lacks the ability to use colorized output.
+ ls_aliases = [('ls', 'ls -F'),
+ # long ls
+ ('ll', 'ls -F -l'),
+ # ls normal files only
+ ('lf', 'ls -F -l %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l %l | grep ^-..x'),
+ ]
+ else:
+ # BSD, OSX, etc.
+ ls_aliases = [('ls', 'ls -F -G'),
+ # long ls
+ ('ll', 'ls -F -l -G'),
+ # ls normal files only
+ ('lf', 'ls -F -l -G %l | grep ^-'),
+ # ls symbolic links
+ ('lk', 'ls -F -l -G %l | grep ^l'),
+ # directories or links to directories,
+ ('ldir', 'ls -F -G -l %l | grep /$'),
+ # things which are executable
+ ('lx', 'ls -F -l -G %l | grep ^-..x'),
+ ]
+ default_aliases = default_aliases + ls_aliases
+ elif os.name in ['nt', 'dos']:
+ default_aliases = [('ls', 'dir /on'),
+ ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
+ ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
+ ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
+ ]
+ else:
+ default_aliases = []
+
+ return default_aliases
+
+
+class AliasError(Exception):
+ pass
+
+
+class InvalidAliasError(AliasError):
+ pass
+
+class Alias(object):
+ """Callable object storing the details of one alias.
+
+ Instances are registered as magic functions to allow use of aliases.
+ """
+
+ # Prepare blacklist
+ blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
+
+ def __init__(self, shell, name, cmd):
+ self.shell = shell
+ self.name = name
+ self.cmd = cmd
+ self.__doc__ = "Alias for `!{}`".format(cmd)
+ self.nargs = self.validate()
+
+ def validate(self):
+ """Validate the alias, and return the number of arguments."""
+ if self.name in self.blacklist:
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is a keyword or builtin." % self.name)
+ try:
+ caller = self.shell.magics_manager.magics['line'][self.name]
+ except KeyError:
+ pass
+ else:
+ if not isinstance(caller, Alias):
+ raise InvalidAliasError("The name %s can't be aliased "
+ "because it is another magic command." % self.name)
+
+ if not (isinstance(self.cmd, string_types)):
+ raise InvalidAliasError("An alias command must be a string, "
+ "got: %r" % self.cmd)
+
+ nargs = self.cmd.count('%s') - self.cmd.count('%%s')
+
+ if (nargs > 0) and (self.cmd.find('%l') >= 0):
+ raise InvalidAliasError('The %s and %l specifiers are mutually '
+ 'exclusive in alias definitions.')
+
+ return nargs
+
+ def __repr__(self):
+ return "<alias {} for {!r}>".format(self.name, self.cmd)
+
+ def __call__(self, rest=''):
+ cmd = self.cmd
+ nargs = self.nargs
+ # Expand the %l special to be the user's input line
+ if cmd.find('%l') >= 0:
+ cmd = cmd.replace('%l', rest)
+ rest = ''
+
+ if nargs==0:
+ if cmd.find('%%s') >= 1:
+ cmd = cmd.replace('%%s', '%s')
+ # Simple, argument-less aliases
+ cmd = '%s %s' % (cmd, rest)
+ else:
+ # Handle aliases with positional arguments
+ args = rest.split(None, nargs)
+ if len(args) < nargs:
+ raise UsageError('Alias <%s> requires %s arguments, %s given.' %
+ (self.name, nargs, len(args)))
+ cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
+
+ self.shell.system(cmd)
+
+#-----------------------------------------------------------------------------
+# Main AliasManager class
+#-----------------------------------------------------------------------------
+
+class AliasManager(Configurable):
+
default_aliases = List(default_aliases()).tag(config=True)
user_aliases = List(default_value=[]).tag(config=True)
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
-
- def __init__(self, shell=None, **kwargs):
- super(AliasManager, self).__init__(shell=shell, **kwargs)
- # For convenient access
- self.linemagics = self.shell.magics_manager.magics['line']
- self.init_aliases()
-
- def init_aliases(self):
- # Load default & user aliases
- for name, cmd in self.default_aliases + self.user_aliases:
- self.soft_define_alias(name, cmd)
-
- @property
- def aliases(self):
- return [(n, func.cmd) for (n, func) in self.linemagics.items()
- if isinstance(func, Alias)]
-
- def soft_define_alias(self, name, cmd):
- """Define an alias, but don't raise on an AliasError."""
- try:
- self.define_alias(name, cmd)
- except AliasError as e:
- error("Invalid alias: %s" % e)
-
- def define_alias(self, name, cmd):
- """Define a new alias after validating it.
-
- This will raise an :exc:`AliasError` if there are validation
- problems.
- """
- caller = Alias(shell=self.shell, name=name, cmd=cmd)
- self.shell.magics_manager.register_function(caller, magic_kind='line',
- magic_name=name)
-
- def get_alias(self, name):
- """Return an alias, or None if no alias by that name exists."""
- aname = self.linemagics.get(name, None)
- return aname if isinstance(aname, Alias) else None
-
- def is_alias(self, name):
- """Return whether or not a given name has been defined as an alias"""
- return self.get_alias(name) is not None
-
- def undefine_alias(self, name):
- if self.is_alias(name):
- del self.linemagics[name]
- else:
- raise ValueError('%s is not an alias' % name)
-
- def clear_aliases(self):
- for name, cmd in self.aliases:
- self.undefine_alias(name)
-
- def retrieve_alias(self, name):
- """Retrieve the command to which an alias expands."""
- caller = self.get_alias(name)
- if caller:
- return caller.cmd
- else:
- raise ValueError('%s is not an alias' % name)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(AliasManager, self).__init__(shell=shell, **kwargs)
+ # For convenient access
+ self.linemagics = self.shell.magics_manager.magics['line']
+ self.init_aliases()
+
+ def init_aliases(self):
+ # Load default & user aliases
+ for name, cmd in self.default_aliases + self.user_aliases:
+ self.soft_define_alias(name, cmd)
+
+ @property
+ def aliases(self):
+ return [(n, func.cmd) for (n, func) in self.linemagics.items()
+ if isinstance(func, Alias)]
+
+ def soft_define_alias(self, name, cmd):
+ """Define an alias, but don't raise on an AliasError."""
+ try:
+ self.define_alias(name, cmd)
+ except AliasError as e:
+ error("Invalid alias: %s" % e)
+
+ def define_alias(self, name, cmd):
+ """Define a new alias after validating it.
+
+ This will raise an :exc:`AliasError` if there are validation
+ problems.
+ """
+ caller = Alias(shell=self.shell, name=name, cmd=cmd)
+ self.shell.magics_manager.register_function(caller, magic_kind='line',
+ magic_name=name)
+
+ def get_alias(self, name):
+ """Return an alias, or None if no alias by that name exists."""
+ aname = self.linemagics.get(name, None)
+ return aname if isinstance(aname, Alias) else None
+
+ def is_alias(self, name):
+ """Return whether or not a given name has been defined as an alias"""
+ return self.get_alias(name) is not None
+
+ def undefine_alias(self, name):
+ if self.is_alias(name):
+ del self.linemagics[name]
+ else:
+ raise ValueError('%s is not an alias' % name)
+
+ def clear_aliases(self):
+ for name, cmd in self.aliases:
+ self.undefine_alias(name)
+
+ def retrieve_alias(self, name):
+ """Retrieve the command to which an alias expands."""
+ caller = self.get_alias(name)
+ if caller:
+ return caller.cmd
+ else:
+ raise ValueError('%s is not an alias' % name)
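
The validate()/__call__ pair above encodes the alias placeholder rules: each literal %s consumes one positional argument, %%s is an escape that must not count, and %l swallows the whole remaining input line (and may not be mixed with %s). A standalone sketch of the argument-count rule, mirroring Alias.validate():

    def alias_nargs(cmd):
        # same arithmetic as Alias.validate(): '%%s' contains '%s' as a
        # substring, so subtracting the escaped occurrences cancels them
        return cmd.count('%s') - cmd.count('%%s')

    assert alias_nargs('echo %s and %s') == 2  # two positional slots
    assert alias_nargs('echo 100%%s') == 0     # escaped, no slot
    assert alias_nargs('cat %l') == 0          # %l takes the raw rest of the line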
diff --git a/contrib/python/ipython/py2/IPython/core/application.py b/contrib/python/ipython/py2/IPython/core/application.py
index 99e94a342a..af28133945 100644
--- a/contrib/python/ipython/py2/IPython/core/application.py
+++ b/contrib/python/ipython/py2/IPython/core/application.py
@@ -1,49 +1,49 @@
-# encoding: utf-8
-"""
-An application for IPython.
-
-All top-level applications should use the classes in this module for
-handling configuration and creating configurables.
-
-The job of an :class:`Application` is to create the master configuration
-object and then create the configurable objects, passing the config to them.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import atexit
+# encoding: utf-8
+"""
+An application for IPython.
+
+All top-level applications should use the classes in this module for
+handling configuration and creating configurables.
+
+The job of an :class:`Application` is to create the master configuration
+object and then create the configurable objects, passing the config to them.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import atexit
from copy import deepcopy
-import glob
-import logging
-import os
-import shutil
-import sys
-
-from traitlets.config.application import Application, catch_config_error
-from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
-from IPython.core import release, crashhandler
-from IPython.core.profiledir import ProfileDir, ProfileDirError
-from IPython.paths import get_ipython_dir, get_ipython_package_dir
-from IPython.utils.path import ensure_dir_exists
-from IPython.utils import py3compat
+import glob
+import logging
+import os
+import shutil
+import sys
+
+from traitlets.config.application import Application, catch_config_error
+from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
+from IPython.core import release, crashhandler
+from IPython.core.profiledir import ProfileDir, ProfileDirError
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from IPython.utils.path import ensure_dir_exists
+from IPython.utils import py3compat
from traitlets import (
List, Unicode, Type, Bool, Dict, Set, Instance, Undefined,
default, observe,
)
-
-if os.name == 'nt':
- programdata = os.environ.get('PROGRAMDATA', None)
- if programdata:
- SYSTEM_CONFIG_DIRS = [os.path.join(programdata, 'ipython')]
- else: # PROGRAMDATA is not defined by default on XP.
- SYSTEM_CONFIG_DIRS = []
-else:
- SYSTEM_CONFIG_DIRS = [
- "/usr/local/etc/ipython",
- "/etc/ipython",
- ]
-
+
+if os.name == 'nt':
+ programdata = os.environ.get('PROGRAMDATA', None)
+ if programdata:
+ SYSTEM_CONFIG_DIRS = [os.path.join(programdata, 'ipython')]
+ else: # PROGRAMDATA is not defined by default on XP.
+ SYSTEM_CONFIG_DIRS = []
+else:
+ SYSTEM_CONFIG_DIRS = [
+ "/usr/local/etc/ipython",
+ "/etc/ipython",
+ ]
+
ENV_CONFIG_DIRS = []
_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
@@ -62,241 +62,241 @@ else:
IPYTHON_SUPPRESS_CONFIG_ERRORS = False
else:
sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
-
-# aliases and flags
-
-base_aliases = {
- 'profile-dir' : 'ProfileDir.location',
- 'profile' : 'BaseIPythonApplication.profile',
- 'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
- 'log-level' : 'Application.log_level',
- 'config' : 'BaseIPythonApplication.extra_config_file',
-}
-
-base_flags = dict(
- debug = ({'Application' : {'log_level' : logging.DEBUG}},
- "set log level to logging.DEBUG (maximize logging output)"),
- quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
- "set log level to logging.CRITICAL (minimize logging output)"),
- init = ({'BaseIPythonApplication' : {
- 'copy_config_files' : True,
- 'auto_create' : True}
- }, """Initialize profile with default config files. This is equivalent
- to running `ipython profile create <profile>` prior to startup.
- """)
-)
-
-class ProfileAwareConfigLoader(PyFileConfigLoader):
- """A Python file config loader that is aware of IPython profiles."""
- def load_subconfig(self, fname, path=None, profile=None):
- if profile is not None:
- try:
- profile_dir = ProfileDir.find_profile_dir_by_name(
- get_ipython_dir(),
- profile,
- )
- except ProfileDirError:
- return
- path = profile_dir.location
- return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
-
-class BaseIPythonApplication(Application):
-
- name = Unicode(u'ipython')
- description = Unicode(u'IPython: an enhanced interactive Python shell.')
- version = Unicode(release.version)
-
- aliases = Dict(base_aliases)
- flags = Dict(base_flags)
- classes = List([ProfileDir])
-
- # enable `load_subconfig('cfg.py', profile='name')`
- python_config_loader_class = ProfileAwareConfigLoader
-
- # Track whether the config_file has changed,
- # because some logic happens only if we aren't using the default.
- config_file_specified = Set()
-
- config_file_name = Unicode()
+
+# aliases and flags
+
+base_aliases = {
+ 'profile-dir' : 'ProfileDir.location',
+ 'profile' : 'BaseIPythonApplication.profile',
+ 'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
+ 'log-level' : 'Application.log_level',
+ 'config' : 'BaseIPythonApplication.extra_config_file',
+}
+
+base_flags = dict(
+ debug = ({'Application' : {'log_level' : logging.DEBUG}},
+ "set log level to logging.DEBUG (maximize logging output)"),
+ quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
+ "set log level to logging.CRITICAL (minimize logging output)"),
+ init = ({'BaseIPythonApplication' : {
+ 'copy_config_files' : True,
+ 'auto_create' : True}
+ }, """Initialize profile with default config files. This is equivalent
+ to running `ipython profile create <profile>` prior to startup.
+ """)
+)
+
+class ProfileAwareConfigLoader(PyFileConfigLoader):
+ """A Python file config loader that is aware of IPython profiles."""
+ def load_subconfig(self, fname, path=None, profile=None):
+ if profile is not None:
+ try:
+ profile_dir = ProfileDir.find_profile_dir_by_name(
+ get_ipython_dir(),
+ profile,
+ )
+ except ProfileDirError:
+ return
+ path = profile_dir.location
+ return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
+
+class BaseIPythonApplication(Application):
+
+ name = Unicode(u'ipython')
+ description = Unicode(u'IPython: an enhanced interactive Python shell.')
+ version = Unicode(release.version)
+
+ aliases = Dict(base_aliases)
+ flags = Dict(base_flags)
+ classes = List([ProfileDir])
+
+ # enable `load_subconfig('cfg.py', profile='name')`
+ python_config_loader_class = ProfileAwareConfigLoader
+
+ # Track whether the config_file has changed,
+ # because some logic happens only if we aren't using the default.
+ config_file_specified = Set()
+
+ config_file_name = Unicode()
@default('config_file_name')
- def _config_file_name_default(self):
- return self.name.replace('-','_') + u'_config.py'
+ def _config_file_name_default(self):
+ return self.name.replace('-','_') + u'_config.py'
@observe('config_file_name')
def _config_file_name_changed(self, change):
if change['new'] != change['old']:
self.config_file_specified.add(change['new'])
-
- # The directory that contains IPython's builtin profiles.
- builtin_profile_dir = Unicode(
- os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
- )
-
- config_file_paths = List(Unicode())
+
+ # The directory that contains IPython's builtin profiles.
+ builtin_profile_dir = Unicode(
+ os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
+ )
+
+ config_file_paths = List(Unicode())
@default('config_file_paths')
- def _config_file_paths_default(self):
- return [py3compat.getcwd()]
-
+ def _config_file_paths_default(self):
+ return [py3compat.getcwd()]
+
extra_config_file = Unicode(
- help="""Path to an extra config file to load.
-
- If specified, load this config file in addition to any other IPython config.
+ help="""Path to an extra config file to load.
+
+ If specified, load this config file in addition to any other IPython config.
""").tag(config=True)
@observe('extra_config_file')
def _extra_config_file_changed(self, change):
old = change['old']
new = change['new']
- try:
- self.config_files.remove(old)
- except ValueError:
- pass
- self.config_file_specified.add(new)
- self.config_files.append(new)
-
+ try:
+ self.config_files.remove(old)
+ except ValueError:
+ pass
+ self.config_file_specified.add(new)
+ self.config_files.append(new)
+
profile = Unicode(u'default',
- help="""The IPython profile to use."""
+ help="""The IPython profile to use."""
).tag(config=True)
@observe('profile')
def _profile_changed(self, change):
- self.builtin_profile_dir = os.path.join(
+ self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', change['new']
- )
-
+ )
+
ipython_dir = Unicode(
- help="""
- The name of the IPython directory. This directory is used for logging
- configuration (through profiles), history storage, etc. The default
- is usually $HOME/.ipython. This option can also be specified through
- the environment variable IPYTHONDIR.
- """
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+ is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
).tag(config=True)
@default('ipython_dir')
- def _ipython_dir_default(self):
- d = get_ipython_dir()
+ def _ipython_dir_default(self):
+ d = get_ipython_dir()
self._ipython_dir_changed({
'name': 'ipython_dir',
'old': d,
'new': d,
})
- return d
-
- _in_init_profile_dir = False
- profile_dir = Instance(ProfileDir, allow_none=True)
+ return d
+
+ _in_init_profile_dir = False
+ profile_dir = Instance(ProfileDir, allow_none=True)
@default('profile_dir')
- def _profile_dir_default(self):
- # avoid recursion
- if self._in_init_profile_dir:
- return
- # profile_dir requested early, force initialization
- self.init_profile_dir()
- return self.profile_dir
-
+ def _profile_dir_default(self):
+ # avoid recursion
+ if self._in_init_profile_dir:
+ return
+ # profile_dir requested early, force initialization
+ self.init_profile_dir()
+ return self.profile_dir
+
overwrite = Bool(False,
help="""Whether to overwrite existing config files when copying"""
).tag(config=True)
auto_create = Bool(False,
help="""Whether to create profile dir if it doesn't exist"""
).tag(config=True)
-
- config_files = List(Unicode())
+
+ config_files = List(Unicode())
@default('config_files')
- def _config_files_default(self):
- return [self.config_file_name]
-
+ def _config_files_default(self):
+ return [self.config_file_name]
+
copy_config_files = Bool(False,
- help="""Whether to install the default config files into the profile dir.
- If a new profile is being created, and IPython contains config files for that
- profile, then they will be staged into the new directory. Otherwise,
- default config files will be automatically generated.
+ help="""Whether to install the default config files into the profile dir.
+ If a new profile is being created, and IPython contains config files for that
+ profile, then they will be staged into the new directory. Otherwise,
+ default config files will be automatically generated.
""").tag(config=True)
-
+
verbose_crash = Bool(False,
- help="""Create a massive crash report when IPython encounters what may be an
- internal error. The default is to append a short message to the
+ help="""Create a massive crash report when IPython encounters what may be an
+ internal error. The default is to append a short message to the
usual traceback""").tag(config=True)
-
- # The class to use as the crash handler.
- crash_handler_class = Type(crashhandler.CrashHandler)
-
- @catch_config_error
- def __init__(self, **kwargs):
- super(BaseIPythonApplication, self).__init__(**kwargs)
- # ensure current working directory exists
- try:
- py3compat.getcwd()
- except:
- # exit if cwd doesn't exist
- self.log.error("Current working directory doesn't exist.")
- self.exit(1)
-
- #-------------------------------------------------------------------------
- # Various stages of Application creation
- #-------------------------------------------------------------------------
-
- deprecated_subcommands = {}
-
- def initialize_subcommand(self, subc, argv=None):
- if subc in self.deprecated_subcommands:
- self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
- "in future versions.".format(sub=subc))
+
+ # The class to use as the crash handler.
+ crash_handler_class = Type(crashhandler.CrashHandler)
+
+ @catch_config_error
+ def __init__(self, **kwargs):
+ super(BaseIPythonApplication, self).__init__(**kwargs)
+ # ensure current working directory exists
+ try:
+ py3compat.getcwd()
+ except:
+ # exit if cwd doesn't exist
+ self.log.error("Current working directory doesn't exist.")
+ self.exit(1)
+
+ #-------------------------------------------------------------------------
+ # Various stages of Application creation
+ #-------------------------------------------------------------------------
+
+ deprecated_subcommands = {}
+
+ def initialize_subcommand(self, subc, argv=None):
+ if subc in self.deprecated_subcommands:
+ self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
+ "in future versions.".format(sub=subc))
self.log.warning("You likely want to use `jupyter {sub}` in the "
"future".format(sub=subc))
- return super(BaseIPythonApplication, self).initialize_subcommand(subc, argv)
-
- def init_crash_handler(self):
- """Create a crash handler, typically setting sys.excepthook to it."""
- self.crash_handler = self.crash_handler_class(self)
- sys.excepthook = self.excepthook
- def unset_crashhandler():
- sys.excepthook = sys.__excepthook__
- atexit.register(unset_crashhandler)
-
- def excepthook(self, etype, evalue, tb):
- """this is sys.excepthook after init_crashhandler
-
- set self.verbose_crash=True to use our full crashhandler, instead of
- a regular traceback with a short message (crash_handler_lite)
- """
-
- if self.verbose_crash:
- return self.crash_handler(etype, evalue, tb)
- else:
- return crashhandler.crash_handler_lite(etype, evalue, tb)
+ return super(BaseIPythonApplication, self).initialize_subcommand(subc, argv)
+
+ def init_crash_handler(self):
+ """Create a crash handler, typically setting sys.excepthook to it."""
+ self.crash_handler = self.crash_handler_class(self)
+ sys.excepthook = self.excepthook
+ def unset_crashhandler():
+ sys.excepthook = sys.__excepthook__
+ atexit.register(unset_crashhandler)
+
+ def excepthook(self, etype, evalue, tb):
+ """this is sys.excepthook after init_crashhandler
+
+ set self.verbose_crash=True to use our full crashhandler, instead of
+ a regular traceback with a short message (crash_handler_lite)
+ """
+
+ if self.verbose_crash:
+ return self.crash_handler(etype, evalue, tb)
+ else:
+ return crashhandler.crash_handler_lite(etype, evalue, tb)
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
old = change['old']
new = change['new']
- if old is not Undefined:
- str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
- sys.getfilesystemencoding()
- )
- if str_old in sys.path:
- sys.path.remove(str_old)
- str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
- sys.getfilesystemencoding()
- )
- sys.path.append(str_path)
- ensure_dir_exists(new)
- readme = os.path.join(new, 'README')
- readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
- if not os.path.exists(readme) and os.path.exists(readme_src):
- shutil.copy(readme_src, readme)
- for d in ('extensions', 'nbextensions'):
- path = os.path.join(new, d)
- try:
- ensure_dir_exists(path)
- except OSError as e:
- # this will not be EEXIST
- self.log.error("couldn't create path %s: %s", path, e)
- self.log.debug("IPYTHONDIR set to: %s" % new)
-
+ if old is not Undefined:
+ str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
+ sys.getfilesystemencoding()
+ )
+ if str_old in sys.path:
+ sys.path.remove(str_old)
+ str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
+ sys.getfilesystemencoding()
+ )
+ sys.path.append(str_path)
+ ensure_dir_exists(new)
+ readme = os.path.join(new, 'README')
+ readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
+ if not os.path.exists(readme) and os.path.exists(readme_src):
+ shutil.copy(readme_src, readme)
+ for d in ('extensions', 'nbextensions'):
+ path = os.path.join(new, d)
+ try:
+ ensure_dir_exists(path)
+ except OSError as e:
+ # this will not be EEXIST
+ self.log.error("couldn't create path %s: %s", path, e)
+ self.log.debug("IPYTHONDIR set to: %s" % new)
+
def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
- """Load the config file.
-
- By default, errors in loading config are handled, and a warning
- printed on screen. For testing, the suppress_errors option is set
- to False, so errors will make tests fail.
+ """Load the config file.
+
+ By default, errors in loading config are handled, and a warning
+ printed on screen. For testing, the suppress_errors option is set
+ to False, so errors will make tests fail.
         `suppress_errors` defaults to `None`, in which case the
         behavior falls back to that of `traitlets.Application`.
@@ -307,161 +307,161 @@ class BaseIPythonApplication(Application):
- to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
     Any other value is invalid and will make IPython exit with a non-zero return code.
- """
+ """
- self.log.debug("Searching path %s for config files", self.config_file_paths)
- base_config = 'ipython_config.py'
- self.log.debug("Attempting to load config file: %s" %
- base_config)
- try:
+ self.log.debug("Searching path %s for config files", self.config_file_paths)
+ base_config = 'ipython_config.py'
+ self.log.debug("Attempting to load config file: %s" %
+ base_config)
+ try:
if suppress_errors is not None:
old_value = Application.raise_config_file_errors
                 Application.raise_config_file_errors = not suppress_errors
- Application.load_config_file(
- self,
- base_config,
- path=self.config_file_paths
- )
- except ConfigFileNotFound:
- # ignore errors loading parent
- self.log.debug("Config file %s not found", base_config)
- pass
+ Application.load_config_file(
+ self,
+ base_config,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # ignore errors loading parent
+ self.log.debug("Config file %s not found", base_config)
+ pass
if suppress_errors is not None:
Application.raise_config_file_errors = old_value
-
- for config_file_name in self.config_files:
- if not config_file_name or config_file_name == base_config:
- continue
- self.log.debug("Attempting to load config file: %s" %
- self.config_file_name)
- try:
- Application.load_config_file(
- self,
- config_file_name,
- path=self.config_file_paths
- )
- except ConfigFileNotFound:
- # Only warn if the default config file was NOT being used.
- if config_file_name in self.config_file_specified:
- msg = self.log.warning
- else:
- msg = self.log.debug
- msg("Config file not found, skipping: %s", config_file_name)
- except Exception:
- # For testing purposes.
- if not suppress_errors:
- raise
- self.log.warning("Error loading config file: %s" %
- self.config_file_name, exc_info=True)
-
- def init_profile_dir(self):
- """initialize the profile dir"""
- self._in_init_profile_dir = True
- if self.profile_dir is not None:
- # already ran
- return
- if 'ProfileDir.location' not in self.config:
- # location not specified, find by profile name
- try:
- p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
- except ProfileDirError:
- # not found, maybe create it (always create default profile)
- if self.auto_create or self.profile == 'default':
- try:
- p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
- except ProfileDirError:
- self.log.fatal("Could not create profile: %r"%self.profile)
- self.exit(1)
- else:
- self.log.info("Created profile dir: %r"%p.location)
- else:
- self.log.fatal("Profile %r not found."%self.profile)
- self.exit(1)
- else:
- self.log.debug("Using existing profile dir: %r"%p.location)
- else:
- location = self.config.ProfileDir.location
- # location is fully specified
- try:
- p = ProfileDir.find_profile_dir(location, self.config)
- except ProfileDirError:
- # not found, maybe create it
- if self.auto_create:
- try:
- p = ProfileDir.create_profile_dir(location, self.config)
- except ProfileDirError:
- self.log.fatal("Could not create profile directory: %r"%location)
- self.exit(1)
- else:
- self.log.debug("Creating new profile dir: %r"%location)
- else:
- self.log.fatal("Profile directory %r not found."%location)
- self.exit(1)
- else:
- self.log.info("Using existing profile dir: %r"%location)
- # if profile_dir is specified explicitly, set profile name
- dir_name = os.path.basename(p.location)
- if dir_name.startswith('profile_'):
- self.profile = dir_name[8:]
-
- self.profile_dir = p
- self.config_file_paths.append(p.location)
- self._in_init_profile_dir = False
-
- def init_config_files(self):
- """[optionally] copy default config files into profile dir."""
+
+ for config_file_name in self.config_files:
+ if not config_file_name or config_file_name == base_config:
+ continue
+ self.log.debug("Attempting to load config file: %s" %
+ self.config_file_name)
+ try:
+ Application.load_config_file(
+ self,
+ config_file_name,
+ path=self.config_file_paths
+ )
+ except ConfigFileNotFound:
+ # Only warn if the default config file was NOT being used.
+ if config_file_name in self.config_file_specified:
+ msg = self.log.warning
+ else:
+ msg = self.log.debug
+ msg("Config file not found, skipping: %s", config_file_name)
+ except Exception:
+ # For testing purposes.
+ if not suppress_errors:
+ raise
+ self.log.warning("Error loading config file: %s" %
+ self.config_file_name, exc_info=True)
+
+ def init_profile_dir(self):
+ """initialize the profile dir"""
+ self._in_init_profile_dir = True
+ if self.profile_dir is not None:
+ # already ran
+ return
+ if 'ProfileDir.location' not in self.config:
+ # location not specified, find by profile name
+ try:
+ p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ # not found, maybe create it (always create default profile)
+ if self.auto_create or self.profile == 'default':
+ try:
+ p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile: %r"%self.profile)
+ self.exit(1)
+ else:
+ self.log.info("Created profile dir: %r"%p.location)
+ else:
+ self.log.fatal("Profile %r not found."%self.profile)
+ self.exit(1)
+ else:
+ self.log.debug("Using existing profile dir: %r"%p.location)
+ else:
+ location = self.config.ProfileDir.location
+ # location is fully specified
+ try:
+ p = ProfileDir.find_profile_dir(location, self.config)
+ except ProfileDirError:
+ # not found, maybe create it
+ if self.auto_create:
+ try:
+ p = ProfileDir.create_profile_dir(location, self.config)
+ except ProfileDirError:
+ self.log.fatal("Could not create profile directory: %r"%location)
+ self.exit(1)
+ else:
+ self.log.debug("Creating new profile dir: %r"%location)
+ else:
+ self.log.fatal("Profile directory %r not found."%location)
+ self.exit(1)
+ else:
+ self.log.info("Using existing profile dir: %r"%location)
+ # if profile_dir is specified explicitly, set profile name
+ dir_name = os.path.basename(p.location)
+ if dir_name.startswith('profile_'):
+ self.profile = dir_name[8:]
+
+ self.profile_dir = p
+ self.config_file_paths.append(p.location)
+ self._in_init_profile_dir = False
+
+ def init_config_files(self):
+ """[optionally] copy default config files into profile dir."""
self.config_file_paths.extend(ENV_CONFIG_DIRS)
- self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
- # copy config files
- path = self.builtin_profile_dir
- if self.copy_config_files:
- src = self.profile
-
- cfg = self.config_file_name
- if path and os.path.exists(os.path.join(path, cfg)):
- self.log.warning("Staging %r from %s into %r [overwrite=%s]"%(
- cfg, src, self.profile_dir.location, self.overwrite)
- )
- self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
- else:
- self.stage_default_config_file()
- else:
- # Still stage *bundled* config files, but not generated ones
- # This is necessary for `ipython profile=sympy` to load the profile
- # on the first go
- files = glob.glob(os.path.join(path, '*.py'))
- for fullpath in files:
- cfg = os.path.basename(fullpath)
- if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
- # file was copied
- self.log.warning("Staging bundled %s from %s into %r"%(
- cfg, self.profile, self.profile_dir.location)
- )
-
-
- def stage_default_config_file(self):
- """auto generate default config file, and stage it into the profile."""
- s = self.generate_config_file()
- fname = os.path.join(self.profile_dir.location, self.config_file_name)
- if self.overwrite or not os.path.exists(fname):
- self.log.warning("Generating default config file: %r"%(fname))
- with open(fname, 'w') as f:
- f.write(s)
-
- @catch_config_error
- def initialize(self, argv=None):
- # don't hook up crash handler before parsing command-line
- self.parse_command_line(argv)
- self.init_crash_handler()
- if self.subapp is not None:
- # stop here if subapp is taking over
- return
+ self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
+ # copy config files
+ path = self.builtin_profile_dir
+ if self.copy_config_files:
+ src = self.profile
+
+ cfg = self.config_file_name
+ if path and os.path.exists(os.path.join(path, cfg)):
+ self.log.warning("Staging %r from %s into %r [overwrite=%s]"%(
+ cfg, src, self.profile_dir.location, self.overwrite)
+ )
+ self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
+ else:
+ self.stage_default_config_file()
+ else:
+ # Still stage *bundled* config files, but not generated ones
+ # This is necessary for `ipython profile=sympy` to load the profile
+ # on the first go
+ files = glob.glob(os.path.join(path, '*.py'))
+ for fullpath in files:
+ cfg = os.path.basename(fullpath)
+ if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
+ # file was copied
+ self.log.warning("Staging bundled %s from %s into %r"%(
+ cfg, self.profile, self.profile_dir.location)
+ )
+
+
+ def stage_default_config_file(self):
+ """auto generate default config file, and stage it into the profile."""
+ s = self.generate_config_file()
+ fname = os.path.join(self.profile_dir.location, self.config_file_name)
+ if self.overwrite or not os.path.exists(fname):
+ self.log.warning("Generating default config file: %r"%(fname))
+ with open(fname, 'w') as f:
+ f.write(s)
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ # don't hook up crash handler before parsing command-line
+ self.parse_command_line(argv)
+ self.init_crash_handler()
+ if self.subapp is not None:
+ # stop here if subapp is taking over
+ return
# save a copy of CLI config to re-load after config files
# so that it has highest priority
cl_config = deepcopy(self.config)
- self.init_profile_dir()
- self.init_config_files()
- self.load_config_file()
- # enforce cl-opts override configfile opts:
- self.update_config(cl_config)
+ self.init_profile_dir()
+ self.init_config_files()
+ self.load_config_file()
+ # enforce cl-opts override configfile opts:
+ self.update_config(cl_config)
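
The initialize() flow above (parse the command line, init the profile dir, load config files, then re-apply the CLI config) is what gives command-line options priority over file options. Below is a minimal sketch of a hypothetical subclass wired into that flow; `MyApp` and its name are illustrative and not part of the diff.

    from traitlets import Unicode
    from IPython.core.application import BaseIPythonApplication

    class MyApp(BaseIPythonApplication):       # hypothetical subclass
        name = Unicode(u'myapp')               # -> config file becomes myapp_config.py

    if __name__ == '__main__':
        app = MyApp()
        app.initialize()                       # CLI opts override file opts, per above
        print(app.profile_dir.location)        # profile dir was found or created
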
diff --git a/contrib/python/ipython/py2/IPython/core/autocall.py b/contrib/python/ipython/py2/IPython/core/autocall.py
index 4ef2bce59c..bab7f859c9 100644
--- a/contrib/python/ipython/py2/IPython/core/autocall.py
+++ b/contrib/python/ipython/py2/IPython/core/autocall.py
@@ -1,70 +1,70 @@
-# encoding: utf-8
-"""
-Autocall capabilities for IPython.core.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-* Thomas Kluyver
-
-Notes
------
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-class IPyAutocall(object):
- """ Instances of this class are always autocalled
-
- This happens regardless of 'autocall' variable state. Use this to
- develop macro-like mechanisms.
- """
- _ip = None
- rewrite = True
- def __init__(self, ip=None):
- self._ip = ip
-
- def set_ip(self, ip):
-        """ Will be used to set _ip to point to the current ipython instance before a call
-
- Override this method if you don't want this to happen.
-
- """
- self._ip = ip
-
-
-class ExitAutocall(IPyAutocall):
- """An autocallable object which will be added to the user namespace so that
- exit, exit(), quit or quit() are all valid ways to close the shell."""
- rewrite = False
-
- def __call__(self):
- self._ip.ask_exit()
-
-class ZMQExitAutocall(ExitAutocall):
- """Exit IPython. Autocallable, so it needn't be explicitly called.
-
- Parameters
- ----------
- keep_kernel : bool
- If True, leave the kernel alive. Otherwise, tell the kernel to exit too
- (default).
- """
- def __call__(self, keep_kernel=False):
- self._ip.keepkernel_on_exit = keep_kernel
- self._ip.ask_exit()
+# encoding: utf-8
+"""
+Autocall capabilities for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Thomas Kluyver
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+class IPyAutocall(object):
+ """ Instances of this class are always autocalled
+
+ This happens regardless of 'autocall' variable state. Use this to
+ develop macro-like mechanisms.
+ """
+ _ip = None
+ rewrite = True
+ def __init__(self, ip=None):
+ self._ip = ip
+
+ def set_ip(self, ip):
+        """ Will be used to set _ip to point to the current ipython instance before a call
+
+ Override this method if you don't want this to happen.
+
+ """
+ self._ip = ip
+
+
+class ExitAutocall(IPyAutocall):
+ """An autocallable object which will be added to the user namespace so that
+ exit, exit(), quit or quit() are all valid ways to close the shell."""
+ rewrite = False
+
+ def __call__(self):
+ self._ip.ask_exit()
+
+class ZMQExitAutocall(ExitAutocall):
+ """Exit IPython. Autocallable, so it needn't be explicitly called.
+
+ Parameters
+ ----------
+ keep_kernel : bool
+ If True, leave the kernel alive. Otherwise, tell the kernel to exit too
+ (default).
+ """
+ def __call__(self, keep_kernel=False):
+ self._ip.keepkernel_on_exit = keep_kernel
+ self._ip.ask_exit()
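
The contract of IPyAutocall above is simply that instances placed in the user namespace are called when their name is typed, regardless of the autocall setting. A small illustrative sketch follows; the `Hello` class and the `hi` name are made up.

    from IPython.core.autocall import IPyAutocall

    class Hello(IPyAutocall):                  # illustrative subclass
        def __call__(self):
            print('autocalled; shell is %r' % self._ip)

    # Inside IPython:  get_ipython().user_ns['hi'] = Hello(get_ipython())
    # Typing just `hi` at the prompt then invokes Hello.__call__().
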
diff --git a/contrib/python/ipython/py2/IPython/core/builtin_trap.py b/contrib/python/ipython/py2/IPython/core/builtin_trap.py
index 011362599c..909a555c73 100644
--- a/contrib/python/ipython/py2/IPython/core/builtin_trap.py
+++ b/contrib/python/ipython/py2/IPython/core/builtin_trap.py
@@ -1,114 +1,114 @@
-"""
-A context manager for managing things injected into :mod:`__builtin__`.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-"""
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# Complete license in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from traitlets.config.configurable import Configurable
-
-from IPython.utils.py3compat import builtin_mod, iteritems
-from traitlets import Instance
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-class __BuiltinUndefined(object): pass
-BuiltinUndefined = __BuiltinUndefined()
-
-class __HideBuiltin(object): pass
-HideBuiltin = __HideBuiltin()
-
-
-class BuiltinTrap(Configurable):
-
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
- allow_none=True)
-
- def __init__(self, shell=None):
- super(BuiltinTrap, self).__init__(shell=shell, config=None)
- self._orig_builtins = {}
- # We define this to track if a single BuiltinTrap is nested.
- # Only turn off the trap when the outermost call to __exit__ is made.
- self._nested_level = 0
- self.shell = shell
- # builtins we always add - if set to HideBuiltin, they will just
- # be removed instead of being replaced by something else
- self.auto_builtins = {'exit': HideBuiltin,
- 'quit': HideBuiltin,
- 'get_ipython': self.shell.get_ipython,
- }
- # Recursive reload function
- try:
- from IPython.lib import deepreload
- if self.shell.deep_reload:
- from warnings import warn
+"""
+A context manager for managing things injected into :mod:`__builtin__`.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# Complete license in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from traitlets.config.configurable import Configurable
+
+from IPython.utils.py3compat import builtin_mod, iteritems
+from traitlets import Instance
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+class __BuiltinUndefined(object): pass
+BuiltinUndefined = __BuiltinUndefined()
+
+class __HideBuiltin(object): pass
+HideBuiltin = __HideBuiltin()
+
+
+class BuiltinTrap(Configurable):
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+
+ def __init__(self, shell=None):
+ super(BuiltinTrap, self).__init__(shell=shell, config=None)
+ self._orig_builtins = {}
+ # We define this to track if a single BuiltinTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+ self.shell = shell
+ # builtins we always add - if set to HideBuiltin, they will just
+ # be removed instead of being replaced by something else
+ self.auto_builtins = {'exit': HideBuiltin,
+ 'quit': HideBuiltin,
+ 'get_ipython': self.shell.get_ipython,
+ }
+ # Recursive reload function
+ try:
+ from IPython.lib import deepreload
+ if self.shell.deep_reload:
+ from warnings import warn
warn("Automatically replacing builtin `reload` by `deepreload.reload` is deprecated since IPython 4.0, please import `reload` explicitly from `IPython.lib.deepreload", DeprecationWarning)
- self.auto_builtins['reload'] = deepreload._dreload
- else:
- self.auto_builtins['dreload']= deepreload._dreload
- except ImportError:
- pass
-
- def __enter__(self):
- if self._nested_level == 0:
- self.activate()
- self._nested_level += 1
- # I return self, so callers can use add_builtin in a with clause.
- return self
-
- def __exit__(self, type, value, traceback):
- if self._nested_level == 1:
- self.deactivate()
- self._nested_level -= 1
- # Returning False will cause exceptions to propagate
- return False
-
- def add_builtin(self, key, value):
- """Add a builtin and save the original."""
- bdict = builtin_mod.__dict__
- orig = bdict.get(key, BuiltinUndefined)
- if value is HideBuiltin:
- if orig is not BuiltinUndefined: #same as 'key in bdict'
- self._orig_builtins[key] = orig
- del bdict[key]
- else:
- self._orig_builtins[key] = orig
- bdict[key] = value
-
- def remove_builtin(self, key, orig):
- """Remove an added builtin and re-set the original."""
- if orig is BuiltinUndefined:
- del builtin_mod.__dict__[key]
- else:
- builtin_mod.__dict__[key] = orig
-
- def activate(self):
- """Store ipython references in the __builtin__ namespace."""
-
- add_builtin = self.add_builtin
- for name, func in iteritems(self.auto_builtins):
- add_builtin(name, func)
-
- def deactivate(self):
- """Remove any builtins which might have been added by add_builtins, or
- restore overwritten ones to their previous values."""
- remove_builtin = self.remove_builtin
- for key, val in iteritems(self._orig_builtins):
- remove_builtin(key, val)
- self._orig_builtins.clear()
- self._builtins_added = False
+ self.auto_builtins['reload'] = deepreload._dreload
+ else:
+ self.auto_builtins['dreload']= deepreload._dreload
+ except ImportError:
+ pass
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.activate()
+ self._nested_level += 1
+ # I return self, so callers can use add_builtin in a with clause.
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.deactivate()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def add_builtin(self, key, value):
+ """Add a builtin and save the original."""
+ bdict = builtin_mod.__dict__
+ orig = bdict.get(key, BuiltinUndefined)
+ if value is HideBuiltin:
+ if orig is not BuiltinUndefined: #same as 'key in bdict'
+ self._orig_builtins[key] = orig
+ del bdict[key]
+ else:
+ self._orig_builtins[key] = orig
+ bdict[key] = value
+
+ def remove_builtin(self, key, orig):
+ """Remove an added builtin and re-set the original."""
+ if orig is BuiltinUndefined:
+ del builtin_mod.__dict__[key]
+ else:
+ builtin_mod.__dict__[key] = orig
+
+ def activate(self):
+ """Store ipython references in the __builtin__ namespace."""
+
+ add_builtin = self.add_builtin
+ for name, func in iteritems(self.auto_builtins):
+ add_builtin(name, func)
+
+ def deactivate(self):
+ """Remove any builtins which might have been added by add_builtins, or
+ restore overwritten ones to their previous values."""
+ remove_builtin = self.remove_builtin
+ for key, val in iteritems(self._orig_builtins):
+ remove_builtin(key, val)
+ self._orig_builtins.clear()
+ self._builtins_added = False
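
Because BuiltinTrap counts nesting, only the outermost with-block actually activates and deactivates the trap. A usage sketch under the assumption of a live shell obtained from get_ipython():

    shell = get_ipython()                      # only defined inside IPython
    trap = shell.builtin_trap                  # a BuiltinTrap instance

    with trap:                                 # outermost: activate() injects builtins
        with trap:                             # nested: only bumps _nested_level
            get_ipython                        # injected into __builtin__ by the trap
    # outermost __exit__ ran deactivate(), restoring the original builtins
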
diff --git a/contrib/python/ipython/py2/IPython/core/compilerop.py b/contrib/python/ipython/py2/IPython/core/compilerop.py
index f662e37e96..f529eb5224 100644
--- a/contrib/python/ipython/py2/IPython/core/compilerop.py
+++ b/contrib/python/ipython/py2/IPython/core/compilerop.py
@@ -1,144 +1,144 @@
-"""Compiler tools with improved interactive support.
-
-Provides compilation machinery similar to codeop, but with caching support so
-we can provide interactive tracebacks.
-
-Authors
--------
-* Robert Kern
-* Fernando Perez
-* Thomas Kluyver
-"""
-
-# Note: though it might be more natural to name this module 'compiler', that
-# name is in the stdlib and name collisions with the stdlib tend to produce
-# weird problems (often with third-party tools).
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib imports
-import __future__
-from ast import PyCF_ONLY_AST
-import codeop
-import functools
-import hashlib
-import linecache
-import operator
-import time
-
-#-----------------------------------------------------------------------------
-# Constants
-#-----------------------------------------------------------------------------
-
-# Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
-# this is used as a bitmask to extract future-related code flags.
-PyCF_MASK = functools.reduce(operator.or_,
- (getattr(__future__, fname).compiler_flag
- for fname in __future__.all_feature_names))
-
-#-----------------------------------------------------------------------------
-# Local utilities
-#-----------------------------------------------------------------------------
-
-def code_name(code, number=0):
- """ Compute a (probably) unique name for code for caching.
-
- This now expects code to be unicode.
- """
+"""Compiler tools with improved interactive support.
+
+Provides compilation machinery similar to codeop, but with caching support so
+we can provide interactive tracebacks.
+
+Authors
+-------
+* Robert Kern
+* Fernando Perez
+* Thomas Kluyver
+"""
+
+# Note: though it might be more natural to name this module 'compiler', that
+# name is in the stdlib and name collisions with the stdlib tend to produce
+# weird problems (often with third-party tools).
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# Stdlib imports
+import __future__
+from ast import PyCF_ONLY_AST
+import codeop
+import functools
+import hashlib
+import linecache
+import operator
+import time
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
+# this is used as a bitmask to extract future-related code flags.
+PyCF_MASK = functools.reduce(operator.or_,
+ (getattr(__future__, fname).compiler_flag
+ for fname in __future__.all_feature_names))
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+def code_name(code, number=0):
+ """ Compute a (probably) unique name for code for caching.
+
+ This now expects code to be unicode.
+ """
hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
- # Include the number and 12 characters of the hash in the name. It's
- # pretty much impossible that in a single session we'll have collisions
- # even with truncated hashes, and the full one makes tracebacks too long
- return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-class CachingCompiler(codeop.Compile):
- """A compiler that caches code compiled from interactive statements.
- """
-
- def __init__(self):
- codeop.Compile.__init__(self)
-
- # This is ugly, but it must be done this way to allow multiple
- # simultaneous ipython instances to coexist. Since Python itself
- # directly accesses the data structures in the linecache module, and
- # the cache therein is global, we must work with that data structure.
- # We must hold a reference to the original checkcache routine and call
- # that in our own check_cache() below, but the special IPython cache
- # must also be shared by all IPython instances. If we were to hold
- # separate caches (one in each CachingCompiler instance), any call made
- # by Python itself to linecache.checkcache() would obliterate the
- # cached data from the other IPython instances.
- if not hasattr(linecache, '_ipython_cache'):
- linecache._ipython_cache = {}
- if not hasattr(linecache, '_checkcache_ori'):
- linecache._checkcache_ori = linecache.checkcache
- # Now, we must monkeypatch the linecache directly so that parts of the
- # stdlib that call it outside our control go through our codepath
- # (otherwise we'd lose our tracebacks).
- linecache.checkcache = check_linecache_ipython
-
- def ast_parse(self, source, filename='<unknown>', symbol='exec'):
- """Parse code to an AST with the current compiler flags active.
-
- Arguments are exactly the same as ast.parse (in the standard library),
- and are passed to the built-in compile function."""
- return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
-
- def reset_compiler_flags(self):
- """Reset compiler flags to default state."""
- # This value is copied from codeop.Compile.__init__, so if that ever
- # changes, it will need to be updated.
- self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
-
- @property
- def compiler_flags(self):
- """Flags currently active in the compilation process.
- """
- return self.flags
-
- def cache(self, code, number=0):
- """Make a name for a block of code, and cache the code.
-
- Parameters
- ----------
- code : str
- The Python source code to cache.
- number : int
- A number which forms part of the code's name. Used for the execution
- counter.
-
- Returns
- -------
- The name of the cached code (as a string). Pass this as the filename
- argument to compilation, so that tracebacks are correctly hooked up.
- """
- name = code_name(code, number)
- entry = (len(code), time.time(),
- [line+'\n' for line in code.splitlines()], name)
- linecache.cache[name] = entry
- linecache._ipython_cache[name] = entry
- return name
-
-def check_linecache_ipython(*args):
- """Call linecache.checkcache() safely protecting our cached values.
- """
-    # First call the original checkcache as intended
- linecache._checkcache_ori(*args)
- # Then, update back the cache with our data, so that tracebacks related
- # to our compiled codes can be produced.
- linecache.cache.update(linecache._ipython_cache)
+ # Include the number and 12 characters of the hash in the name. It's
+ # pretty much impossible that in a single session we'll have collisions
+ # even with truncated hashes, and the full one makes tracebacks too long
+ return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+class CachingCompiler(codeop.Compile):
+ """A compiler that caches code compiled from interactive statements.
+ """
+
+ def __init__(self):
+ codeop.Compile.__init__(self)
+
+ # This is ugly, but it must be done this way to allow multiple
+ # simultaneous ipython instances to coexist. Since Python itself
+ # directly accesses the data structures in the linecache module, and
+ # the cache therein is global, we must work with that data structure.
+ # We must hold a reference to the original checkcache routine and call
+ # that in our own check_cache() below, but the special IPython cache
+ # must also be shared by all IPython instances. If we were to hold
+ # separate caches (one in each CachingCompiler instance), any call made
+ # by Python itself to linecache.checkcache() would obliterate the
+ # cached data from the other IPython instances.
+ if not hasattr(linecache, '_ipython_cache'):
+ linecache._ipython_cache = {}
+ if not hasattr(linecache, '_checkcache_ori'):
+ linecache._checkcache_ori = linecache.checkcache
+ # Now, we must monkeypatch the linecache directly so that parts of the
+ # stdlib that call it outside our control go through our codepath
+ # (otherwise we'd lose our tracebacks).
+ linecache.checkcache = check_linecache_ipython
+
+ def ast_parse(self, source, filename='<unknown>', symbol='exec'):
+ """Parse code to an AST with the current compiler flags active.
+
+ Arguments are exactly the same as ast.parse (in the standard library),
+ and are passed to the built-in compile function."""
+ return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
+
+ def reset_compiler_flags(self):
+ """Reset compiler flags to default state."""
+ # This value is copied from codeop.Compile.__init__, so if that ever
+ # changes, it will need to be updated.
+ self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
+
+ @property
+ def compiler_flags(self):
+ """Flags currently active in the compilation process.
+ """
+ return self.flags
+
+ def cache(self, code, number=0):
+ """Make a name for a block of code, and cache the code.
+
+ Parameters
+ ----------
+ code : str
+ The Python source code to cache.
+ number : int
+ A number which forms part of the code's name. Used for the execution
+ counter.
+
+ Returns
+ -------
+ The name of the cached code (as a string). Pass this as the filename
+ argument to compilation, so that tracebacks are correctly hooked up.
+ """
+ name = code_name(code, number)
+ entry = (len(code), time.time(),
+ [line+'\n' for line in code.splitlines()], name)
+ linecache.cache[name] = entry
+ linecache._ipython_cache[name] = entry
+ return name
+
+def check_linecache_ipython(*args):
+ """Call linecache.checkcache() safely protecting our cached values.
+ """
+    # First call the original checkcache as intended
+ linecache._checkcache_ori(*args)
+ # Then, update back the cache with our data, so that tracebacks related
+ # to our compiled codes can be produced.
+ linecache.cache.update(linecache._ipython_cache)
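
The linecache handling above is what lets tracebacks display interactive input. A minimal sketch of the CachingCompiler round-trip; the source string and the counter value are arbitrary.

    import linecache
    from IPython.core.compilerop import CachingCompiler

    compiler = CachingCompiler()
    src = u'x = 1\nraise ValueError(x)\n'
    name = compiler.cache(src, number=7)       # e.g. '<ipython-input-7-...>'
    code = compiler(src, name, 'exec')         # codeop.Compile instances are callable
    print(linecache.getline(name, 2))          # traceback machinery now sees line 2
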
diff --git a/contrib/python/ipython/py2/IPython/core/completer.py b/contrib/python/ipython/py2/IPython/core/completer.py
index 46003de8d4..b386945e54 100644
--- a/contrib/python/ipython/py2/IPython/core/completer.py
+++ b/contrib/python/ipython/py2/IPython/core/completer.py
@@ -1,79 +1,79 @@
-# encoding: utf-8
-"""Word completion for IPython.
-
+# encoding: utf-8
+"""Word completion for IPython.
+
 This module started as a fork of the rlcompleter module in the Python standard
-library. The original enhancements made to rlcompleter have been sent
+library. The original enhancements made to rlcompleter have been sent
 upstream and were accepted as of Python 2.3.
-
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-#
-# Some of this code originated from rlcompleter in the Python standard library
-# Copyright (C) 2001 Python Software Foundation, www.python.org
-
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+#
+# Some of this code originated from rlcompleter in the Python standard library
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+
from __future__ import print_function
-import __main__
-import glob
-import inspect
-import itertools
-import keyword
-import os
-import re
-import sys
-import unicodedata
-import string
+import __main__
+import glob
+import inspect
+import itertools
+import keyword
+import os
+import re
+import sys
+import unicodedata
+import string
import warnings
-
+
from traitlets.config.configurable import Configurable
-from IPython.core.error import TryNext
-from IPython.core.inputsplitter import ESC_MAGIC
-from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
-from IPython.utils import generics
-from IPython.utils.decorators import undoc
+from IPython.core.error import TryNext
+from IPython.core.inputsplitter import ESC_MAGIC
+from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
+from IPython.utils import generics
+from IPython.utils.decorators import undoc
from IPython.utils.dir2 import dir2, get_real_method
-from IPython.utils.process import arg_split
+from IPython.utils.process import arg_split
from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
from traitlets import Bool, Enum, observe
-
-
-# Public API
-__all__ = ['Completer','IPCompleter']
-
-if sys.platform == 'win32':
- PROTECTABLES = ' '
-else:
- PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
-
+
+
+# Public API
+__all__ = ['Completer','IPCompleter']
+
+if sys.platform == 'win32':
+ PROTECTABLES = ' '
+else:
+ PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
+
# Protect against returning an enormous number of completions which the frontend
# may have trouble processing.
MATCHES_LIMIT = 500
-
-def has_open_quotes(s):
- """Return whether a string has open quotes.
-
- This simply counts whether the number of quote characters of either type in
- the string is odd.
-
- Returns
- -------
- If there is an open quote, the quote character is returned. Else, return
- False.
- """
- # We check " first, then ', so complex cases with nested quotes will get
- # the " to take precedence.
- if s.count('"') % 2:
- return '"'
- elif s.count("'") % 2:
- return "'"
- else:
- return False
-
-
-def protect_filename(s):
- """Escape a string to protect certain characters."""
+
+def has_open_quotes(s):
+ """Return whether a string has open quotes.
+
+ This simply counts whether the number of quote characters of either type in
+ the string is odd.
+
+ Returns
+ -------
+ If there is an open quote, the quote character is returned. Else, return
+ False.
+ """
+ # We check " first, then ', so complex cases with nested quotes will get
+ # the " to take precedence.
+ if s.count('"') % 2:
+ return '"'
+ elif s.count("'") % 2:
+ return "'"
+ else:
+ return False
+
+
+def protect_filename(s):
+ """Escape a string to protect certain characters."""
if set(s) & set(PROTECTABLES):
if sys.platform == "win32":
return '"' + s + '"'
@@ -81,62 +81,62 @@ def protect_filename(s):
return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
else:
return s
-
-
-def expand_user(path):
- """Expand '~'-style usernames in strings.
-
- This is similar to :func:`os.path.expanduser`, but it computes and returns
- extra information that will be useful if the input was being used in
- computing completions, and you wish to return the completions with the
- original '~' instead of its expanded value.
-
- Parameters
- ----------
- path : str
- String to be expanded. If no ~ is present, the output is the same as the
- input.
-
- Returns
- -------
- newpath : str
- Result of ~ expansion in the input path.
- tilde_expand : bool
- Whether any expansion was performed or not.
- tilde_val : str
- The value that ~ was replaced with.
- """
- # Default values
- tilde_expand = False
- tilde_val = ''
- newpath = path
-
- if path.startswith('~'):
- tilde_expand = True
- rest = len(path)-1
- newpath = os.path.expanduser(path)
- if rest:
- tilde_val = newpath[:-rest]
- else:
- tilde_val = newpath
-
- return newpath, tilde_expand, tilde_val
-
-
-def compress_user(path, tilde_expand, tilde_val):
- """Does the opposite of expand_user, with its outputs.
- """
- if tilde_expand:
- return path.replace(tilde_val, '~')
- else:
- return path
-
-
+
+
+def expand_user(path):
+ """Expand '~'-style usernames in strings.
+
+ This is similar to :func:`os.path.expanduser`, but it computes and returns
+ extra information that will be useful if the input was being used in
+ computing completions, and you wish to return the completions with the
+ original '~' instead of its expanded value.
+
+ Parameters
+ ----------
+ path : str
+ String to be expanded. If no ~ is present, the output is the same as the
+ input.
+
+ Returns
+ -------
+ newpath : str
+ Result of ~ expansion in the input path.
+ tilde_expand : bool
+ Whether any expansion was performed or not.
+ tilde_val : str
+ The value that ~ was replaced with.
+ """
+ # Default values
+ tilde_expand = False
+ tilde_val = ''
+ newpath = path
+
+ if path.startswith('~'):
+ tilde_expand = True
+ rest = len(path)-1
+ newpath = os.path.expanduser(path)
+ if rest:
+ tilde_val = newpath[:-rest]
+ else:
+ tilde_val = newpath
+
+ return newpath, tilde_expand, tilde_val
+
+
+def compress_user(path, tilde_expand, tilde_val):
+ """Does the opposite of expand_user, with its outputs.
+ """
+ if tilde_expand:
+ return path.replace(tilde_val, '~')
+ else:
+ return path
+
+
def completions_sorting_key(word):
"""key for sorting completions
-
+
This does several things:
-
+
- Lowercase all completions, so they are sorted alphabetically with
upper and lower case words mingled
- Demote any completions starting with underscores to the end
@@ -145,410 +145,410 @@ def completions_sorting_key(word):
"""
# Case insensitive sort
word = word.lower()
-
+
prio1, prio2 = 0, 0
-
+
if word.startswith('__'):
prio1 = 2
elif word.startswith('_'):
prio1 = 1
-
+
if word.endswith('='):
prio1 = -1
-
+
if word.startswith('%%'):
# If there's another % in there, this is something else, so leave it alone
- if not "%" in word[2:]:
+ if not "%" in word[2:]:
word = word[2:]
prio2 = 2
elif word.startswith('%'):
- if not "%" in word[1:]:
+ if not "%" in word[1:]:
word = word[1:]
prio2 = 1
-
+
return prio1, word, prio2
-
-@undoc
-class Bunch(object): pass
-
-
+
+@undoc
+class Bunch(object): pass
+
+
if sys.platform == 'win32':
DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
else:
DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
-GREEDY_DELIMS = ' =\r\n'
-
-
-class CompletionSplitter(object):
- """An object to split an input line in a manner similar to readline.
-
- By having our own implementation, we can expose readline-like completion in
- a uniform manner to all frontends. This object only needs to be given the
- line of text to be split and the cursor position on said line, and it
- returns the 'word' to be completed on at the cursor after splitting the
- entire line.
-
- What characters are used as splitting delimiters can be controlled by
-    setting the `delims` attribute (this is a property that automatically
-    builds the necessary regular expression internally)"""
-
- # Private interface
-
- # A string of delimiter characters. The default value makes sense for
- # IPython's most typical usage patterns.
- _delims = DELIMS
-
- # The expression (a normal string) to be compiled into a regular expression
- # for actual splitting. We store it as an attribute mostly for ease of
- # debugging, since this type of code can be so tricky to debug.
- _delim_expr = None
-
- # The regular expression that does the actual splitting
- _delim_re = None
-
- def __init__(self, delims=None):
- delims = CompletionSplitter._delims if delims is None else delims
- self.delims = delims
-
- @property
- def delims(self):
- """Return the string of delimiter characters."""
- return self._delims
-
- @delims.setter
- def delims(self, delims):
- """Set the delimiters for line splitting."""
- expr = '[' + ''.join('\\'+ c for c in delims) + ']'
- self._delim_re = re.compile(expr)
- self._delims = delims
- self._delim_expr = expr
-
- def split_line(self, line, cursor_pos=None):
- """Split a line of text with a cursor at the given position.
- """
- l = line if cursor_pos is None else line[:cursor_pos]
- return self._delim_re.split(l)[-1]
-
-
-class Completer(Configurable):
-
+GREEDY_DELIMS = ' =\r\n'
+
+
+class CompletionSplitter(object):
+ """An object to split an input line in a manner similar to readline.
+
+ By having our own implementation, we can expose readline-like completion in
+ a uniform manner to all frontends. This object only needs to be given the
+ line of text to be split and the cursor position on said line, and it
+ returns the 'word' to be completed on at the cursor after splitting the
+ entire line.
+
+ What characters are used as splitting delimiters can be controlled by
+    setting the `delims` attribute (this is a property that automatically
+    builds the necessary regular expression internally)"""
+
+ # Private interface
+
+ # A string of delimiter characters. The default value makes sense for
+ # IPython's most typical usage patterns.
+ _delims = DELIMS
+
+ # The expression (a normal string) to be compiled into a regular expression
+ # for actual splitting. We store it as an attribute mostly for ease of
+ # debugging, since this type of code can be so tricky to debug.
+ _delim_expr = None
+
+ # The regular expression that does the actual splitting
+ _delim_re = None
+
+ def __init__(self, delims=None):
+ delims = CompletionSplitter._delims if delims is None else delims
+ self.delims = delims
+
+ @property
+ def delims(self):
+ """Return the string of delimiter characters."""
+ return self._delims
+
+ @delims.setter
+ def delims(self, delims):
+ """Set the delimiters for line splitting."""
+ expr = '[' + ''.join('\\'+ c for c in delims) + ']'
+ self._delim_re = re.compile(expr)
+ self._delims = delims
+ self._delim_expr = expr
+
+ def split_line(self, line, cursor_pos=None):
+ """Split a line of text with a cursor at the given position.
+ """
+ l = line if cursor_pos is None else line[:cursor_pos]
+ return self._delim_re.split(l)[-1]
+
+
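A sketch of the splitter in use; with the default delimiters, the word to complete is whatever follows the last delimiter before the cursor:

    sp = CompletionSplitter()
    print(sp.split_line('print(os.pa'))             # 'os.pa'  ('(' is a delimiter, '.' is not)
    print(sp.split_line('run script.py arg', 10))   # 'script' (cursor just after the 't')
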
+class Completer(Configurable):
+
greedy = Bool(False,
- help="""Activate greedy completion
+ help="""Activate greedy completion
         PENDING DEPRECATION. This is now mostly taken care of with Jedi.
-
- This will enable completion on elements of lists, results of function calls, etc.,
- but can be unsafe because the code is actually evaluated on TAB.
- """
+
+ This will enable completion on elements of lists, results of function calls, etc.,
+ but can be unsafe because the code is actually evaluated on TAB.
+ """
).tag(config=True)
-
+
backslash_combining_completions = Bool(True,
help="Enable unicode completions, e.g. \\alpha<tab> . "
"Includes completion of latex commands, unicode names, and expanding "
"unicode characters back to latex commands.").tag(config=True)
- def __init__(self, namespace=None, global_namespace=None, **kwargs):
- """Create a new completer for the command line.
-
+ def __init__(self, namespace=None, global_namespace=None, **kwargs):
+ """Create a new completer for the command line.
+
Completer(namespace=ns, global_namespace=ns2) -> completer instance.
-
- If unspecified, the default namespace where completions are performed
- is __main__ (technically, __main__.__dict__). Namespaces should be
- given as dictionaries.
-
- An optional second namespace can be given. This allows the completer
- to handle cases where both the local and global scopes need to be
- distinguished.
-
- Completer instances should be used as the completion mechanism of
- readline via the set_completer() call:
-
- readline.set_completer(Completer(my_namespace).complete)
- """
-
- # Don't bind to namespace quite yet, but flag whether the user wants a
- # specific namespace or to use __main__.__dict__. This will allow us
- # to bind to __main__.__dict__ at completion time, not now.
- if namespace is None:
- self.use_main_ns = 1
- else:
- self.use_main_ns = 0
- self.namespace = namespace
-
- # The global namespace, if given, can be bound directly
- if global_namespace is None:
- self.global_namespace = {}
- else:
- self.global_namespace = global_namespace
-
- super(Completer, self).__init__(**kwargs)
-
- def complete(self, text, state):
- """Return the next possible completion for 'text'.
-
- This is called successively with state == 0, 1, 2, ... until it
- returns None. The completion should begin with 'text'.
-
- """
- if self.use_main_ns:
- self.namespace = __main__.__dict__
-
- if state == 0:
- if "." in text:
- self.matches = self.attr_matches(text)
- else:
- self.matches = self.global_matches(text)
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- def global_matches(self, text):
- """Compute matches when text is a simple name.
-
- Return a list of all keywords, built-in functions and names currently
- defined in self.namespace or self.global_namespace that match.
-
- """
- matches = []
- match_append = matches.append
- n = len(text)
- for lst in [keyword.kwlist,
- builtin_mod.__dict__.keys(),
- self.namespace.keys(),
- self.global_namespace.keys()]:
- for word in lst:
- if word[:n] == text and word != "__builtins__":
- match_append(word)
+
+ If unspecified, the default namespace where completions are performed
+ is __main__ (technically, __main__.__dict__). Namespaces should be
+ given as dictionaries.
+
+ An optional second namespace can be given. This allows the completer
+ to handle cases where both the local and global scopes need to be
+ distinguished.
+
+ Completer instances should be used as the completion mechanism of
+ readline via the set_completer() call:
+
+ readline.set_completer(Completer(my_namespace).complete)
+ """
+
+ # Don't bind to namespace quite yet, but flag whether the user wants a
+ # specific namespace or to use __main__.__dict__. This will allow us
+ # to bind to __main__.__dict__ at completion time, not now.
+ if namespace is None:
+ self.use_main_ns = 1
+ else:
+ self.use_main_ns = 0
+ self.namespace = namespace
+
+ # The global namespace, if given, can be bound directly
+ if global_namespace is None:
+ self.global_namespace = {}
+ else:
+ self.global_namespace = global_namespace
+
+ super(Completer, self).__init__(**kwargs)
+
+ def complete(self, text, state):
+ """Return the next possible completion for 'text'.
+
+ This is called successively with state == 0, 1, 2, ... until it
+ returns None. The completion should begin with 'text'.
+
+ """
+ if self.use_main_ns:
+ self.namespace = __main__.__dict__
+
+ if state == 0:
+ if "." in text:
+ self.matches = self.attr_matches(text)
+ else:
+ self.matches = self.global_matches(text)
+ try:
+ return self.matches[state]
+ except IndexError:
+ return None
+
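The readline protocol sketched: complete() is called with increasing state until it returns None. A minimal namespace stands in for __main__ here:

    c = Completer(namespace={'alpha': 1, 'alphabet': 2})
    state = 0
    while True:
        match = c.complete('alph', state)
        if match is None:
            break
        print(match)   # 'alpha' and 'alphabet' (dict order may vary on Python 2)
        state += 1
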
+ def global_matches(self, text):
+ """Compute matches when text is a simple name.
+
+ Return a list of all keywords, built-in functions and names currently
+ defined in self.namespace or self.global_namespace that match.
+
+ """
+ matches = []
+ match_append = matches.append
+ n = len(text)
+ for lst in [keyword.kwlist,
+ builtin_mod.__dict__.keys(),
+ self.namespace.keys(),
+ self.global_namespace.keys()]:
+ for word in lst:
+ if word[:n] == text and word != "__builtins__":
+ match_append(word)
return [cast_unicode_py2(m) for m in matches]
-
- def attr_matches(self, text):
- """Compute matches when text contains a dot.
-
- Assuming the text is of the form NAME.NAME....[NAME], and is
- evaluatable in self.namespace or self.global_namespace, it will be
- evaluated and its attributes (as revealed by dir()) are used as
-        possible completions. (For class instances, class members are
- also considered.)
-
- WARNING: this can still invoke arbitrary C code, if an object
- with a __getattr__ hook is evaluated.
-
- """
-
- # Another option, seems to work great. Catches things like ''.<tab>
- m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
-
- if m:
- expr, attr = m.group(1, 3)
- elif self.greedy:
- m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
- if not m2:
- return []
- expr, attr = m2.group(1,2)
+
+ def attr_matches(self, text):
+ """Compute matches when text contains a dot.
+
+ Assuming the text is of the form NAME.NAME....[NAME], and is
+ evaluatable in self.namespace or self.global_namespace, it will be
+ evaluated and its attributes (as revealed by dir()) are used as
+        possible completions. (For class instances, class members are
+ also considered.)
+
+ WARNING: this can still invoke arbitrary C code, if an object
+ with a __getattr__ hook is evaluated.
+
+ """
+
+ # Another option, seems to work great. Catches things like ''.<tab>
+ m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
+
+ if m:
+ expr, attr = m.group(1, 3)
+ elif self.greedy:
+ m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
+ if not m2:
+ return []
+ expr, attr = m2.group(1,2)
+ else:
+ return []
+
+ try:
+ obj = eval(expr, self.namespace)
+ except:
+ try:
+ obj = eval(expr, self.global_namespace)
+ except:
+ return []
+
+ if self.limit_to__all__ and hasattr(obj, '__all__'):
+ words = get__all__entries(obj)
else:
- return []
-
- try:
- obj = eval(expr, self.namespace)
- except:
- try:
- obj = eval(expr, self.global_namespace)
- except:
- return []
-
- if self.limit_to__all__ and hasattr(obj, '__all__'):
- words = get__all__entries(obj)
- else:
- words = dir2(obj)
-
- try:
- words = generics.complete_object(obj, words)
- except TryNext:
- pass
- except Exception:
- # Silence errors from completion function
- #raise # dbg
- pass
- # Build match list to return
- n = len(attr)
+ words = dir2(obj)
+
+ try:
+ words = generics.complete_object(obj, words)
+ except TryNext:
+ pass
+ except Exception:
+ # Silence errors from completion function
+ #raise # dbg
+ pass
+ # Build match list to return
+ n = len(attr)
return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
-
-
-def get__all__entries(obj):
-    """Return the strings in the __all__ attribute."""
- try:
- words = getattr(obj, '__all__')
- except:
- return []
-
+
+
+def get__all__entries(obj):
+    """Return the strings in the __all__ attribute."""
+ try:
+ words = getattr(obj, '__all__')
+ except:
+ return []
+
return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
-
-
-def match_dict_keys(keys, prefix, delims):
- """Used by dict_key_matches, matching the prefix to a list of keys"""
- if not prefix:
- return None, 0, [repr(k) for k in keys
- if isinstance(k, (string_types, bytes))]
- quote_match = re.search('["\']', prefix)
- quote = quote_match.group()
- try:
- prefix_str = eval(prefix + quote, {})
- except Exception:
- return None, 0, []
-
- pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
- token_match = re.search(pattern, prefix, re.UNICODE)
- token_start = token_match.start()
- token_prefix = token_match.group()
-
- # TODO: support bytes in Py3k
- matched = []
- for key in keys:
- try:
- if not key.startswith(prefix_str):
- continue
- except (AttributeError, TypeError, UnicodeError):
- # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
- continue
-
- # reformat remainder of key to begin with prefix
- rem = key[len(prefix_str):]
- # force repr wrapped in '
- rem_repr = repr(rem + '"')
- if rem_repr.startswith('u') and prefix[0] not in 'uU':
- # Found key is unicode, but prefix is Py2 string.
- # Therefore attempt to interpret key as string.
- try:
- rem_repr = repr(rem.encode('ascii') + '"')
- except UnicodeEncodeError:
- continue
-
- rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
- if quote == '"':
- # The entered prefix is quoted with ",
- # but the match is quoted with '.
- # A contained " hence needs escaping for comparison:
- rem_repr = rem_repr.replace('"', '\\"')
-
- # then reinsert prefix from start of token
- matched.append('%s%s' % (token_prefix, rem_repr))
- return quote, token_start, matched
-
-
-def _safe_isinstance(obj, module, class_name):
- """Checks if obj is an instance of module.class_name if loaded
- """
- return (module in sys.modules and
- isinstance(obj, getattr(__import__(module), class_name)))
-
-
-def back_unicode_name_matches(text):
- u"""Match unicode characters back to unicode name
-
- This does ☃ -> \\snowman
-
-    Note that snowman is not a valid python3 combining character but will be expanded,
-    though it will not be recombined back to the snowman character by the completion machinery.
-
-    Nor will this back-complete standard sequences like \\n, \\b ...
-
- Used on Python 3 only.
- """
- if len(text)<2:
- return u'', ()
- maybe_slash = text[-2]
- if maybe_slash != '\\':
- return u'', ()
-
- char = text[-1]
- # no expand on quote for completion in strings.
- # nor backcomplete standard ascii keys
- if char in string.ascii_letters or char in ['"',"'"]:
- return u'', ()
- try :
- unic = unicodedata.name(char)
- return '\\'+char,['\\'+unic]
+
+
+def match_dict_keys(keys, prefix, delims):
+ """Used by dict_key_matches, matching the prefix to a list of keys"""
+ if not prefix:
+ return None, 0, [repr(k) for k in keys
+ if isinstance(k, (string_types, bytes))]
+ quote_match = re.search('["\']', prefix)
+ quote = quote_match.group()
+ try:
+ prefix_str = eval(prefix + quote, {})
+ except Exception:
+ return None, 0, []
+
+ pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
+ token_match = re.search(pattern, prefix, re.UNICODE)
+ token_start = token_match.start()
+ token_prefix = token_match.group()
+
+ # TODO: support bytes in Py3k
+ matched = []
+ for key in keys:
+ try:
+ if not key.startswith(prefix_str):
+ continue
+ except (AttributeError, TypeError, UnicodeError):
+ # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
+ continue
+
+ # reformat remainder of key to begin with prefix
+ rem = key[len(prefix_str):]
+ # force repr wrapped in '
+ rem_repr = repr(rem + '"')
+ if rem_repr.startswith('u') and prefix[0] not in 'uU':
+ # Found key is unicode, but prefix is Py2 string.
+ # Therefore attempt to interpret key as string.
+ try:
+ rem_repr = repr(rem.encode('ascii') + '"')
+ except UnicodeEncodeError:
+ continue
+
+ rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
+ if quote == '"':
+ # The entered prefix is quoted with ",
+ # but the match is quoted with '.
+ # A contained " hence needs escaping for comparison:
+ rem_repr = rem_repr.replace('"', '\\"')
+
+ # then reinsert prefix from start of token
+ matched.append('%s%s' % (token_prefix, rem_repr))
+ return quote, token_start, matched
+
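A sketch of matching a partially typed, quoted prefix against string keys, using the module-level DELIMS defined above:

    quote, token_start, matches = match_dict_keys(['foo', 'food', 'bar'], "'fo", DELIMS)
    print((quote, token_start, matches))   # ("'", 1, ['foo', 'food'])
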
+
+def _safe_isinstance(obj, module, class_name):
+ """Checks if obj is an instance of module.class_name if loaded
+ """
+ return (module in sys.modules and
+ isinstance(obj, getattr(__import__(module), class_name)))
+
+
+def back_unicode_name_matches(text):
+ u"""Match unicode characters back to unicode name
+
+ This does ☃ -> \\snowman
+
+    Note that snowman is not a valid python3 combining character but will be expanded,
+    though it will not be recombined back to the snowman character by the completion machinery.
+
+    Nor will this back-complete standard sequences like \\n, \\b ...
+
+ Used on Python 3 only.
+ """
+ if len(text)<2:
+ return u'', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return u'', ()
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ['"',"'"]:
+ return u'', ()
+ try :
+ unic = unicodedata.name(char)
+ return '\\'+char,['\\'+unic]
except KeyError:
- pass
- return u'', ()
-
-def back_latex_name_matches(text):
- u"""Match latex characters back to unicode name
-
-    This does √ -> \\sqrt
-
- Used on Python 3 only.
- """
- if len(text)<2:
- return u'', ()
- maybe_slash = text[-2]
- if maybe_slash != '\\':
- return u'', ()
-
-
- char = text[-1]
- # no expand on quote for completion in strings.
- # nor backcomplete standard ascii keys
- if char in string.ascii_letters or char in ['"',"'"]:
- return u'', ()
- try :
- latex = reverse_latex_symbol[char]
- # '\\' replace the \ as well
- return '\\'+char,[latex]
+ pass
+ return u'', ()
+
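A sketch of the backward expansion above: given text ending in a backslash plus a literal character, it returns the \NAME spelling (assuming the function as defined):

    print(back_unicode_name_matches(u'x = \\☃'))
    # ('\☃', ['\SNOWMAN'])
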
+def back_latex_name_matches(text):
+ u"""Match latex characters back to unicode name
+
+    This does √ -> \\sqrt
+
+ Used on Python 3 only.
+ """
+ if len(text)<2:
+ return u'', ()
+ maybe_slash = text[-2]
+ if maybe_slash != '\\':
+ return u'', ()
+
+
+ char = text[-1]
+ # no expand on quote for completion in strings.
+ # nor backcomplete standard ascii keys
+ if char in string.ascii_letters or char in ['"',"'"]:
+ return u'', ()
+ try :
+ latex = reverse_latex_symbol[char]
+ # '\\' replace the \ as well
+ return '\\'+char,[latex]
except KeyError:
- pass
- return u'', ()
-
-
-class IPCompleter(Completer):
- """Extension of the completer class with IPython-specific features"""
+ pass
+ return u'', ()
+
+
+class IPCompleter(Completer):
+ """Extension of the completer class with IPython-specific features"""
@observe('greedy')
def _greedy_changed(self, change):
- """update the splitter and readline delims when greedy is changed"""
+ """update the splitter and readline delims when greedy is changed"""
if change['new']:
- self.splitter.delims = GREEDY_DELIMS
- else:
- self.splitter.delims = DELIMS
-
- if self.readline:
- self.readline.set_completer_delims(self.splitter.delims)
-
+ self.splitter.delims = GREEDY_DELIMS
+ else:
+ self.splitter.delims = DELIMS
+
+ if self.readline:
+ self.readline.set_completer_delims(self.splitter.delims)
+
merge_completions = Bool(True,
- help="""Whether to merge completion results into a single list
-
- If False, only the completion results from the first non-empty
- completer will be returned.
- """
+ help="""Whether to merge completion results into a single list
+
+ If False, only the completion results from the first non-empty
+ completer will be returned.
+ """
).tag(config=True)
omit__names = Enum((0,1,2), default_value=2,
- help="""Instruct the completer to omit private method names
-
- Specifically, when completing on ``object.<tab>``.
-
- When 2 [default]: all names that start with '_' will be excluded.
-
- When 1: all 'magic' names (``__foo__``) will be excluded.
-
- When 0: nothing will be excluded.
- """
+ help="""Instruct the completer to omit private method names
+
+ Specifically, when completing on ``object.<tab>``.
+
+ When 2 [default]: all names that start with '_' will be excluded.
+
+ When 1: all 'magic' names (``__foo__``) will be excluded.
+
+ When 0: nothing will be excluded.
+ """
).tag(config=True)
limit_to__all__ = Bool(False,
help="""
DEPRECATED as of version 5.0.
-
+
Instruct the completer to use __all__ for the completion
- Specifically, when completing on ``object.<tab>``.
-
- When True: only those names in obj.__all__ will be included.
-
- When False [default]: the __all__ attribute is ignored
+ Specifically, when completing on ``object.<tab>``.
+
+ When True: only those names in obj.__all__ will be included.
+
+ When False [default]: the __all__ attribute is ignored
""",
).tag(config=True)
-
+
@observe('limit_to__all__')
def _limit_to_all_changed(self, change):
warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
@@ -556,352 +556,352 @@ class IPCompleter(Completer):
'no effects and then removed in future version of IPython.',
UserWarning)
- def __init__(self, shell=None, namespace=None, global_namespace=None,
- use_readline=True, config=None, **kwargs):
- """IPCompleter() -> completer
-
- Return a completer object suitable for use by the readline library
- via readline.set_completer().
-
- Inputs:
-
- - shell: a pointer to the ipython shell itself. This is needed
- because this completer knows about magic functions, and those can
- only be accessed via the ipython instance.
-
- - namespace: an optional dict where completions are performed.
-
- - global_namespace: secondary optional dict for completions, to
- handle cases (such as IPython embedded inside functions) where
- both Python scopes are visible.
-
- use_readline : bool, optional
- If true, use the readline library. This completer can still function
- without readline, though in that case callers must provide some extra
- information on each call about the current line."""
-
- self.magic_escape = ESC_MAGIC
- self.splitter = CompletionSplitter()
-
- # Readline configuration, only used by the rlcompleter method.
- if use_readline:
- # We store the right version of readline so that later code
-            # We store the right version of readline so that later code can use it
- self.readline = readline
- else:
- self.readline = None
-
- # _greedy_changed() depends on splitter and readline being defined:
- Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
- config=config, **kwargs)
-
- # List where completion matches will be stored
- self.matches = []
- self.shell = shell
- # Regexp to split filenames with spaces in them
- self.space_name_re = re.compile(r'([^\\] )')
- # Hold a local ref. to glob.glob for speed
- self.glob = glob.glob
-
- # Determine if we are running on 'dumb' terminals, like (X)Emacs
- # buffers, to avoid completion problems.
- term = os.environ.get('TERM','xterm')
- self.dumb_terminal = term in ['dumb','emacs']
-
- # Special handling of backslashes needed in win32 platforms
- if sys.platform == "win32":
- self.clean_glob = self._clean_glob_win32
- else:
- self.clean_glob = self._clean_glob
-
- #regexp to parse docstring for function signature
- self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
- self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
- #use this if positional argument name is also needed
- #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
-
- # All active matcher routines for completion
+ def __init__(self, shell=None, namespace=None, global_namespace=None,
+ use_readline=True, config=None, **kwargs):
+ """IPCompleter() -> completer
+
+ Return a completer object suitable for use by the readline library
+ via readline.set_completer().
+
+ Inputs:
+
+ - shell: a pointer to the ipython shell itself. This is needed
+ because this completer knows about magic functions, and those can
+ only be accessed via the ipython instance.
+
+ - namespace: an optional dict where completions are performed.
+
+ - global_namespace: secondary optional dict for completions, to
+ handle cases (such as IPython embedded inside functions) where
+ both Python scopes are visible.
+
+ use_readline : bool, optional
+ If true, use the readline library. This completer can still function
+ without readline, though in that case callers must provide some extra
+ information on each call about the current line."""
+
+ self.magic_escape = ESC_MAGIC
+ self.splitter = CompletionSplitter()
+
+ # Readline configuration, only used by the rlcompleter method.
+ if use_readline:
+            # We store the right version of readline so that later code can use it
+ import IPython.utils.rlineimpl as readline
+ self.readline = readline
+ else:
+ self.readline = None
+
+ # _greedy_changed() depends on splitter and readline being defined:
+ Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
+ config=config, **kwargs)
+
+ # List where completion matches will be stored
+ self.matches = []
+ self.shell = shell
+ # Regexp to split filenames with spaces in them
+ self.space_name_re = re.compile(r'([^\\] )')
+ # Hold a local ref. to glob.glob for speed
+ self.glob = glob.glob
+
+ # Determine if we are running on 'dumb' terminals, like (X)Emacs
+ # buffers, to avoid completion problems.
+ term = os.environ.get('TERM','xterm')
+ self.dumb_terminal = term in ['dumb','emacs']
+
+ # Special handling of backslashes needed in win32 platforms
+ if sys.platform == "win32":
+ self.clean_glob = self._clean_glob_win32
+ else:
+ self.clean_glob = self._clean_glob
+
+ #regexp to parse docstring for function signature
+ self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ #use this if positional argument name is also needed
+ #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
+
+ # All active matcher routines for completion
self.matchers = [
self.python_matches,
- self.file_matches,
- self.magic_matches,
- self.python_func_kw_matches,
- self.dict_key_matches,
- ]
-
+ self.file_matches,
+ self.magic_matches,
+ self.python_func_kw_matches,
+ self.dict_key_matches,
+ ]
+
# This is set externally by InteractiveShell
self.custom_completers = None
- def all_completions(self, text):
- """
+ def all_completions(self, text):
+ """
Wrapper around the complete method for the benefit of emacs.
- """
- return self.complete(text)[1]
-
+ """
+ return self.complete(text)[1]
+
def _clean_glob(self, text):
- return self.glob("%s*" % text)
-
- def _clean_glob_win32(self,text):
- return [f.replace("\\","/")
- for f in self.glob("%s*" % text)]
-
- def file_matches(self, text):
- """Match filenames, expanding ~USER type strings.
-
- Most of the seemingly convoluted logic in this completer is an
- attempt to handle filenames with spaces in them. And yet it's not
- quite perfect, because Python's readline doesn't expose all of the
- GNU readline details needed for this to be done correctly.
-
- For a filename with a space in it, the printed completions will be
- only the parts after what's already been typed (instead of the
- full completions, as is normally done). I don't think with the
- current (as of Python 2.3) Python readline it's possible to do
- better."""
-
- # chars that require escaping with backslash - i.e. chars
- # that readline treats incorrectly as delimiters, but we
- # don't want to treat as delimiters in filename matching
- # when escaped with backslash
- if text.startswith('!'):
- text = text[1:]
+ return self.glob("%s*" % text)
+
+ def _clean_glob_win32(self,text):
+ return [f.replace("\\","/")
+ for f in self.glob("%s*" % text)]
+
+ def file_matches(self, text):
+ """Match filenames, expanding ~USER type strings.
+
+ Most of the seemingly convoluted logic in this completer is an
+ attempt to handle filenames with spaces in them. And yet it's not
+ quite perfect, because Python's readline doesn't expose all of the
+ GNU readline details needed for this to be done correctly.
+
+ For a filename with a space in it, the printed completions will be
+ only the parts after what's already been typed (instead of the
+ full completions, as is normally done). I don't think with the
+ current (as of Python 2.3) Python readline it's possible to do
+ better."""
+
+ # chars that require escaping with backslash - i.e. chars
+ # that readline treats incorrectly as delimiters, but we
+ # don't want to treat as delimiters in filename matching
+ # when escaped with backslash
+ if text.startswith('!'):
+ text = text[1:]
text_prefix = u'!'
- else:
+ else:
text_prefix = u''
-
- text_until_cursor = self.text_until_cursor
- # track strings with open quotes
- open_quotes = has_open_quotes(text_until_cursor)
-
- if '(' in text_until_cursor or '[' in text_until_cursor:
- lsplit = text
- else:
- try:
- # arg_split ~ shlex.split, but with unicode bugs fixed by us
- lsplit = arg_split(text_until_cursor)[-1]
- except ValueError:
- # typically an unmatched ", or backslash without escaped char.
- if open_quotes:
- lsplit = text_until_cursor.split(open_quotes)[-1]
- else:
- return []
- except IndexError:
- # tab pressed on empty line
- lsplit = ""
-
- if not open_quotes and lsplit != protect_filename(lsplit):
- # if protectables are found, do matching on the whole escaped name
- has_protectables = True
- text0,text = text,lsplit
- else:
- has_protectables = False
- text = os.path.expanduser(text)
-
- if text == "":
+
+ text_until_cursor = self.text_until_cursor
+ # track strings with open quotes
+ open_quotes = has_open_quotes(text_until_cursor)
+
+ if '(' in text_until_cursor or '[' in text_until_cursor:
+ lsplit = text
+ else:
+ try:
+ # arg_split ~ shlex.split, but with unicode bugs fixed by us
+ lsplit = arg_split(text_until_cursor)[-1]
+ except ValueError:
+ # typically an unmatched ", or backslash without escaped char.
+ if open_quotes:
+ lsplit = text_until_cursor.split(open_quotes)[-1]
+ else:
+ return []
+ except IndexError:
+ # tab pressed on empty line
+ lsplit = ""
+
+ if not open_quotes and lsplit != protect_filename(lsplit):
+ # if protectables are found, do matching on the whole escaped name
+ has_protectables = True
+ text0,text = text,lsplit
+ else:
+ has_protectables = False
+ text = os.path.expanduser(text)
+
+ if text == "":
return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
-
- # Compute the matches from the filesystem
+
+ # Compute the matches from the filesystem
if sys.platform == 'win32':
m0 = self.clean_glob(text)
else:
m0 = self.clean_glob(text.replace('\\', ''))
-
- if has_protectables:
- # If we had protectables, we need to revert our changes to the
- # beginning of filename so that we don't double-write the part
- # of the filename we have so far
- len_lsplit = len(lsplit)
- matches = [text_prefix + text0 +
- protect_filename(f[len_lsplit:]) for f in m0]
- else:
- if open_quotes:
- # if we have a string with an open quote, we don't need to
- # protect the names at all (and we _shouldn't_, as it
- # would cause bugs when the filesystem call is made).
- matches = m0
- else:
- matches = [text_prefix +
- protect_filename(f) for f in m0]
-
- # Mark directories in input list by appending '/' to their names.
+
+ if has_protectables:
+ # If we had protectables, we need to revert our changes to the
+ # beginning of filename so that we don't double-write the part
+ # of the filename we have so far
+ len_lsplit = len(lsplit)
+ matches = [text_prefix + text0 +
+ protect_filename(f[len_lsplit:]) for f in m0]
+ else:
+ if open_quotes:
+ # if we have a string with an open quote, we don't need to
+ # protect the names at all (and we _shouldn't_, as it
+ # would cause bugs when the filesystem call is made).
+ matches = m0
+ else:
+ matches = [text_prefix +
+ protect_filename(f) for f in m0]
+
+ # Mark directories in input list by appending '/' to their names.
return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
-
- def magic_matches(self, text):
- """Match magics"""
- # Get all shell magics now rather than statically, so magics loaded at
- # runtime show up too.
- lsm = self.shell.magics_manager.lsmagic()
- line_magics = lsm['line']
- cell_magics = lsm['cell']
- pre = self.magic_escape
- pre2 = pre+pre
-
- # Completion logic:
- # - user gives %%: only do cell magics
- # - user gives %: do both line and cell magics
- # - no prefix: do both
- # In other words, line magics are skipped if the user gives %% explicitly
- bare_text = text.lstrip(pre)
- comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
- if not text.startswith(pre2):
- comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
+
+ def magic_matches(self, text):
+ """Match magics"""
+ # Get all shell magics now rather than statically, so magics loaded at
+ # runtime show up too.
+ lsm = self.shell.magics_manager.lsmagic()
+ line_magics = lsm['line']
+ cell_magics = lsm['cell']
+ pre = self.magic_escape
+ pre2 = pre+pre
+
+ # Completion logic:
+ # - user gives %%: only do cell magics
+ # - user gives %: do both line and cell magics
+ # - no prefix: do both
+ # In other words, line magics are skipped if the user gives %% explicitly
+ bare_text = text.lstrip(pre)
+ comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
+ if not text.startswith(pre2):
+ comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
return [cast_unicode_py2(c) for c in comp]
-
+
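Inside a live session the same %/%% logic can be exercised through the shell's completer (a sketch; get_ipython() exists only inside IPython, and the exact result depends on the magics loaded):

    ip = get_ipython()
    print(ip.Completer.magic_matches('%ti'))
    # e.g. [u'%%time', u'%%timeit', u'%time', u'%timeit'] -- cell magics listed first
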
def python_matches(self, text):
- """Match attributes or global python names"""
- if "." in text:
- try:
- matches = self.attr_matches(text)
- if text.endswith('.') and self.omit__names:
- if self.omit__names == 1:
- # true if txt is _not_ a __ name, false otherwise:
- no__name = (lambda txt:
- re.match(r'.*\.__.*?__',txt) is None)
- else:
- # true if txt is _not_ a _ name, false otherwise:
- no__name = (lambda txt:
- re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
- matches = filter(no__name, matches)
- except NameError:
- # catches <undefined attributes>.<tab>
- matches = []
- else:
- matches = self.global_matches(text)
- return matches
-
- def _default_arguments_from_docstring(self, doc):
- """Parse the first line of docstring for call signature.
-
- Docstring should be of the form 'min(iterable[, key=func])\n'.
- It can also parse cython docstring of the form
- 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
- """
- if doc is None:
- return []
-
-        # care only about the first line
- line = doc.lstrip().splitlines()[0]
-
- #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
- #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
- sig = self.docstring_sig_re.search(line)
- if sig is None:
- return []
-        # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
- sig = sig.groups()[0].split(',')
- ret = []
- for s in sig:
- #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
- ret += self.docstring_kwd_re.findall(s)
- return ret
-
- def _default_arguments(self, obj):
- """Return the list of default arguments of obj if it is callable,
- or empty list otherwise."""
- call_obj = obj
- ret = []
- if inspect.isbuiltin(obj):
- pass
- elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
- if inspect.isclass(obj):
-            # for cython embedsignature=True, the constructor docstring
-            # belongs to the object itself, not __init__
- ret += self._default_arguments_from_docstring(
- getattr(obj, '__doc__', ''))
- # for classes, check for __init__,__new__
- call_obj = (getattr(obj, '__init__', None) or
- getattr(obj, '__new__', None))
- # for all others, check if they are __call__able
- elif hasattr(obj, '__call__'):
- call_obj = obj.__call__
- ret += self._default_arguments_from_docstring(
- getattr(call_obj, '__doc__', ''))
-
- if PY3:
- _keeps = (inspect.Parameter.KEYWORD_ONLY,
- inspect.Parameter.POSITIONAL_OR_KEYWORD)
- signature = inspect.signature
- else:
- import IPython.utils.signatures
- _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
- IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
- signature = IPython.utils.signatures.signature
-
- try:
- sig = signature(call_obj)
- ret.extend(k for k, v in sig.parameters.items() if
- v.kind in _keeps)
- except ValueError:
- pass
-
- return list(set(ret))
-
- def python_func_kw_matches(self,text):
- """Match named parameters (kwargs) of the last open function"""
-
- if "." in text: # a parameter cannot be dotted
- return []
- try: regexp = self.__funcParamsRegex
- except AttributeError:
- regexp = self.__funcParamsRegex = re.compile(r'''
- '.*?(?<!\\)' | # single quoted strings or
- ".*?(?<!\\)" | # double quoted strings or
- \w+ | # identifier
- \S # other characters
- ''', re.VERBOSE | re.DOTALL)
- # 1. find the nearest identifier that comes before an unclosed
- # parenthesis before the cursor
- # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
- tokens = regexp.findall(self.text_until_cursor)
- tokens.reverse()
- iterTokens = iter(tokens); openPar = 0
-
- for token in iterTokens:
- if token == ')':
- openPar -= 1
- elif token == '(':
- openPar += 1
- if openPar > 0:
- # found the last unclosed parenthesis
- break
- else:
- return []
- # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
- ids = []
- isId = re.compile(r'\w+$').match
-
- while True:
- try:
- ids.append(next(iterTokens))
- if not isId(ids[-1]):
- ids.pop(); break
- if not next(iterTokens) == '.':
- break
- except StopIteration:
- break
- # lookup the candidate callable matches either using global_matches
- # or attr_matches for dotted names
- if len(ids) == 1:
- callableMatches = self.global_matches(ids[0])
- else:
- callableMatches = self.attr_matches('.'.join(ids[::-1]))
- argMatches = []
- for callableMatch in callableMatches:
- try:
- namedArgs = self._default_arguments(eval(callableMatch,
- self.namespace))
- except:
- continue
-
- for namedArg in namedArgs:
- if namedArg.startswith(text):
+ """Match attributes or global python names"""
+ if "." in text:
+ try:
+ matches = self.attr_matches(text)
+ if text.endswith('.') and self.omit__names:
+ if self.omit__names == 1:
+ # true if txt is _not_ a __ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'.*\.__.*?__',txt) is None)
+ else:
+ # true if txt is _not_ a _ name, false otherwise:
+ no__name = (lambda txt:
+ re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
+ matches = filter(no__name, matches)
+ except NameError:
+ # catches <undefined attributes>.<tab>
+ matches = []
+ else:
+ matches = self.global_matches(text)
+ return matches
+
+ def _default_arguments_from_docstring(self, doc):
+ """Parse the first line of docstring for call signature.
+
+ Docstring should be of the form 'min(iterable[, key=func])\n'.
+ It can also parse cython docstring of the form
+ 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
+ """
+ if doc is None:
+ return []
+
+        # care only about the first line
+ line = doc.lstrip().splitlines()[0]
+
+ #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
+ #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
+ sig = self.docstring_sig_re.search(line)
+ if sig is None:
+ return []
+        # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
+ sig = sig.groups()[0].split(',')
+ ret = []
+ for s in sig:
+ #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
+ ret += self.docstring_kwd_re.findall(s)
+ return ret
+
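A self-contained sketch of the docstring-signature scrape, using the same two regexps compiled in __init__:

    import re
    docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
    docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
    line = 'min(iterable[, key=func])'
    parts = docstring_sig_re.search(line).groups()[0].split(',')
    print([kw for p in parts for kw in docstring_kwd_re.findall(p)])   # ['key']
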
+ def _default_arguments(self, obj):
+ """Return the list of default arguments of obj if it is callable,
+ or empty list otherwise."""
+ call_obj = obj
+ ret = []
+ if inspect.isbuiltin(obj):
+ pass
+ elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
+ if inspect.isclass(obj):
+            # for cython embedsignature=True, the constructor docstring
+            # belongs to the object itself, not __init__
+ ret += self._default_arguments_from_docstring(
+ getattr(obj, '__doc__', ''))
+ # for classes, check for __init__,__new__
+ call_obj = (getattr(obj, '__init__', None) or
+ getattr(obj, '__new__', None))
+ # for all others, check if they are __call__able
+ elif hasattr(obj, '__call__'):
+ call_obj = obj.__call__
+ ret += self._default_arguments_from_docstring(
+ getattr(call_obj, '__doc__', ''))
+
+ if PY3:
+ _keeps = (inspect.Parameter.KEYWORD_ONLY,
+ inspect.Parameter.POSITIONAL_OR_KEYWORD)
+ signature = inspect.signature
+ else:
+ import IPython.utils.signatures
+ _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
+ IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
+ signature = IPython.utils.signatures.signature
+
+ try:
+ sig = signature(call_obj)
+ ret.extend(k for k, v in sig.parameters.items() if
+ v.kind in _keeps)
+ except ValueError:
+ pass
+
+ return list(set(ret))
+
+ def python_func_kw_matches(self,text):
+ """Match named parameters (kwargs) of the last open function"""
+
+ if "." in text: # a parameter cannot be dotted
+ return []
+ try: regexp = self.__funcParamsRegex
+ except AttributeError:
+ regexp = self.__funcParamsRegex = re.compile(r'''
+ '.*?(?<!\\)' | # single quoted strings or
+ ".*?(?<!\\)" | # double quoted strings or
+ \w+ | # identifier
+ \S # other characters
+ ''', re.VERBOSE | re.DOTALL)
+ # 1. find the nearest identifier that comes before an unclosed
+ # parenthesis before the cursor
+ # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
+ tokens = regexp.findall(self.text_until_cursor)
+ tokens.reverse()
+ iterTokens = iter(tokens); openPar = 0
+
+ for token in iterTokens:
+ if token == ')':
+ openPar -= 1
+ elif token == '(':
+ openPar += 1
+ if openPar > 0:
+ # found the last unclosed parenthesis
+ break
+ else:
+ return []
+ # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
+ ids = []
+ isId = re.compile(r'\w+$').match
+
+ while True:
+ try:
+ ids.append(next(iterTokens))
+ if not isId(ids[-1]):
+ ids.pop(); break
+ if not next(iterTokens) == '.':
+ break
+ except StopIteration:
+ break
+ # lookup the candidate callable matches either using global_matches
+ # or attr_matches for dotted names
+ if len(ids) == 1:
+ callableMatches = self.global_matches(ids[0])
+ else:
+ callableMatches = self.attr_matches('.'.join(ids[::-1]))
+ argMatches = []
+ for callableMatch in callableMatches:
+ try:
+ namedArgs = self._default_arguments(eval(callableMatch,
+ self.namespace))
+ except:
+ continue
+
+ for namedArg in namedArgs:
+ if namedArg.startswith(text):
argMatches.append(u"%s=" %namedArg)
- return argMatches
-
- def dict_key_matches(self, text):
- "Match string keys in a dictionary, after e.g. 'foo[' "
- def get_keys(obj):
+ return argMatches
+
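A sketch of the backward token scan in step 1, showing how tokens are consumed in reverse until the unclosed '(' is found:

    import re
    regexp = re.compile(r'''
        '.*?(?<!\\)' |   # single quoted strings or
        ".*?(?<!\\)" |   # double quoted strings or
        \w+ |            # identifier
        \S               # other characters
        ''', re.VERBOSE | re.DOTALL)
    tokens = regexp.findall('foo (1+bar(x), pa')
    tokens.reverse()
    print(tokens)   # ['pa', ',', ')', 'x', '(', 'bar', '+', '1', '(', 'foo']
    # Walking this list, ')' and the first '(' cancel out; the second '('
    # is unclosed, and the identifier right after it is 'foo'.
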
+ def dict_key_matches(self, text):
+ "Match string keys in a dictionary, after e.g. 'foo[' "
+ def get_keys(obj):
# Objects can define their own completions by defining an
# _ipy_key_completions_() method.
method = get_real_method(obj, '_ipython_key_completions_')
@@ -909,289 +909,289 @@ class IPCompleter(Completer):
return method()
# Special case some common in-memory dict-like types
- if isinstance(obj, dict) or\
- _safe_isinstance(obj, 'pandas', 'DataFrame'):
- try:
- return list(obj.keys())
- except Exception:
- return []
- elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
- _safe_isinstance(obj, 'numpy', 'void'):
- return obj.dtype.names or []
- return []
-
- try:
- regexps = self.__dict_key_regexps
- except AttributeError:
- dict_key_re_fmt = r'''(?x)
- ( # match dict-referring expression wrt greedy setting
- %s
- )
- \[ # open bracket
- \s* # and optional whitespace
- ([uUbB]? # string prefix (r not handled)
- (?: # unclosed string
- '(?:[^']|(?<!\\)\\')*
- |
- "(?:[^"]|(?<!\\)\\")*
- )
- )?
- $
- '''
- regexps = self.__dict_key_regexps = {
- False: re.compile(dict_key_re_fmt % '''
- # identifiers separated by .
- (?!\d)\w+
- (?:\.(?!\d)\w+)*
- '''),
- True: re.compile(dict_key_re_fmt % '''
- .+
- ''')
- }
-
- match = regexps[self.greedy].search(self.text_until_cursor)
- if match is None:
- return []
-
- expr, prefix = match.groups()
- try:
- obj = eval(expr, self.namespace)
- except Exception:
- try:
- obj = eval(expr, self.global_namespace)
- except Exception:
- return []
-
- keys = get_keys(obj)
- if not keys:
- return keys
- closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
- if not matches:
- return matches
-
- # get the cursor position of
- # - the text being completed
- # - the start of the key text
- # - the start of the completion
- text_start = len(self.text_until_cursor) - len(text)
- if prefix:
- key_start = match.start(2)
- completion_start = key_start + token_offset
- else:
- key_start = completion_start = match.end()
-
- # grab the leading prefix, to make sure all completions start with `text`
- if text_start > key_start:
- leading = ''
- else:
- leading = text[text_start:completion_start]
-
- # the index of the `[` character
- bracket_idx = match.end(1)
-
- # append closing quote and bracket as appropriate
- # this is *not* appropriate if the opening quote or bracket is outside
- # the text given to this method
- suf = ''
- continuation = self.line_buffer[len(self.text_until_cursor):]
- if key_start > text_start and closing_quote:
- # quotes were opened inside text, maybe close them
- if continuation.startswith(closing_quote):
- continuation = continuation[len(closing_quote):]
- else:
- suf += closing_quote
- if bracket_idx > text_start:
- # brackets were opened inside text, maybe close them
- if not continuation.startswith(']'):
- suf += ']'
-
- return [leading + k + suf for k in matches]
-
- def unicode_name_matches(self, text):
-        u"""Match Latex-like syntax for unicode characters based
- on the name of the character.
-
- This does \\GREEK SMALL LETTER ETA -> η
-
-        Works only on valid python 3 identifiers, or on combining characters that
- will combine to form a valid identifier.
-
- Used on Python 3 only.
- """
- slashpos = text.rfind('\\')
- if slashpos > -1:
- s = text[slashpos+1:]
- try :
- unic = unicodedata.lookup(s)
- # allow combining chars
- if ('a'+unic).isidentifier():
- return '\\'+s,[unic]
+ if isinstance(obj, dict) or\
+ _safe_isinstance(obj, 'pandas', 'DataFrame'):
+ try:
+ return list(obj.keys())
+ except Exception:
+ return []
+ elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
+ _safe_isinstance(obj, 'numpy', 'void'):
+ return obj.dtype.names or []
+ return []
+
+ try:
+ regexps = self.__dict_key_regexps
+ except AttributeError:
+ dict_key_re_fmt = r'''(?x)
+ ( # match dict-referring expression wrt greedy setting
+ %s
+ )
+ \[ # open bracket
+ \s* # and optional whitespace
+ ([uUbB]? # string prefix (r not handled)
+ (?: # unclosed string
+ '(?:[^']|(?<!\\)\\')*
+ |
+ "(?:[^"]|(?<!\\)\\")*
+ )
+ )?
+ $
+ '''
+ regexps = self.__dict_key_regexps = {
+ False: re.compile(dict_key_re_fmt % '''
+ # identifiers separated by .
+ (?!\d)\w+
+ (?:\.(?!\d)\w+)*
+ '''),
+ True: re.compile(dict_key_re_fmt % '''
+ .+
+ ''')
+ }
+
+ match = regexps[self.greedy].search(self.text_until_cursor)
+ if match is None:
+ return []
+
+ expr, prefix = match.groups()
+ try:
+ obj = eval(expr, self.namespace)
+ except Exception:
+ try:
+ obj = eval(expr, self.global_namespace)
+ except Exception:
+ return []
+
+ keys = get_keys(obj)
+ if not keys:
+ return keys
+ closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
+ if not matches:
+ return matches
+
+ # get the cursor position of
+ # - the text being completed
+ # - the start of the key text
+ # - the start of the completion
+ text_start = len(self.text_until_cursor) - len(text)
+ if prefix:
+ key_start = match.start(2)
+ completion_start = key_start + token_offset
+ else:
+ key_start = completion_start = match.end()
+
+ # grab the leading prefix, to make sure all completions start with `text`
+ if text_start > key_start:
+ leading = ''
+ else:
+ leading = text[text_start:completion_start]
+
+ # the index of the `[` character
+ bracket_idx = match.end(1)
+
+ # append closing quote and bracket as appropriate
+ # this is *not* appropriate if the opening quote or bracket is outside
+ # the text given to this method
+ suf = ''
+ continuation = self.line_buffer[len(self.text_until_cursor):]
+ if key_start > text_start and closing_quote:
+ # quotes were opened inside text, maybe close them
+ if continuation.startswith(closing_quote):
+ continuation = continuation[len(closing_quote):]
+ else:
+ suf += closing_quote
+ if bracket_idx > text_start:
+ # brackets were opened inside text, maybe close them
+ if not continuation.startswith(']'):
+ suf += ']'
+
+ return [leading + k + suf for k in matches]
+
+ def unicode_name_matches(self, text):
+        u"""Match Latex-like syntax for unicode characters based
+ on the name of the character.
+
+ This does \\GREEK SMALL LETTER ETA -> η
+
+        Works only on valid python 3 identifiers, or on combining characters that
+ will combine to form a valid identifier.
+
+ Used on Python 3 only.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos+1:]
+ try :
+ unic = unicodedata.lookup(s)
+ # allow combining chars
+ if ('a'+unic).isidentifier():
+ return '\\'+s,[unic]
except KeyError:
- pass
- return u'', []
-
-
-
-
- def latex_matches(self, text):
- u"""Match Latex syntax for unicode characters.
-
- This does both \\alp -> \\alpha and \\alpha -> α
-
- Used on Python 3 only.
- """
- slashpos = text.rfind('\\')
- if slashpos > -1:
- s = text[slashpos:]
- if s in latex_symbols:
- # Try to complete a full latex symbol to unicode
- # \\alpha -> α
- return s, [latex_symbols[s]]
- else:
- # If a user has partially typed a latex symbol, give them
- # a full list of options \al -> [\aleph, \alpha]
- matches = [k for k in latex_symbols if k.startswith(s)]
- return s, matches
- return u'', []
-
- def dispatch_custom_completer(self, text):
+ pass
+ return u'', []
+
+
+
+
+ def latex_matches(self, text):
+ u"""Match Latex syntax for unicode characters.
+
+ This does both \\alp -> \\alpha and \\alpha -> α
+
+ Used on Python 3 only.
+ """
+ slashpos = text.rfind('\\')
+ if slashpos > -1:
+ s = text[slashpos:]
+ if s in latex_symbols:
+ # Try to complete a full latex symbol to unicode
+ # \\alpha -> α
+ return s, [latex_symbols[s]]
+ else:
+ # If a user has partially typed a latex symbol, give them
+ # a full list of options \al -> [\aleph, \alpha]
+ matches = [k for k in latex_symbols if k.startswith(s)]
+ return s, matches
+ return u'', []
+
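A sketch of both directions of the latex machinery in a live session (get_ipython() exists only inside IPython; the match list for a partial symbol depends on the symbol table):

    ip = get_ipython()
    print(ip.Completer.latex_matches(u'\\alpha'))   # ('\\alpha', [u'α'])
    print(ip.Completer.latex_matches(u'\\al'))      # ('\\al', [u'\\aleph', u'\\alpha', ...])
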
+ def dispatch_custom_completer(self, text):
if not self.custom_completers:
return
- line = self.line_buffer
- if not line.strip():
- return None
-
- # Create a little structure to pass all the relevant information about
- # the current completion to any custom completer.
- event = Bunch()
- event.line = line
- event.symbol = text
- cmd = line.split(None,1)[0]
- event.command = cmd
- event.text_until_cursor = self.text_until_cursor
-
- # for foo etc, try also to find completer for %foo
- if not cmd.startswith(self.magic_escape):
- try_magic = self.custom_completers.s_matches(
- self.magic_escape + cmd)
- else:
- try_magic = []
-
- for c in itertools.chain(self.custom_completers.s_matches(cmd),
- try_magic,
- self.custom_completers.flat_matches(self.text_until_cursor)):
- try:
- res = c(event)
- if res:
- # first, try case sensitive match
+ line = self.line_buffer
+ if not line.strip():
+ return None
+
+ # Create a little structure to pass all the relevant information about
+ # the current completion to any custom completer.
+ event = Bunch()
+ event.line = line
+ event.symbol = text
+ cmd = line.split(None,1)[0]
+ event.command = cmd
+ event.text_until_cursor = self.text_until_cursor
+
+ # for foo etc, try also to find completer for %foo
+ if not cmd.startswith(self.magic_escape):
+ try_magic = self.custom_completers.s_matches(
+ self.magic_escape + cmd)
+ else:
+ try_magic = []
+
+ for c in itertools.chain(self.custom_completers.s_matches(cmd),
+ try_magic,
+ self.custom_completers.flat_matches(self.text_until_cursor)):
+ try:
+ res = c(event)
+ if res:
+ # first, try case sensitive match
withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
- if withcase:
- return withcase
- # if none, then case insensitive ones are ok too
- text_low = text.lower()
+ if withcase:
+ return withcase
+ # if none, then case insensitive ones are ok too
+ text_low = text.lower()
return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
- except TryNext:
- pass
+ except TryNext:
+ pass
except KeyboardInterrupt:
"""
                If a custom completer takes too long,
let keyboard interrupt abort and return nothing.
"""
break
-
- return None
-
- def complete(self, text=None, line_buffer=None, cursor_pos=None):
- """Find completions for the given text and line context.
-
- Note that both the text and the line_buffer are optional, but at least
- one of them must be given.
-
- Parameters
- ----------
- text : string, optional
- Text to perform the completion on. If not given, the line buffer
- is split using the instance's CompletionSplitter object.
-
- line_buffer : string, optional
- If not given, the completer attempts to obtain the current line
- buffer via readline. This keyword allows clients which are
- requesting for text completions in non-readline contexts to inform
- the completer of the entire text.
-
- cursor_pos : int, optional
- Index of the cursor in the full line buffer. Should be provided by
- remote frontends where kernel has no access to frontend state.
-
- Returns
- -------
- text : str
- Text that was actually used in the completion.
-
- matches : list
- A list of completion matches.
- """
- # if the cursor position isn't given, the only sane assumption we can
- # make is that it's at the end of the line (the common case)
- if cursor_pos is None:
- cursor_pos = len(line_buffer) if text is None else len(text)
-
+
+ return None
+
+ def complete(self, text=None, line_buffer=None, cursor_pos=None):
+ """Find completions for the given text and line context.
+
+ Note that both the text and the line_buffer are optional, but at least
+ one of them must be given.
+
+ Parameters
+ ----------
+ text : string, optional
+ Text to perform the completion on. If not given, the line buffer
+ is split using the instance's CompletionSplitter object.
+
+ line_buffer : string, optional
+ If not given, the completer attempts to obtain the current line
+ buffer via readline. This keyword allows clients which are
+ requesting for text completions in non-readline contexts to inform
+ the completer of the entire text.
+
+ cursor_pos : int, optional
+ Index of the cursor in the full line buffer. Should be provided by
+ remote frontends where kernel has no access to frontend state.
+
+ Returns
+ -------
+ text : str
+ Text that was actually used in the completion.
+
+ matches : list
+ A list of completion matches.
+ """
+ # if the cursor position isn't given, the only sane assumption we can
+ # make is that it's at the end of the line (the common case)
+ if cursor_pos is None:
+ cursor_pos = len(line_buffer) if text is None else len(text)
+
if self.use_main_ns:
self.namespace = __main__.__dict__
-
+
if PY3 and self.backslash_combining_completions:
- base_text = text if not line_buffer else line_buffer[:cursor_pos]
- latex_text, latex_matches = self.latex_matches(base_text)
- if latex_matches:
+ base_text = text if not line_buffer else line_buffer[:cursor_pos]
+ latex_text, latex_matches = self.latex_matches(base_text)
+ if latex_matches:
return latex_text, latex_matches
- name_text = ''
- name_matches = []
- for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
- name_text, name_matches = meth(base_text)
- if name_text:
+ name_text = ''
+ name_matches = []
+ for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
+ name_text, name_matches = meth(base_text)
+ if name_text:
return name_text, name_matches[:MATCHES_LIMIT]
-
- # if text is either None or an empty string, rely on the line buffer
- if not text:
- text = self.splitter.split_line(line_buffer, cursor_pos)
-
- # If no line buffer is given, assume the input text is all there was
- if line_buffer is None:
- line_buffer = text
-
- self.line_buffer = line_buffer
- self.text_until_cursor = self.line_buffer[:cursor_pos]
-
- # Start with a clean slate of completions
- self.matches[:] = []
- custom_res = self.dispatch_custom_completer(text)
- if custom_res is not None:
- # did custom completers produce something?
- self.matches = custom_res
- else:
- # Extend the list of completions with the results of each
- # matcher, so we return results to the user from all
- # namespaces.
- if self.merge_completions:
- self.matches = []
- for matcher in self.matchers:
- try:
- self.matches.extend(matcher(text))
- except:
- # Show the ugly traceback if the matcher causes an
- # exception, but do NOT crash the kernel!
- sys.excepthook(*sys.exc_info())
- else:
- for matcher in self.matchers:
- self.matches = matcher(text)
- if self.matches:
- break
- # FIXME: we should extend our api to return a dict with completions for
- # different types of objects. The rlcomplete() method could then
- # simply collapse the dict into a list for readline, but we'd have
- # richer completion semantics in other environments.
+
+ # if text is either None or an empty string, rely on the line buffer
+ if not text:
+ text = self.splitter.split_line(line_buffer, cursor_pos)
+
+ # If no line buffer is given, assume the input text is all there was
+ if line_buffer is None:
+ line_buffer = text
+
+ self.line_buffer = line_buffer
+ self.text_until_cursor = self.line_buffer[:cursor_pos]
+
+ # Start with a clean slate of completions
+ self.matches[:] = []
+ custom_res = self.dispatch_custom_completer(text)
+ if custom_res is not None:
+ # did custom completers produce something?
+ self.matches = custom_res
+ else:
+ # Extend the list of completions with the results of each
+ # matcher, so we return results to the user from all
+ # namespaces.
+ if self.merge_completions:
+ self.matches = []
+ for matcher in self.matchers:
+ try:
+ self.matches.extend(matcher(text))
+ except:
+ # Show the ugly traceback if the matcher causes an
+ # exception, but do NOT crash the kernel!
+ sys.excepthook(*sys.exc_info())
+ else:
+ for matcher in self.matchers:
+ self.matches = matcher(text)
+ if self.matches:
+ break
+ # FIXME: we should extend our api to return a dict with completions for
+ # different types of objects. The rlcomplete() method could then
+ # simply collapse the dict into a list for readline, but we'd have
+ # richer completion semantics in other environments.
self.matches = sorted(set(self.matches), key=completions_sorting_key)[:MATCHES_LIMIT]
-
- return text, self.matches
+
+ return text, self.matches
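The restored complete() above is the entry point frontends call: given any combination of text, line_buffer and cursor_pos, it returns the (text, matches) pair. A minimal sketch of that calling contract follows, with a crude stand-in for IPython's CompletionSplitter; the namespace and input line are illustrative only, not part of the diff.

    # Sketch of the complete() contract: cursor_pos defaults to the end of
    # the input, and text, when omitted, is recovered from the line buffer.
    def sketch_complete(namespace, text=None, line_buffer=None, cursor_pos=None):
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)
        if not text:
            # crude stand-in for CompletionSplitter.split_line()
            text = line_buffer[:cursor_pos].split()[-1] if line_buffer else ''
        matches = sorted(name for name in namespace if name.startswith(text))
        return text, matches

    print(sketch_complete({'example_value': 42, 'other': 1},
                          line_buffer='x = example_va', cursor_pos=14))
    # -> ('example_va', ['example_value'])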
diff --git a/contrib/python/ipython/py2/IPython/core/completerlib.py b/contrib/python/ipython/py2/IPython/core/completerlib.py
index 4e7ae41941..e736ca73d1 100644
--- a/contrib/python/ipython/py2/IPython/core/completerlib.py
+++ b/contrib/python/ipython/py2/IPython/core/completerlib.py
@@ -1,76 +1,76 @@
-# encoding: utf-8
-"""Implementations for various useful completers.
-
-These are all loaded by default by IPython.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib imports
-import glob
-import inspect
+# encoding: utf-8
+"""Implementations for various useful completers.
+
+These are all loaded by default by IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team.
+#
+# Distributed under the terms of the BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# Stdlib imports
+import glob
+import inspect
import itertools
-import os
-import re
-import sys
-
-try:
- # Python >= 3.3
- from importlib.machinery import all_suffixes
- _suffixes = all_suffixes()
-except ImportError:
- from imp import get_suffixes
- _suffixes = [ s[0] for s in get_suffixes() ]
-
-# Third-party imports
-from time import time
-from zipimport import zipimporter
-
-# Our own imports
-from IPython.core.completer import expand_user, compress_user
-from IPython.core.error import TryNext
-from IPython.utils._process_common import arg_split
-from IPython.utils.py3compat import string_types
-
-# FIXME: this should be pulled in with the right call via the component system
-from IPython import get_ipython
-
+import os
+import re
+import sys
+
+try:
+ # Python >= 3.3
+ from importlib.machinery import all_suffixes
+ _suffixes = all_suffixes()
+except ImportError:
+ from imp import get_suffixes
+ _suffixes = [ s[0] for s in get_suffixes() ]
+
+# Third-party imports
+from time import time
+from zipimport import zipimporter
+
+# Our own imports
+from IPython.core.completer import expand_user, compress_user
+from IPython.core.error import TryNext
+from IPython.utils._process_common import arg_split
+from IPython.utils.py3compat import string_types
+
+# FIXME: this should be pulled in with the right call via the component system
+from IPython import get_ipython
+
from __res import importer
-#-----------------------------------------------------------------------------
-# Globals and constants
-#-----------------------------------------------------------------------------
-
-# Time in seconds after which the rootmodules will be stored permanently in the
-# ipython ip.db database (kept in the user's .ipython dir).
-TIMEOUT_STORAGE = 2
-
-# Time in seconds after which we give up
-TIMEOUT_GIVEUP = 20
-
-# Regular expression for the python import statement
-import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
- r'(?P<package>[/\\]__init__)?'
- r'(?P<suffix>%s)$' %
- r'|'.join(re.escape(s) for s in _suffixes))
-
-# RE for the ipython %run command (python + ipython scripts)
-magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
-
-#-----------------------------------------------------------------------------
-# Local utilities
-#-----------------------------------------------------------------------------
-
+#-----------------------------------------------------------------------------
+# Globals and constants
+#-----------------------------------------------------------------------------
+
+# Time in seconds after which the rootmodules will be stored permanently in the
+# ipython ip.db database (kept in the user's .ipython dir).
+TIMEOUT_STORAGE = 2
+
+# Time in seconds after which we give up
+TIMEOUT_GIVEUP = 20
+
+# Regular expression for the python import statement
+import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
+ r'(?P<package>[/\\]__init__)?'
+ r'(?P<suffix>%s)$' %
+ r'|'.join(re.escape(s) for s in _suffixes))
+
+# RE for the ipython %run command (python + ipython scripts)
+magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
arcadia_rootmodules_cache = None
arcadia_modules_cache = None
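The import_re pattern restored in the hunk above recognizes importable files: a module name, an optional package __init__ marker, and one of the interpreter's import suffixes. A quick demonstration of what it accepts, using an illustrative subset in place of all_suffixes():

    import re

    _suffixes = ['.py', '.pyc', '.so']  # illustrative subset of all_suffixes()
    import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
                           r'(?P<package>[/\\]__init__)?'
                           r'(?P<suffix>%s)$' %
                           r'|'.join(re.escape(s) for s in _suffixes))

    for candidate in ('json.py', 'yaml/__init__.py', '_socket.so', 'README.txt'):
        m = import_re.match(candidate)
        print(candidate, '->', m.group('name') if m else None)
    # json.py -> json
    # yaml/__init__.py -> yaml
    # _socket.so -> _socket
    # README.txt -> None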
@@ -116,291 +116,291 @@ def arcadia_get_root_modules():
-def module_list(path):
- """
- Return the list containing the names of the modules available in the given
- folder.
- """
- # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
- if path == '':
- path = '.'
-
- # A few local constants to be used in loops below
- pjoin = os.path.join
-
- if os.path.isdir(path):
- # Build a list of all files in the directory and all files
- # in its subdirectories. For performance reasons, do not
- # recurse more than one level into subdirectories.
- files = []
- for root, dirs, nondirs in os.walk(path, followlinks=True):
- subdir = root[len(path)+1:]
- if subdir:
- files.extend(pjoin(subdir, f) for f in nondirs)
- dirs[:] = [] # Do not recurse into additional subdirectories.
- else:
- files.extend(nondirs)
-
- else:
- try:
- files = list(zipimporter(path)._files.keys())
- except:
- files = []
-
- # Build a list of modules which match the import_re regex.
- modules = []
- for f in files:
- m = import_re.match(f)
- if m:
- modules.append(m.group('name'))
- return list(set(modules))
-
-
-def get_root_modules():
- """
- Returns a list containing the names of all the modules available in the
- folders of the pythonpath.
-
- ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
- """
- ip = get_ipython()
+def module_list(path):
+ """
+ Return the list containing the names of the modules available in the given
+ folder.
+ """
+ # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
+ if path == '':
+ path = '.'
+
+ # A few local constants to be used in loops below
+ pjoin = os.path.join
+
+ if os.path.isdir(path):
+ # Build a list of all files in the directory and all files
+ # in its subdirectories. For performance reasons, do not
+ # recurse more than one level into subdirectories.
+ files = []
+ for root, dirs, nondirs in os.walk(path, followlinks=True):
+ subdir = root[len(path)+1:]
+ if subdir:
+ files.extend(pjoin(subdir, f) for f in nondirs)
+ dirs[:] = [] # Do not recurse into additional subdirectories.
+ else:
+ files.extend(nondirs)
+
+ else:
+ try:
+ files = list(zipimporter(path)._files.keys())
+ except:
+ files = []
+
+ # Build a list of modules which match the import_re regex.
+ modules = []
+ for f in files:
+ m = import_re.match(f)
+ if m:
+ modules.append(m.group('name'))
+ return list(set(modules))
+
+
+def get_root_modules():
+ """
+ Returns a list containing the names of all the modules available in the
+ folders listed in sys.path.
+
+ ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
+ """
+ ip = get_ipython()
if ip is None:
# No global shell instance to store cached list of modules.
# Don't try to scan for modules every time.
return list(sys.builtin_module_names)
- rootmodules_cache = ip.db.get('rootmodules_cache', {})
- rootmodules = list(sys.builtin_module_names)
- start_time = time()
- store = False
- for path in sys.path:
- try:
- modules = rootmodules_cache[path]
- except KeyError:
- modules = module_list(path)
- try:
- modules.remove('__init__')
- except ValueError:
- pass
- if path not in ('', '.'): # cwd modules should not be cached
- rootmodules_cache[path] = modules
- if time() - start_time > TIMEOUT_STORAGE and not store:
- store = True
- print("\nCaching the list of root modules, please wait!")
- print("(This will only be done once - type '%rehashx' to "
- "reset cache!)\n")
- sys.stdout.flush()
- if time() - start_time > TIMEOUT_GIVEUP:
- print("This is taking too long, we give up.\n")
- return []
- rootmodules.extend(modules)
- if store:
- ip.db['rootmodules_cache'] = rootmodules_cache
+ rootmodules_cache = ip.db.get('rootmodules_cache', {})
+ rootmodules = list(sys.builtin_module_names)
+ start_time = time()
+ store = False
+ for path in sys.path:
+ try:
+ modules = rootmodules_cache[path]
+ except KeyError:
+ modules = module_list(path)
+ try:
+ modules.remove('__init__')
+ except ValueError:
+ pass
+ if path not in ('', '.'): # cwd modules should not be cached
+ rootmodules_cache[path] = modules
+ if time() - start_time > TIMEOUT_STORAGE and not store:
+ store = True
+ print("\nCaching the list of root modules, please wait!")
+ print("(This will only be done once - type '%rehashx' to "
+ "reset cache!)\n")
+ sys.stdout.flush()
+ if time() - start_time > TIMEOUT_GIVEUP:
+ print("This is taking too long, we give up.\n")
+ return []
+ rootmodules.extend(modules)
+ if store:
+ ip.db['rootmodules_cache'] = rootmodules_cache
rootmodules = list(set(rootmodules))
- return rootmodules
-
-
-def is_importable(module, attr, only_modules):
- if only_modules:
- return inspect.ismodule(getattr(module, attr))
- else:
- return not(attr[:2] == '__' and attr[-2:] == '__')
-
-def try_import(mod, only_modules=False):
+ return rootmodules
+
+
+def is_importable(module, attr, only_modules):
+ if only_modules:
+ return inspect.ismodule(getattr(module, attr))
+ else:
+ return not(attr[:2] == '__' and attr[-2:] == '__')
+
+def try_import(mod, only_modules=False):
mod = mod.rstrip('.')
- try:
- m = __import__(mod)
- except:
- return []
- mods = mod.split('.')
- for module in mods[1:]:
- m = getattr(m, module)
-
+ try:
+ m = __import__(mod)
+ except:
+ return []
+ mods = mod.split('.')
+ for module in mods[1:]:
+ m = getattr(m, module)
+
filename = getattr(m, '__file__', '')
m_is_init = '__init__' in (filename or '') or filename == mod
-
- completions = []
- if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
- completions.extend( [attr for attr in dir(m) if
- is_importable(m, attr, only_modules)])
-
- completions.extend(getattr(m, '__all__', []))
- if m_is_init:
+
+ completions = []
+ if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
+ completions.extend( [attr for attr in dir(m) if
+ is_importable(m, attr, only_modules)])
+
+ completions.extend(getattr(m, '__all__', []))
+ if m_is_init:
completions.extend(arcadia_module_list(mod))
completions = {c for c in completions if isinstance(c, string_types)}
completions.discard('__init__')
return sorted(completions)
-
-
-#-----------------------------------------------------------------------------
-# Completion-related functions.
-#-----------------------------------------------------------------------------
-
-def quick_completer(cmd, completions):
- """ Easily create a trivial completer for a command.
-
- Takes either a list of completions, or a single string of completions
- (which will be split on whitespace).
-
- Example::
-
- [d:\ipython]|1> import ipy_completers
- [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
- [d:\ipython]|3> foo b<TAB>
- bar baz
- [d:\ipython]|3> foo ba
- """
-
- if isinstance(completions, string_types):
- completions = completions.split()
-
- def do_complete(self, event):
- return completions
-
- get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
-
-def module_completion(line):
- """
- Returns a list containing the completion possibilities for an import line.
-
- The line looks like this:
- 'import xml.d'
- 'from xml.dom import'
- """
-
- words = line.split(' ')
- nwords = len(words)
-
- # from whatever <tab> -> 'import '
- if nwords == 3 and words[0] == 'from':
- return ['import ']
-
- # 'from xy<tab>' or 'import xy<tab>'
- if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}):
- if nwords == 1:
+
+
+#-----------------------------------------------------------------------------
+# Completion-related functions.
+#-----------------------------------------------------------------------------
+
+def quick_completer(cmd, completions):
+ """ Easily create a trivial completer for a command.
+
+ Takes either a list of completions, or a single string of completions
+ (which will be split on whitespace).
+
+ Example::
+
+ [d:\ipython]|1> import ipy_completers
+ [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
+ [d:\ipython]|3> foo b<TAB>
+ bar baz
+ [d:\ipython]|3> foo ba
+ """
+
+ if isinstance(completions, string_types):
+ completions = completions.split()
+
+ def do_complete(self, event):
+ return completions
+
+ get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
+
+def module_completion(line):
+ """
+ Returns a list containing the completion possibilities for an import line.
+
+ The line looks like this:
+ 'import xml.d'
+ 'from xml.dom import'
+ """
+
+ words = line.split(' ')
+ nwords = len(words)
+
+ # from whatever <tab> -> 'import '
+ if nwords == 3 and words[0] == 'from':
+ return ['import ']
+
+ # 'from xy<tab>' or 'import xy<tab>'
+ if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}):
+ if nwords == 1:
return arcadia_get_root_modules()
- mod = words[1].split('.')
- if len(mod) < 2:
+ mod = words[1].split('.')
+ if len(mod) < 2:
return arcadia_get_root_modules()
- completion_list = try_import('.'.join(mod[:-1]), True)
- return ['.'.join(mod[:-1] + [el]) for el in completion_list]
-
- # 'from xyz import abc<tab>'
- if nwords >= 3 and words[0] == 'from':
- mod = words[1]
- return try_import(mod)
-
-#-----------------------------------------------------------------------------
-# Completers
-#-----------------------------------------------------------------------------
-# These all have the func(self, event) signature to be used as custom
-# completers
-
-def module_completer(self,event):
- """Give completions after user has typed 'import ...' or 'from ...'"""
-
- # This works in all versions of python. While 2.5 has
- # pkgutil.walk_packages(), that particular routine is fairly dangerous,
- # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
- # of possibly problematic side effects.
- # This searches the folders in sys.path for available modules.
-
- return module_completion(event.line)
-
-# FIXME: there's a lot of logic common to the run, cd and builtin file
-# completers, that is currently reimplemented in each.
-
-def magic_run_completer(self, event):
- """Complete files that end in .py or .ipy or .ipynb for the %run command.
- """
- comps = arg_split(event.line, strict=False)
- # relpath should be the current token that we need to complete.
- if (len(comps) > 1) and (not event.line.endswith(' ')):
- relpath = comps[-1].strip("'\"")
- else:
- relpath = ''
-
- #print("\nev=", event) # dbg
- #print("rp=", relpath) # dbg
- #print('comps=', comps) # dbg
-
- lglob = glob.glob
- isdir = os.path.isdir
- relpath, tilde_expand, tilde_val = expand_user(relpath)
-
- # Find if the user has already typed the first filename, after which we
- # should complete on all files, since after the first one other files may
- # be arguments to the input script.
-
- if any(magic_run_re.match(c) for c in comps):
- matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
- for f in lglob(relpath+'*')]
- else:
- dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
- pys = [f.replace('\\','/')
- for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
- lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
-
- matches = dirs + pys
-
- #print('run comp:', dirs+pys) # dbg
- return [compress_user(p, tilde_expand, tilde_val) for p in matches]
-
-
-def cd_completer(self, event):
- """Completer function for cd, which only returns directories."""
- ip = get_ipython()
- relpath = event.symbol
-
- #print(event) # dbg
- if event.line.endswith('-b') or ' -b ' in event.line:
- # return only bookmark completions
- bkms = self.db.get('bookmarks', None)
- if bkms:
- return bkms.keys()
- else:
- return []
-
- if event.symbol == '-':
- width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
- # jump in directory history by number
- fmt = '-%0' + width_dh +'d [%s]'
- ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
- if len(ents) > 1:
- return ents
- return []
-
- if event.symbol.startswith('--'):
- return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
-
- # Expand ~ in path and normalize directory separators.
- relpath, tilde_expand, tilde_val = expand_user(relpath)
- relpath = relpath.replace('\\','/')
-
- found = []
- for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
- if os.path.isdir(f)]:
- if ' ' in d:
- # we don't want to deal with any of that; the complex code
- # for this lives elsewhere
- raise TryNext
-
- found.append(d)
-
- if not found:
- if os.path.isdir(relpath):
- return [compress_user(relpath, tilde_expand, tilde_val)]
-
- # if no completions so far, try bookmarks
- bks = self.db.get('bookmarks',{})
- bkmatches = [s for s in bks if s.startswith(event.symbol)]
- if bkmatches:
- return bkmatches
-
- raise TryNext
-
- return [compress_user(p, tilde_expand, tilde_val) for p in found]
-
-def reset_completer(self, event):
- "A completer for %reset magic"
- return '-f -s in out array dhist'.split()
+ completion_list = try_import('.'.join(mod[:-1]), True)
+ return ['.'.join(mod[:-1] + [el]) for el in completion_list]
+
+ # 'from xyz import abc<tab>'
+ if nwords >= 3 and words[0] == 'from':
+ mod = words[1]
+ return try_import(mod)
+
+#-----------------------------------------------------------------------------
+# Completers
+#-----------------------------------------------------------------------------
+# These all have the func(self, event) signature to be used as custom
+# completers
+
+def module_completer(self,event):
+ """Give completions after user has typed 'import ...' or 'from ...'"""
+
+ # This works in all versions of python. While 2.5 has
+ # pkgutil.walk_packages(), that particular routine is fairly dangerous,
+ # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
+ # of possibly problematic side effects.
+ # This searches the folders in sys.path for available modules.
+
+ return module_completion(event.line)
+
+# FIXME: there's a lot of logic common to the run, cd and builtin file
+# completers, that is currently reimplemented in each.
+
+def magic_run_completer(self, event):
+ """Complete files that end in .py or .ipy or .ipynb for the %run command.
+ """
+ comps = arg_split(event.line, strict=False)
+ # relpath should be the current token that we need to complete.
+ if (len(comps) > 1) and (not event.line.endswith(' ')):
+ relpath = comps[-1].strip("'\"")
+ else:
+ relpath = ''
+
+ #print("\nev=", event) # dbg
+ #print("rp=", relpath) # dbg
+ #print('comps=', comps) # dbg
+
+ lglob = glob.glob
+ isdir = os.path.isdir
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+
+ # Find if the user has already typed the first filename, after which we
+ # should complete on all files, since after the first one other files may
+ # be arguments to the input script.
+
+ if any(magic_run_re.match(c) for c in comps):
+ matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
+ for f in lglob(relpath+'*')]
+ else:
+ dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
+ pys = [f.replace('\\','/')
+ for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
+ lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
+
+ matches = dirs + pys
+
+ #print('run comp:', dirs+pys) # dbg
+ return [compress_user(p, tilde_expand, tilde_val) for p in matches]
+
+
+def cd_completer(self, event):
+ """Completer function for cd, which only returns directories."""
+ ip = get_ipython()
+ relpath = event.symbol
+
+ #print(event) # dbg
+ if event.line.endswith('-b') or ' -b ' in event.line:
+ # return only bookmark completions
+ bkms = self.db.get('bookmarks', None)
+ if bkms:
+ return bkms.keys()
+ else:
+ return []
+
+ if event.symbol == '-':
+ width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
+ # jump in directory history by number
+ fmt = '-%0' + width_dh +'d [%s]'
+ ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
+ if len(ents) > 1:
+ return ents
+ return []
+
+ if event.symbol.startswith('--'):
+ return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
+
+ # Expand ~ in path and normalize directory separators.
+ relpath, tilde_expand, tilde_val = expand_user(relpath)
+ relpath = relpath.replace('\\','/')
+
+ found = []
+ for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
+ if os.path.isdir(f)]:
+ if ' ' in d:
+ # we don't want to deal with any of that; the complex code
+ # for this lives elsewhere
+ raise TryNext
+
+ found.append(d)
+
+ if not found:
+ if os.path.isdir(relpath):
+ return [compress_user(relpath, tilde_expand, tilde_val)]
+
+ # if no completions so far, try bookmarks
+ bks = self.db.get('bookmarks',{})
+ bkmatches = [s for s in bks if s.startswith(event.symbol)]
+ if bkmatches:
+ return bkmatches
+
+ raise TryNext
+
+ return [compress_user(p, tilde_expand, tilde_val) for p in found]
+
+def reset_completer(self, event):
+ "A completer for %reset magic"
+ return '-f -s in out array dhist'.split()
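Taken together, module_completion() above distinguishes three shapes of import line. A sketch of each case against a stock IPython install; note that this vendored copy also imports the Arcadia-only __res module, so it only imports cleanly inside a ya.make build, and the xml modules below are just examples:

    from IPython.core.completerlib import module_completion

    # 'from <pkg> <TAB>' -> suggest the 'import' keyword itself
    print(module_completion('from xml.dom '))       # ['import ']

    # 'import xml.do<TAB>' -> candidate xml.* submodules; the prefix
    # filtering down to 'xml.do...' happens later in the completer
    print(module_completion('import xml.do'))

    # 'from xml import <TAB>' -> importable attributes/submodules of xml
    print(module_completion('from xml import '))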
diff --git a/contrib/python/ipython/py2/IPython/core/crashhandler.py b/contrib/python/ipython/py2/IPython/core/crashhandler.py
index 8341a61db8..22bbd7ae81 100644
--- a/contrib/python/ipython/py2/IPython/core/crashhandler.py
+++ b/contrib/python/ipython/py2/IPython/core/crashhandler.py
@@ -1,59 +1,59 @@
-# encoding: utf-8
-"""sys.excepthook for IPython itself, leaves a detailed report on disk.
-
-Authors:
-
-* Fernando Perez
-* Brian E. Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-import os
-import sys
-import traceback
-from pprint import pformat
-
-from IPython.core import ultratb
-from IPython.core.release import author_email
-from IPython.utils.sysinfo import sys_info
-from IPython.utils.py3compat import input, getcwd
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-# Template for the user message.
-_default_message_template = """\
-Oops, {app_name} crashed. We do our best to make it stable, but...
-
-A crash report was automatically generated with the following information:
- - A verbatim copy of the crash traceback.
- - A copy of your input history during this session.
- - Data on your current {app_name} configuration.
-
-It was left in the file named:
-\t'{crash_report_fname}'
-If you can email this file to the developers, the information in it will help
-them in understanding and correcting the problem.
-
-You can mail it to: {contact_name} at {contact_email}
-with the subject '{app_name} Crash Report'.
-
-If you want to do it now, the following command will work (under Unix):
-mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
-
+# encoding: utf-8
+"""sys.excepthook for IPython itself, leaves a detailed report on disk.
+
+Authors:
+
+* Fernando Perez
+* Brian E. Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+import os
+import sys
+import traceback
+from pprint import pformat
+
+from IPython.core import ultratb
+from IPython.core.release import author_email
+from IPython.utils.sysinfo import sys_info
+from IPython.utils.py3compat import input, getcwd
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# Template for the user message.
+_default_message_template = """\
+Oops, {app_name} crashed. We do our best to make it stable, but...
+
+A crash report was automatically generated with the following information:
+ - A verbatim copy of the crash traceback.
+ - A copy of your input history during this session.
+ - Data on your current {app_name} configuration.
+
+It was left in the file named:
+\t'{crash_report_fname}'
+If you can email this file to the developers, the information in it will help
+them in understanding and correcting the problem.
+
+You can mail it to: {contact_name} at {contact_email}
+with the subject '{app_name} Crash Report'.
+
+If you want to do it now, the following command will work (under Unix):
+mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
+
In your email, please also include information about:
- The operating system under which the crash happened: Linux, macOS, Windows,
other, and which exact version (for example: Ubuntu 16.04.3, macOS 10.13.2,
@@ -64,163 +64,163 @@ In your email, please also include information about:
input to get the same crash? Ideally, find a minimal yet complete sequence
of instructions that yields the crash.
-To ensure accurate tracking of this issue, please file a report about it at:
-{bug_tracker}
-"""
-
-_lite_message_template = """
-If you suspect this is an IPython bug, please report it at:
- https://github.com/ipython/ipython/issues
-or send an email to the mailing list at {email}
-
-You can print a more detailed traceback right now with "%tb", or use "%debug"
-to interactively debug it.
-
-Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
- {config}Application.verbose_crash=True
-"""
-
-
-class CrashHandler(object):
- """Customizable crash handlers for IPython applications.
-
- Instances of this class provide a :meth:`__call__` method which can be
- used as a ``sys.excepthook``. The :meth:`__call__` signature is::
-
- def __call__(self, etype, evalue, etb)
- """
-
- message_template = _default_message_template
- section_sep = '\n\n'+'*'*75+'\n\n'
-
- def __init__(self, app, contact_name=None, contact_email=None,
- bug_tracker=None, show_crash_traceback=True, call_pdb=False):
- """Create a new crash handler
-
- Parameters
- ----------
- app : Application
- A running :class:`Application` instance, which will be queried at
- crash time for internal information.
-
- contact_name : str
- A string with the name of the person to contact.
-
- contact_email : str
- A string with the email address of the contact.
-
- bug_tracker : str
- A string with the URL for your project's bug tracker.
-
- show_crash_traceback : bool
- If false, don't print the crash traceback on stderr, only generate
- the on-disk report
-
- Non-argument instance attributes:
-
- These instances contain some non-argument attributes which allow for
- further customization of the crash handler's behavior. Please see the
- source for further details.
- """
- self.crash_report_fname = "Crash_report_%s.txt" % app.name
- self.app = app
- self.call_pdb = call_pdb
- #self.call_pdb = True # dbg
- self.show_crash_traceback = show_crash_traceback
- self.info = dict(app_name = app.name,
- contact_name = contact_name,
- contact_email = contact_email,
- bug_tracker = bug_tracker,
- crash_report_fname = self.crash_report_fname)
-
-
- def __call__(self, etype, evalue, etb):
- """Handle an exception, call for compatible with sys.excepthook"""
-
- # do not allow the crash handler to be called twice without reinstalling it
- # this prevents unlikely errors in the crash handling from entering an
- # infinite loop.
- sys.excepthook = sys.__excepthook__
-
- # Report tracebacks shouldn't use color in general (safer for users)
- color_scheme = 'NoColor'
-
- # Use this ONLY for developer debugging (keep commented out for release)
- #color_scheme = 'Linux' # dbg
- try:
- rptdir = self.app.ipython_dir
- except:
- rptdir = getcwd()
- if rptdir is None or not os.path.isdir(rptdir):
- rptdir = getcwd()
- report_name = os.path.join(rptdir,self.crash_report_fname)
- # write the report filename into the instance dict so it can get
- # properly expanded out in the user message template
- self.crash_report_fname = report_name
- self.info['crash_report_fname'] = report_name
- TBhandler = ultratb.VerboseTB(
- color_scheme=color_scheme,
- long_header=1,
- call_pdb=self.call_pdb,
- )
- if self.call_pdb:
- TBhandler(etype,evalue,etb)
- return
- else:
- traceback = TBhandler.text(etype,evalue,etb,context=31)
-
- # print traceback to screen
- if self.show_crash_traceback:
- print(traceback, file=sys.stderr)
-
- # and generate a complete report on disk
- try:
- report = open(report_name,'w')
- except:
- print('Could not create crash report on disk.', file=sys.stderr)
- return
-
- # Inform user on stderr of what happened
- print('\n'+'*'*70+'\n', file=sys.stderr)
- print(self.message_template.format(**self.info), file=sys.stderr)
-
- # Construct report on disk
- report.write(self.make_report(traceback))
- report.close()
- input("Hit <Enter> to quit (your terminal may close):")
-
- def make_report(self,traceback):
- """Return a string containing a crash report."""
-
- sec_sep = self.section_sep
-
- report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
- rpt_add = report.append
- rpt_add(sys_info())
-
- try:
- config = pformat(self.app.config)
- rpt_add(sec_sep)
- rpt_add('Application name: %s\n\n' % self.app.name)
- rpt_add('Current user configuration structure:\n\n')
- rpt_add(config)
- except:
- pass
- rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
-
- return ''.join(report)
-
-
-def crash_handler_lite(etype, evalue, tb):
- """a light excepthook, adding a small message to the usual traceback"""
- traceback.print_exception(etype, evalue, tb)
-
- from IPython.core.interactiveshell import InteractiveShell
- if InteractiveShell.initialized():
- # we are in a Shell environment, give %magic example
- config = "%config "
- else:
- # we are not in a shell, show generic config
- config = "c."
- print(_lite_message_template.format(email=author_email, config=config), file=sys.stderr)
-
+To ensure accurate tracking of this issue, please file a report about it at:
+{bug_tracker}
+"""
+
+_lite_message_template = """
+If you suspect this is an IPython bug, please report it at:
+ https://github.com/ipython/ipython/issues
+or send an email to the mailing list at {email}
+
+You can print a more detailed traceback right now with "%tb", or use "%debug"
+to interactively debug it.
+
+Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
+ {config}Application.verbose_crash=True
+"""
+
+
+class CrashHandler(object):
+ """Customizable crash handlers for IPython applications.
+
+ Instances of this class provide a :meth:`__call__` method which can be
+ used as a ``sys.excepthook``. The :meth:`__call__` signature is::
+
+ def __call__(self, etype, evalue, etb)
+ """
+
+ message_template = _default_message_template
+ section_sep = '\n\n'+'*'*75+'\n\n'
+
+ def __init__(self, app, contact_name=None, contact_email=None,
+ bug_tracker=None, show_crash_traceback=True, call_pdb=False):
+ """Create a new crash handler
+
+ Parameters
+ ----------
+ app : Application
+ A running :class:`Application` instance, which will be queried at
+ crash time for internal information.
+
+ contact_name : str
+ A string with the name of the person to contact.
+
+ contact_email : str
+ A string with the email address of the contact.
+
+ bug_tracker : str
+ A string with the URL for your project's bug tracker.
+
+ show_crash_traceback : bool
+ If false, don't print the crash traceback on stderr, only generate
+ the on-disk report
+
+ Non-argument instance attributes:
+
+ These instances contain some non-argument attributes which allow for
+ further customization of the crash handler's behavior. Please see the
+ source for further details.
+ """
+ self.crash_report_fname = "Crash_report_%s.txt" % app.name
+ self.app = app
+ self.call_pdb = call_pdb
+ #self.call_pdb = True # dbg
+ self.show_crash_traceback = show_crash_traceback
+ self.info = dict(app_name = app.name,
+ contact_name = contact_name,
+ contact_email = contact_email,
+ bug_tracker = bug_tracker,
+ crash_report_fname = self.crash_report_fname)
+
+
+ def __call__(self, etype, evalue, etb):
+ """Handle an exception, call for compatible with sys.excepthook"""
+
+ # do not allow the crash handler to be called twice without reinstalling it
+ # this prevents unlikely errors in the crash handling from entering an
+ # infinite loop.
+ sys.excepthook = sys.__excepthook__
+
+ # Report tracebacks shouldn't use color in general (safer for users)
+ color_scheme = 'NoColor'
+
+ # Use this ONLY for developer debugging (keep commented out for release)
+ #color_scheme = 'Linux' # dbg
+ try:
+ rptdir = self.app.ipython_dir
+ except:
+ rptdir = getcwd()
+ if rptdir is None or not os.path.isdir(rptdir):
+ rptdir = getcwd()
+ report_name = os.path.join(rptdir,self.crash_report_fname)
+ # write the report filename into the instance dict so it can get
+ # properly expanded out in the user message template
+ self.crash_report_fname = report_name
+ self.info['crash_report_fname'] = report_name
+ TBhandler = ultratb.VerboseTB(
+ color_scheme=color_scheme,
+ long_header=1,
+ call_pdb=self.call_pdb,
+ )
+ if self.call_pdb:
+ TBhandler(etype,evalue,etb)
+ return
+ else:
+ traceback = TBhandler.text(etype,evalue,etb,context=31)
+
+ # print traceback to screen
+ if self.show_crash_traceback:
+ print(traceback, file=sys.stderr)
+
+ # and generate a complete report on disk
+ try:
+ report = open(report_name,'w')
+ except:
+ print('Could not create crash report on disk.', file=sys.stderr)
+ return
+
+ # Inform user on stderr of what happened
+ print('\n'+'*'*70+'\n', file=sys.stderr)
+ print(self.message_template.format(**self.info), file=sys.stderr)
+
+ # Construct report on disk
+ report.write(self.make_report(traceback))
+ report.close()
+ input("Hit <Enter> to quit (your terminal may close):")
+
+ def make_report(self,traceback):
+ """Return a string containing a crash report."""
+
+ sec_sep = self.section_sep
+
+ report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
+ rpt_add = report.append
+ rpt_add(sys_info())
+
+ try:
+ config = pformat(self.app.config)
+ rpt_add(sec_sep)
+ rpt_add('Application name: %s\n\n' % self.app.name)
+ rpt_add('Current user configuration structure:\n\n')
+ rpt_add(config)
+ except:
+ pass
+ rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
+
+ return ''.join(report)
+
+
+def crash_handler_lite(etype, evalue, tb):
+ """a light excepthook, adding a small message to the usual traceback"""
+ traceback.print_exception(etype, evalue, tb)
+
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ # we are in a Shell environment, give %magic example
+ config = "%config "
+ else:
+ # we are not in a shell, show generic config
+ config = "c."
+ print(_lite_message_template.format(email=author_email, config=config), file=sys.stderr)
+
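crash_handler_lite above is the lightweight counterpart to the CrashHandler class: it augments the normal traceback with a short report notice instead of writing a crash file. A sketch of installing it as the process-wide excepthook; raising at top level is deliberate here, to trigger the hook:

    import sys
    from IPython.core.crashhandler import crash_handler_lite

    # Route uncaught exceptions through the lite handler defined above.
    sys.excepthook = crash_handler_lite

    # The traceback prints as usual, followed by _lite_message_template;
    # with no InteractiveShell initialized, the config prefix shown is "c.".
    raise RuntimeError('demo crash')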
diff --git a/contrib/python/ipython/py2/IPython/core/debugger.py b/contrib/python/ipython/py2/IPython/core/debugger.py
index c5a443eb5b..f08cfb1a78 100644
--- a/contrib/python/ipython/py2/IPython/core/debugger.py
+++ b/contrib/python/ipython/py2/IPython/core/debugger.py
@@ -1,53 +1,53 @@
-# -*- coding: utf-8 -*-
-"""
-Pdb debugger class.
-
-Modified from the standard pdb.Pdb class to avoid including readline, so that
-the command line completion of other programs which include this isn't
-damaged.
-
-In the future, this class will be expanded with improvements over the standard
-pdb.
-
-The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
-changes. Licensing should therefore be under the standard Python terms. For
-details on the PSF (Python Software Foundation) standard license, see:
-
+# -*- coding: utf-8 -*-
+"""
+Pdb debugger class.
+
+Modified from the standard pdb.Pdb class to avoid including readline, so that
+the command line completion of other programs which include this isn't
+damaged.
+
+In the future, this class will be expanded with improvements over the standard
+pdb.
+
+The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
+changes. Licensing should therefore be under the standard Python terms. For
+details on the PSF (Python Software Foundation) standard license, see:
+
https://docs.python.org/2/license.html
"""
-
-#*****************************************************************************
-#
-# This file is licensed under the PSF license.
-#
-# Copyright (C) 2001 Python Software Foundation, www.python.org
-# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
-#
-#
-#*****************************************************************************
-from __future__ import print_function
-
-import bdb
-import functools
-import inspect
-import sys
+
+#*****************************************************************************
+#
+# This file is licensed under the PSF license.
+#
+# Copyright (C) 2001 Python Software Foundation, www.python.org
+# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
+#
+#
+#*****************************************************************************
+from __future__ import print_function
+
+import bdb
+import functools
+import inspect
+import sys
import warnings
-
-from IPython import get_ipython
-from IPython.utils import PyColorize, ulinecache
+
+from IPython import get_ipython
+from IPython.utils import PyColorize, ulinecache
from IPython.utils import coloransi, py3compat
-from IPython.core.excolors import exception_colors
-from IPython.testing.skipdoctest import skip_doctest
-
+from IPython.core.excolors import exception_colors
+from IPython.testing.skipdoctest import skip_doctest
+
-prompt = 'ipdb> '
+prompt = 'ipdb> '
-#We have to check this directly from sys.argv, config struct not yet available
+#We have to check this directly from sys.argv, config struct not yet available
from pdb import Pdb as OldPdb
-
-# Allow the set_trace code to operate outside of an ipython instance, even if
-# it does so with some limitations. The rest of this support is implemented in
-# the Tracer constructor.
+
+# Allow the set_trace code to operate outside of an ipython instance, even if
+# it does so with some limitations. The rest of this support is implemented in
+# the Tracer constructor.
def make_arrow(pad):
"""generate the leading arrow in front of traceback or debugger"""
@@ -58,555 +58,555 @@ def make_arrow(pad):
return ''
-def BdbQuit_excepthook(et, ev, tb, excepthook=None):
- """Exception hook which handles `BdbQuit` exceptions.
-
- All other exceptions are processed using the `excepthook`
- parameter.
- """
+def BdbQuit_excepthook(et, ev, tb, excepthook=None):
+ """Exception hook which handles `BdbQuit` exceptions.
+
+ All other exceptions are processed using the `excepthook`
+ parameter.
+ """
warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
DeprecationWarning, stacklevel=2)
- if et==bdb.BdbQuit:
- print('Exiting Debugger.')
- elif excepthook is not None:
- excepthook(et, ev, tb)
- else:
- # Backwards compatibility. Raise deprecation warning?
- BdbQuit_excepthook.excepthook_ori(et,ev,tb)
-
-
-def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
+ if et==bdb.BdbQuit:
+ print('Exiting Debugger.')
+ elif excepthook is not None:
+ excepthook(et, ev, tb)
+ else:
+ # Backwards compatibility. Raise deprecation warning?
+ BdbQuit_excepthook.excepthook_ori(et,ev,tb)
+
+
+def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
warnings.warn(
"`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
DeprecationWarning, stacklevel=2)
- print('Exiting Debugger.')
-
-
-class Tracer(object):
+ print('Exiting Debugger.')
+
+
+class Tracer(object):
"""
DEPRECATED
-
+
Class for local debugging, similar to pdb.set_trace.
- Instances of this class, when called, behave like pdb.set_trace, but
- providing IPython's enhanced capabilities.
-
- This is implemented as a class which must be initialized in your own code
- and not as a standalone function because we need to detect at runtime
- whether IPython is already active or not. That detection is done in the
- constructor, ensuring that this code plays nicely with a running IPython,
- while functioning acceptably (though with limitations) if outside of it.
- """
-
- @skip_doctest
- def __init__(self, colors=None):
+ Instances of this class, when called, behave like pdb.set_trace, but
+ providing IPython's enhanced capabilities.
+
+ This is implemented as a class which must be initialized in your own code
+ and not as a standalone function because we need to detect at runtime
+ whether IPython is already active or not. That detection is done in the
+ constructor, ensuring that this code plays nicely with a running IPython,
+ while functioning acceptably (though with limitations) if outside of it.
+ """
+
+ @skip_doctest
+ def __init__(self, colors=None):
"""
DEPRECATED
-
+
Create a local debugger instance.
- Parameters
- ----------
-
- colors : str, optional
- The name of the color scheme to use, it must be one of IPython's
- valid color schemes. If not given, the function will default to
- the current IPython scheme when running inside IPython, and to
- 'NoColor' otherwise.
-
- Examples
- --------
- ::
-
- from IPython.core.debugger import Tracer; debug_here = Tracer()
-
- Later in your code::
-
- debug_here() # -> will open up the debugger at that point.
-
- Once the debugger activates, you can use all of its regular commands to
- step through code, set breakpoints, etc. See the pdb documentation
- from the Python standard library for usage details.
- """
+ Parameters
+ ----------
+
+ colors : str, optional
+ The name of the color scheme to use, it must be one of IPython's
+ valid color schemes. If not given, the function will default to
+ the current IPython scheme when running inside IPython, and to
+ 'NoColor' otherwise.
+
+ Examples
+ --------
+ ::
+
+ from IPython.core.debugger import Tracer; debug_here = Tracer()
+
+ Later in your code::
+
+ debug_here() # -> will open up the debugger at that point.
+
+ Once the debugger activates, you can use all of its regular commands to
+ step through code, set breakpoints, etc. See the pdb documentation
+ from the Python standard library for usage details.
+ """
warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
"`IPython.core.debugger.Pdb.set_trace()`",
DeprecationWarning, stacklevel=2)
-
- ip = get_ipython()
- if ip is None:
- # Outside of ipython, we set our own exception hook manually
- sys.excepthook = functools.partial(BdbQuit_excepthook,
- excepthook=sys.excepthook)
- def_colors = 'NoColor'
- else:
- # In ipython, we use its custom exception handler mechanism
- def_colors = ip.colors
- ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
-
- if colors is None:
- colors = def_colors
-
- # The stdlib debugger internally uses a modified repr from the `repr`
- # module, which limits the length of printed strings to a hardcoded
- # limit of 30 characters. That much trimming is too aggressive, let's
- # at least raise that limit to 80 chars, which should be enough for
- # most interactive uses.
- try:
- try:
- from reprlib import aRepr # Py 3
- except ImportError:
- from repr import aRepr # Py 2
- aRepr.maxstring = 80
- except:
- # This is only a user-facing convenience, so any error we encounter
- # here can be warned about but can be otherwise ignored. These
- # printouts will tell us about problems if this API changes
- import traceback
- traceback.print_exc()
-
- self.debugger = Pdb(colors)
-
- def __call__(self):
- """Starts an interactive debugger at the point where called.
-
- This is similar to the pdb.set_trace() function from the std lib, but
- using IPython's enhanced debugger."""
-
- self.debugger.set_trace(sys._getframe().f_back)
-
-
-def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
- """Make new_fn have old_fn's doc string. This is particularly useful
- for the ``do_...`` commands that hook into the help system.
- Adapted from a comp.lang.python posting
- by Duncan Booth."""
- def wrapper(*args, **kw):
- return new_fn(*args, **kw)
- if old_fn.__doc__:
- wrapper.__doc__ = old_fn.__doc__ + additional_text
- return wrapper
-
-
-def _file_lines(fname):
- """Return the contents of a named file as a list of lines.
-
- This function never raises an IOError exception: if the file can't be
- read, it simply returns an empty list."""
-
- try:
- outfile = open(fname)
- except IOError:
- return []
- else:
- out = outfile.readlines()
- outfile.close()
- return out
-
-
+
+ ip = get_ipython()
+ if ip is None:
+ # Outside of ipython, we set our own exception hook manually
+ sys.excepthook = functools.partial(BdbQuit_excepthook,
+ excepthook=sys.excepthook)
+ def_colors = 'NoColor'
+ else:
+ # In ipython, we use its custom exception handler mechanism
+ def_colors = ip.colors
+ ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
+
+ if colors is None:
+ colors = def_colors
+
+ # The stdlib debugger internally uses a modified repr from the `repr`
+ # module, which limits the length of printed strings to a hardcoded
+ # limit of 30 characters. That much trimming is too aggressive, let's
+ # at least raise that limit to 80 chars, which should be enough for
+ # most interactive uses.
+ try:
+ try:
+ from reprlib import aRepr # Py 3
+ except ImportError:
+ from repr import aRepr # Py 2
+ aRepr.maxstring = 80
+ except:
+ # This is only a user-facing convenience, so any error we encounter
+ # here can be warned about but can be otherwise ignored. These
+ # printouts will tell us about problems if this API changes
+ import traceback
+ traceback.print_exc()
+
+ self.debugger = Pdb(colors)
+
+ def __call__(self):
+ """Starts an interactive debugger at the point where called.
+
+ This is similar to the pdb.set_trace() function from the std lib, but
+ using IPython's enhanced debugger."""
+
+ self.debugger.set_trace(sys._getframe().f_back)
+
+
+def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
+ """Make new_fn have old_fn's doc string. This is particularly useful
+ for the ``do_...`` commands that hook into the help system.
+ Adapted from a comp.lang.python posting
+ by Duncan Booth."""
+ def wrapper(*args, **kw):
+ return new_fn(*args, **kw)
+ if old_fn.__doc__:
+ wrapper.__doc__ = old_fn.__doc__ + additional_text
+ return wrapper
+
+
+def _file_lines(fname):
+ """Return the contents of a named file as a list of lines.
+
+ This function never raises an IOError exception: if the file can't be
+ read, it simply returns an empty list."""
+
+ try:
+ outfile = open(fname)
+ except IOError:
+ return []
+ else:
+ out = outfile.readlines()
+ outfile.close()
+ return out
+
+
class Pdb(OldPdb):
"""Modified Pdb class, does not load readline.
-
+
for a standalone version that uses prompt_toolkit, see
`IPython.terminal.debugger.TerminalPdb` and
`IPython.terminal.debugger.set_trace()`
"""
def __init__(self, color_scheme=None, completekey=None,
- stdin=None, stdout=None, context=5):
-
- # Parent constructor:
- try:
+ stdin=None, stdout=None, context=5):
+
+ # Parent constructor:
+ try:
self.context = int(context)
- if self.context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
-
+ if self.context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError):
+ raise ValueError("Context must be a positive integer")
+
OldPdb.__init__(self, completekey, stdin, stdout)
-
- # IPython changes...
- self.shell = get_ipython()
-
- if self.shell is None:
+
+ # IPython changes...
+ self.shell = get_ipython()
+
+ if self.shell is None:
save_main = sys.modules['__main__']
- # No IPython instance running, we must create one
- from IPython.terminal.interactiveshell import \
- TerminalInteractiveShell
- self.shell = TerminalInteractiveShell.instance()
+ # No IPython instance running, we must create one
+ from IPython.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ self.shell = TerminalInteractiveShell.instance()
# needed by any code which calls __import__("__main__") after
# the debugger was entered. See also #9941.
sys.modules['__main__'] = save_main
-
+
if color_scheme is not None:
warnings.warn(
"The `color_scheme` argument is deprecated since version 5.1",
DeprecationWarning)
else:
color_scheme = self.shell.colors
-
- self.aliases = {}
-
- # Create color table: we copy the default one from the traceback
- # module and add a few attributes needed for debugging
- self.color_scheme_table = exception_colors()
-
- # shorthands
- C = coloransi.TermColors
- cst = self.color_scheme_table
-
- cst['NoColor'].colors.prompt = C.NoColor
- cst['NoColor'].colors.breakpoint_enabled = C.NoColor
- cst['NoColor'].colors.breakpoint_disabled = C.NoColor
-
- cst['Linux'].colors.prompt = C.Green
- cst['Linux'].colors.breakpoint_enabled = C.LightRed
- cst['Linux'].colors.breakpoint_disabled = C.Red
-
- cst['LightBG'].colors.prompt = C.Blue
- cst['LightBG'].colors.breakpoint_enabled = C.LightRed
- cst['LightBG'].colors.breakpoint_disabled = C.Red
-
+
+ self.aliases = {}
+
+ # Create color table: we copy the default one from the traceback
+ # module and add a few attributes needed for debugging
+ self.color_scheme_table = exception_colors()
+
+ # shorthands
+ C = coloransi.TermColors
+ cst = self.color_scheme_table
+
+ cst['NoColor'].colors.prompt = C.NoColor
+ cst['NoColor'].colors.breakpoint_enabled = C.NoColor
+ cst['NoColor'].colors.breakpoint_disabled = C.NoColor
+
+ cst['Linux'].colors.prompt = C.Green
+ cst['Linux'].colors.breakpoint_enabled = C.LightRed
+ cst['Linux'].colors.breakpoint_disabled = C.Red
+
+ cst['LightBG'].colors.prompt = C.Blue
+ cst['LightBG'].colors.breakpoint_enabled = C.LightRed
+ cst['LightBG'].colors.breakpoint_disabled = C.Red
+
cst['Neutral'].colors.prompt = C.Blue
cst['Neutral'].colors.breakpoint_enabled = C.LightRed
cst['Neutral'].colors.breakpoint_disabled = C.Red
- self.set_colors(color_scheme)
-
- # Add a python parser so we can syntax highlight source while
- # debugging.
- self.parser = PyColorize.Parser()
-
+ self.set_colors(color_scheme)
+
+ # Add a python parser so we can syntax highlight source while
+ # debugging.
+ self.parser = PyColorize.Parser()
+
# Set the prompt - the default prompt is '(Pdb)'
self.prompt = prompt
-
- def set_colors(self, scheme):
- """Shorthand access to the color table scheme selector method."""
- self.color_scheme_table.set_active_scheme(scheme)
-
- def interaction(self, frame, traceback):
+
+ def set_colors(self, scheme):
+ """Shorthand access to the color table scheme selector method."""
+ self.color_scheme_table.set_active_scheme(scheme)
+
+ def interaction(self, frame, traceback):
try:
OldPdb.interaction(self, frame, traceback)
except KeyboardInterrupt:
sys.stdout.write('\n' + self.shell.get_exception_only())
-
- def new_do_up(self, arg):
- OldPdb.do_up(self, arg)
- do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
-
- def new_do_down(self, arg):
- OldPdb.do_down(self, arg)
-
- do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
-
- def new_do_frame(self, arg):
- OldPdb.do_frame(self, arg)
-
- def new_do_quit(self, arg):
-
- if hasattr(self, 'old_all_completions'):
- self.shell.Completer.all_completions=self.old_all_completions
-
- return OldPdb.do_quit(self, arg)
-
- do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
-
- def new_do_restart(self, arg):
- """Restart command. In the context of ipython this is exactly the same
- thing as 'quit'."""
- self.msg("Restart doesn't make sense here. Using 'quit' instead.")
- return self.do_quit(arg)
-
- def print_stack_trace(self, context=None):
- if context is None:
- context = self.context
- try:
- context=int(context)
- if context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
- try:
- for frame_lineno in self.stack:
- self.print_stack_entry(frame_lineno, context=context)
- except KeyboardInterrupt:
- pass
-
+
+ def new_do_up(self, arg):
+ OldPdb.do_up(self, arg)
+ do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
+
+ def new_do_down(self, arg):
+ OldPdb.do_down(self, arg)
+
+ do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
+
+ def new_do_frame(self, arg):
+ OldPdb.do_frame(self, arg)
+
+ def new_do_quit(self, arg):
+
+ if hasattr(self, 'old_all_completions'):
+ self.shell.Completer.all_completions=self.old_all_completions
+
+ return OldPdb.do_quit(self, arg)
+
+ do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
+
+ def new_do_restart(self, arg):
+ """Restart command. In the context of ipython this is exactly the same
+ thing as 'quit'."""
+ self.msg("Restart doesn't make sense here. Using 'quit' instead.")
+ return self.do_quit(arg)
+
+ def print_stack_trace(self, context=None):
+ if context is None:
+ context = self.context
+ try:
+ context=int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError):
+ raise ValueError("Context must be a positive integer")
+ try:
+ for frame_lineno in self.stack:
+ self.print_stack_entry(frame_lineno, context=context)
+ except KeyboardInterrupt:
+ pass
+
def print_stack_entry(self,frame_lineno, prompt_prefix='\n-> ',
- context=None):
- if context is None:
- context = self.context
- try:
- context=int(context)
- if context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
+ context=None):
+ if context is None:
+ context = self.context
+ try:
+ context=int(context)
+ if context <= 0:
+ raise ValueError("Context must be a positive integer")
+ except (TypeError, ValueError):
+ raise ValueError("Context must be a positive integer")
print(self.format_stack_entry(frame_lineno, '', context))
-
- # vds: >>
- frame, lineno = frame_lineno
- filename = frame.f_code.co_filename
- self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
- # vds: <<
-
- def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
- if context is None:
- context = self.context
- try:
- context=int(context)
- if context <= 0:
- print("Context must be a positive integer")
- except (TypeError, ValueError):
- print("Context must be a positive integer")
- try:
- import reprlib # Py 3
- except ImportError:
- import repr as reprlib # Py 2
-
- ret = []
-
- Colors = self.color_scheme_table.active_colors
- ColorsNormal = Colors.Normal
- tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
- tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
- tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
- tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
- ColorsNormal)
-
- frame, lineno = frame_lineno
-
- return_value = ''
- if '__return__' in frame.f_locals:
- rv = frame.f_locals['__return__']
- #return_value += '->'
- return_value += reprlib.repr(rv) + '\n'
- ret.append(return_value)
-
- #s = filename + '(' + `lineno` + ')'
- filename = self.canonic(frame.f_code.co_filename)
- link = tpl_link % py3compat.cast_unicode(filename)
-
- if frame.f_code.co_name:
- func = frame.f_code.co_name
- else:
- func = "<lambda>"
-
- call = ''
- if func != '?':
- if '__args__' in frame.f_locals:
- args = reprlib.repr(frame.f_locals['__args__'])
- else:
- args = '()'
- call = tpl_call % (func, args)
-
- # The level info should be generated in the same format pdb uses, to
- # avoid breaking the pdbtrack functionality of python-mode in *emacs.
- if frame is self.curframe:
- ret.append('> ')
- else:
- ret.append(' ')
- ret.append(u'%s(%s)%s\n' % (link,lineno,call))
-
- start = lineno - 1 - context//2
- lines = ulinecache.getlines(filename)
- start = min(start, len(lines) - context)
- start = max(start, 0)
- lines = lines[start : start + context]
-
- for i,line in enumerate(lines):
- show_arrow = (start + 1 + i == lineno)
- linetpl = (frame is self.curframe or show_arrow) \
- and tpl_line_em \
- or tpl_line
- ret.append(self.__format_line(linetpl, filename,
- start + 1 + i, line,
- arrow = show_arrow) )
- return ''.join(ret)
-
- def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
- bp_mark = ""
- bp_mark_color = ""
-
- scheme = self.color_scheme_table.active_scheme_name
- new_line, err = self.parser.format2(line, 'str', scheme)
- if not err: line = new_line
-
- bp = None
- if lineno in self.get_file_breaks(filename):
- bps = self.get_breaks(filename, lineno)
- bp = bps[-1]
-
- if bp:
- Colors = self.color_scheme_table.active_colors
- bp_mark = str(bp.number)
- bp_mark_color = Colors.breakpoint_enabled
- if not bp.enabled:
- bp_mark_color = Colors.breakpoint_disabled
-
- numbers_width = 7
- if arrow:
- # This is the line with the error
- pad = numbers_width - len(str(lineno)) - len(bp_mark)
+
+ # vds: >>
+ frame, lineno = frame_lineno
+ filename = frame.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
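print_stack_entry feeds every jump through the synchronize_with_editor hook
so an external editor can follow the debugger. A sketch of registering one
(assuming `ip` is the running InteractiveShell instance):

    def sync(self, filename, lineno, column):
        # called with the file and line the debugger is currently showing
        print('jump to %s:%d' % (filename, lineno))

    ip.set_hook('synchronize_with_editor', sync)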
+ def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
+ if context is None:
+ context = self.context
+ try:
+ context=int(context)
+ if context <= 0:
+ print("Context must be a positive integer")
+ except (TypeError, ValueError):
+ print("Context must be a positive integer")
+ try:
+ import reprlib # Py 3
+ except ImportError:
+ import repr as reprlib # Py 2
+
+ ret = []
+
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
+ tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
+ tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
+ tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
+ ColorsNormal)
+
+ frame, lineno = frame_lineno
+
+ return_value = ''
+ if '__return__' in frame.f_locals:
+ rv = frame.f_locals['__return__']
+ #return_value += '->'
+ return_value += reprlib.repr(rv) + '\n'
+ ret.append(return_value)
+
+ #s = filename + '(' + `lineno` + ')'
+ filename = self.canonic(frame.f_code.co_filename)
+ link = tpl_link % py3compat.cast_unicode(filename)
+
+ if frame.f_code.co_name:
+ func = frame.f_code.co_name
+ else:
+ func = "<lambda>"
+
+ call = ''
+ if func != '?':
+ if '__args__' in frame.f_locals:
+ args = reprlib.repr(frame.f_locals['__args__'])
+ else:
+ args = '()'
+ call = tpl_call % (func, args)
+
+ # The level info should be generated in the same format pdb uses, to
+ # avoid breaking the pdbtrack functionality of python-mode in *emacs.
+ if frame is self.curframe:
+ ret.append('> ')
+ else:
+ ret.append(' ')
+ ret.append(u'%s(%s)%s\n' % (link,lineno,call))
+
+ start = lineno - 1 - context//2
+ lines = ulinecache.getlines(filename)
+ start = min(start, len(lines) - context)
+ start = max(start, 0)
+ lines = lines[start : start + context]
+
+ for i,line in enumerate(lines):
+ show_arrow = (start + 1 + i == lineno)
+ linetpl = (frame is self.curframe or show_arrow) \
+ and tpl_line_em \
+ or tpl_line
+ ret.append(self.__format_line(linetpl, filename,
+ start + 1 + i, line,
+ arrow = show_arrow) )
+ return ''.join(ret)
+
+ def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
+ bp_mark = ""
+ bp_mark_color = ""
+
+ scheme = self.color_scheme_table.active_scheme_name
+ new_line, err = self.parser.format2(line, 'str', scheme)
+ if not err: line = new_line
+
+ bp = None
+ if lineno in self.get_file_breaks(filename):
+ bps = self.get_breaks(filename, lineno)
+ bp = bps[-1]
+
+ if bp:
+ Colors = self.color_scheme_table.active_colors
+ bp_mark = str(bp.number)
+ bp_mark_color = Colors.breakpoint_enabled
+ if not bp.enabled:
+ bp_mark_color = Colors.breakpoint_disabled
+
+ numbers_width = 7
+ if arrow:
+ # This is the line with the error
+ pad = numbers_width - len(str(lineno)) - len(bp_mark)
num = '%s%s' % (make_arrow(pad), str(lineno))
- else:
- num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
-
+ else:
+ num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
+
return tpl_line % (bp_mark_color + bp_mark, num, line)
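The tpl_* strings above use two-stage %-formatting: '%%s' survives the first
substitution as a literal '%s' and is only filled on the second pass. A
standalone sketch, with assumed ANSI codes standing in for the scheme colors:

    lineno_color, normal = '\x1b[32m', '\x1b[0m'  # hypothetical scheme colors
    tpl_line = '%%s%s%%s %s%%s' % (lineno_color, normal)
    # tpl_line now has three '%s' slots: breakpoint mark, line number, source
    print(tpl_line % ('', ' 42', 'x = 1\n'), end='')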
-
-
- def print_list_lines(self, filename, first, last):
- """The printing (as opposed to the parsing part of a 'list'
- command."""
- try:
- Colors = self.color_scheme_table.active_colors
- ColorsNormal = Colors.Normal
- tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
- tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
- src = []
- if filename == "<string>" and hasattr(self, "_exec_filename"):
- filename = self._exec_filename
-
- for lineno in range(first, last+1):
- line = ulinecache.getline(filename, lineno)
- if not line:
- break
-
- if lineno == self.curframe.f_lineno:
- line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
- else:
- line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
-
- src.append(line)
- self.lineno = lineno
-
+
+
+ def print_list_lines(self, filename, first, last):
+ """The printing (as opposed to the parsing part of a 'list'
+ command."""
+ try:
+ Colors = self.color_scheme_table.active_colors
+ ColorsNormal = Colors.Normal
+ tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
+ tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
+ src = []
+ if filename == "<string>" and hasattr(self, "_exec_filename"):
+ filename = self._exec_filename
+
+ for lineno in range(first, last+1):
+ line = ulinecache.getline(filename, lineno)
+ if not line:
+ break
+
+ if lineno == self.curframe.f_lineno:
+ line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
+ else:
+ line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
+
+ src.append(line)
+ self.lineno = lineno
+
print(''.join(src))
-
- except KeyboardInterrupt:
- pass
-
- def do_list(self, arg):
+
+ except KeyboardInterrupt:
+ pass
+
+ def do_list(self, arg):
"""Print lines of code from the current stack frame
"""
- self.lastcmd = 'list'
- last = None
- if arg:
- try:
- x = eval(arg, {}, {})
- if type(x) == type(()):
- first, last = x
- first = int(first)
- last = int(last)
- if last < first:
- # Assume it's a count
- last = first + last
- else:
- first = max(1, int(x) - 5)
- except:
- print('*** Error in argument:', repr(arg))
- return
- elif self.lineno is None:
- first = max(1, self.curframe.f_lineno - 5)
- else:
- first = self.lineno + 1
- if last is None:
- last = first + 10
- self.print_list_lines(self.curframe.f_code.co_filename, first, last)
-
- # vds: >>
- lineno = first
- filename = self.curframe.f_code.co_filename
- self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
- # vds: <<
-
- do_l = do_list
-
- def getsourcelines(self, obj):
- lines, lineno = inspect.findsource(obj)
- if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
- # must be a module frame: do not try to cut a block out of it
- return lines, 1
- elif inspect.ismodule(obj):
- return lines, 1
- return inspect.getblock(lines[lineno:]), lineno+1
-
- def do_longlist(self, arg):
+ self.lastcmd = 'list'
+ last = None
+ if arg:
+ try:
+ x = eval(arg, {}, {})
+ if type(x) == type(()):
+ first, last = x
+ first = int(first)
+ last = int(last)
+ if last < first:
+ # Assume it's a count
+ last = first + last
+ else:
+ first = max(1, int(x) - 5)
+ except:
+ print('*** Error in argument:', repr(arg))
+ return
+ elif self.lineno is None:
+ first = max(1, self.curframe.f_lineno - 5)
+ else:
+ first = self.lineno + 1
+ if last is None:
+ last = first + 10
+ self.print_list_lines(self.curframe.f_code.co_filename, first, last)
+
+ # vds: >>
+ lineno = first
+ filename = self.curframe.f_code.co_filename
+ self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
+ # vds: <<
+
+ do_l = do_list
+
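do_list above parses its argument with eval(), so 'list 5,20' arrives as a
tuple and 'list 10' as an int; a sketch of the two branches:

    arg = '5,20'
    x = eval(arg, {}, {})          # empty globals/locals, as in do_list
    if type(x) == type(()):        # a tuple selects an explicit range
        first, last = int(x[0]), int(x[1])
    else:                          # a single number centers a window on it
        first = max(1, int(x) - 5)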
+ def getsourcelines(self, obj):
+ lines, lineno = inspect.findsource(obj)
+ if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
+ # must be a module frame: do not try to cut a block out of it
+ return lines, 1
+ elif inspect.ismodule(obj):
+ return lines, 1
+ return inspect.getblock(lines[lineno:]), lineno+1
+
+ def do_longlist(self, arg):
"""Print lines of code from the current stack frame.
Shows more lines than 'list' does.
"""
- self.lastcmd = 'longlist'
- try:
- lines, lineno = self.getsourcelines(self.curframe)
- except OSError as err:
- self.error(err)
- return
- last = lineno + len(lines)
- self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
- do_ll = do_longlist
-
- def do_pdef(self, arg):
- """Print the call signature for any callable object.
-
- The debugger interface to %pdef"""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
-
- def do_pdoc(self, arg):
- """Print the docstring for an object.
-
- The debugger interface to %pdoc."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
-
- def do_pfile(self, arg):
- """Print (or run through pager) the file where an object is defined.
-
- The debugger interface to %pfile.
- """
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
-
- def do_pinfo(self, arg):
- """Provide detailed information about an object.
-
- The debugger interface to %pinfo, i.e., obj?."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
-
- def do_pinfo2(self, arg):
- """Provide extra detailed information about an object.
-
- The debugger interface to %pinfo2, i.e., obj??."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
-
- def do_psource(self, arg):
- """Print (or run through pager) the source code for an object."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
-
- if sys.version_info > (3, ):
- def do_where(self, arg):
- """w(here)
- Print a stack trace, with the most recent frame at the bottom.
- An arrow indicates the "current frame", which determines the
- context of most commands. 'bt' is an alias for this command.
-
- Take a number as argument as an (optional) number of context line to
- print"""
- if arg:
- context = int(arg)
- self.print_stack_trace(context)
- else:
- self.print_stack_trace()
-
- do_w = do_where
+ self.lastcmd = 'longlist'
+ try:
+ lines, lineno = self.getsourcelines(self.curframe)
+ except OSError as err:
+ self.error(err)
+ return
+ last = lineno + len(lines)
+ self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
+ do_ll = do_longlist
+
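getsourcelines above defers to inspect; the same calls work standalone when
the object lives in a real source file (not in a REPL):

    import inspect

    def f():
        return 42

    lines, lineno = inspect.findsource(f)      # whole file + 0-based index
    block = inspect.getblock(lines[lineno:])   # just f's def block
    assert block[0].lstrip().startswith('def f')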
+ def do_pdef(self, arg):
+ """Print the call signature for any callable object.
+
+ The debugger interface to %pdef"""
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
+
+ def do_pdoc(self, arg):
+ """Print the docstring for an object.
+
+ The debugger interface to %pdoc."""
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
+
+ def do_pfile(self, arg):
+ """Print (or run through pager) the file where an object is defined.
+
+ The debugger interface to %pfile.
+ """
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
+
+ def do_pinfo(self, arg):
+ """Provide detailed information about an object.
+
+ The debugger interface to %pinfo, i.e., obj?."""
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
+
+ def do_pinfo2(self, arg):
+ """Provide extra detailed information about an object.
+
+ The debugger interface to %pinfo2, i.e., obj??."""
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
+
+ def do_psource(self, arg):
+ """Print (or run through pager) the source code for an object."""
+ namespaces = [('Locals', self.curframe.f_locals),
+ ('Globals', self.curframe.f_globals)]
+ self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
+
+ if sys.version_info > (3, ):
+ def do_where(self, arg):
+ """w(here)
+ Print a stack trace, with the most recent frame at the bottom.
+ An arrow indicates the "current frame", which determines the
+ context of most commands. 'bt' is an alias for this command.
+
+ Takes a number as an (optional) argument: the number of context lines
+ to print."""
+ if arg:
+ context = int(arg)
+ self.print_stack_trace(context)
+ else:
+ self.print_stack_trace()
+
+ do_w = do_where
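All the do_p* commands above share one shape: build a namespaces list from
the current frame and hand the argument to the matching line magic. The
common core, factored out as a sketch (hypothetical helper, not in IPython):

    def _run_magic(self, name, arg):
        # each do_p* above inlines exactly this body with its magic name
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic(name)(arg, namespaces=namespaces)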
def set_trace(frame=None):
diff --git a/contrib/python/ipython/py2/IPython/core/display.py b/contrib/python/ipython/py2/IPython/core/display.py
index 2c38b32933..5c82a57b31 100644
--- a/contrib/python/ipython/py2/IPython/core/display.py
+++ b/contrib/python/ipython/py2/IPython/core/display.py
@@ -1,124 +1,124 @@
-# -*- coding: utf-8 -*-
-"""Top-level display functions for displaying object in different formats."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
+# -*- coding: utf-8 -*-
+"""Top-level display functions for displaying object in different formats."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
from binascii import b2a_hex, hexlify
-import json
-import mimetypes
-import os
-import struct
+import json
+import mimetypes
+import os
+import struct
import sys
-import warnings
-
-from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
- unicode_type)
-from IPython.testing.skipdoctest import skip_doctest
-
-__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
-'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
-'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
+import warnings
+
+from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
+ unicode_type)
+from IPython.testing.skipdoctest import skip_doctest
+
+__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
+'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
+'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'ProgressBar', 'JSON', 'Javascript',
-'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
+'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data', 'update_display', 'DisplayHandle']
-
-#-----------------------------------------------------------------------------
-# utility functions
-#-----------------------------------------------------------------------------
-
-def _safe_exists(path):
- """Check path, but don't let exceptions raise"""
- try:
- return os.path.exists(path)
- except Exception:
- return False
-
-def _merge(d1, d2):
- """Like update, but merges sub-dicts instead of clobbering at the top level.
-
- Updates d1 in-place
- """
-
- if not isinstance(d2, dict) or not isinstance(d1, dict):
- return d2
- for key, value in d2.items():
- d1[key] = _merge(d1.get(key), value)
- return d1
-
-def _display_mimetype(mimetype, objs, raw=False, metadata=None):
- """internal implementation of all display_foo methods
-
- Parameters
- ----------
- mimetype : str
- The mimetype to be published (e.g. 'image/png')
- objs : tuple of objects
- The Python objects to display, or if raw=True raw text data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- if metadata:
- metadata = {mimetype: metadata}
- if raw:
- # turn list of pngdata into list of { 'image/png': pngdata }
- objs = [ {mimetype: obj} for obj in objs ]
- display(*objs, raw=raw, metadata=metadata, include=[mimetype])
-
-#-----------------------------------------------------------------------------
-# Main functions
-#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# utility functions
+#-----------------------------------------------------------------------------
+
+def _safe_exists(path):
+ """Check path, but don't let exceptions raise"""
+ try:
+ return os.path.exists(path)
+ except Exception:
+ return False
+
+def _merge(d1, d2):
+ """Like update, but merges sub-dicts instead of clobbering at the top level.
+
+ Updates d1 in-place
+ """
+
+ if not isinstance(d2, dict) or not isinstance(d1, dict):
+ return d2
+ for key, value in d2.items():
+ d1[key] = _merge(d1.get(key), value)
+ return d1
+
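_merge above recurses into nested dicts instead of clobbering them at the
top level; a quick illustration of the semantics:

    d1 = {'image/png': {'width': 100}, 'text/plain': {}}
    d2 = {'image/png': {'height': 50}}
    _merge(d1, d2)   # updates d1 in place
    assert d1['image/png'] == {'width': 100, 'height': 50}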
+def _display_mimetype(mimetype, objs, raw=False, metadata=None):
+ """internal implementation of all display_foo methods
+
+ Parameters
+ ----------
+ mimetype : str
+ The mimetype to be published (e.g. 'image/png')
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ if metadata:
+ metadata = {mimetype: metadata}
+ if raw:
+ # turn list of pngdata into list of { 'image/png': pngdata }
+ objs = [ {mimetype: obj} for obj in objs ]
+ display(*objs, raw=raw, metadata=metadata, include=[mimetype])
+
+#-----------------------------------------------------------------------------
+# Main functions
+#-----------------------------------------------------------------------------
# use * to indicate transient is keyword-only
def publish_display_data(data, metadata=None, source=None, **kwargs):
- """Publish data and metadata to all frontends.
-
- See the ``display_data`` message in the messaging documentation for
- more details about this message type.
-
- The following MIME types are currently implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- data : dict
- A dictionary having keys that are valid MIME types (like
- 'text/plain' or 'image/svg+xml') and values that are the data for
- that MIME type. The data itself must be a JSON'able data
- structure. Minimally all data should have the 'text/plain' data,
- which can be displayed by all frontends. If more than the plain
- text is given, it is up to the frontend to decide which
- representation to use.
- metadata : dict
- A dictionary for metadata related to the data. This can contain
- arbitrary key, value pairs that frontends can use to interpret
- the data. mime-type keys matching those in data can be used
- to specify metadata about particular representations.
- source : str, deprecated
- Unused.
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ The following MIME types are currently implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally all data should have the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. mime-type keys matching those in data can be used
+ to specify metadata about particular representations.
+ source : str, deprecated
+ Unused.
transient : dict, keyword-only
A dictionary of transient data, such as display_id.
- """
- from IPython.core.interactiveshell import InteractiveShell
+ """
+ from IPython.core.interactiveshell import InteractiveShell
display_pub = InteractiveShell.instance().display_pub
@@ -127,45 +127,45 @@ def publish_display_data(data, metadata=None, source=None, **kwargs):
# TODO: We could check for ipykernel version and provide a detailed upgrade message.
display_pub.publish(
- data=data,
- metadata=metadata,
+ data=data,
+ metadata=metadata,
**kwargs
- )
-
+ )
+
def _new_id():
"""Generate a new random text id with urandom"""
return b2a_hex(os.urandom(16)).decode('ascii')
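A usage sketch for publish_display_data (requires a running IPython shell;
keys are MIME types and values the payload for that type, per the docstring
above):

    publish_display_data(data={
        'text/plain': 'hello',
        'text/html': '<b>hello</b>',
    })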
-def display(*objs, **kwargs):
- """Display a Python object in all frontends.
-
- By default all representations will be computed and sent to the frontends.
- Frontends can decide which representation is used and how.
-
+def display(*objs, **kwargs):
+ """Display a Python object in all frontends.
+
+ By default all representations will be computed and sent to the frontends.
+ Frontends can decide which representation is used and how.
+
In terminal IPython this will be similar to using :func:`print`; for use in
richer frontends, see Jupyter notebook examples with rich display logic.
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display.
- raw : bool, optional
- Are the objects to be displayed already mimetype-keyed dicts of raw display data,
- or Python objects that need to be formatted before display? [default: False]
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display.
+ raw : bool, optional
+ Are the objects to be displayed already mimetype-keyed dicts of raw display data,
+ or Python objects that need to be formatted before display? [default: False]
include : list, tuple or set, optional
- A list of format type strings (MIME types) to include in the
- format data dict. If this is set *only* the format types included
- in this list will be computed.
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
exclude : list, tuple or set, optional
- A list of format type strings (MIME types) to exclude in the format
- data dict. If this is set all format types will be computed,
- except for those included in this argument.
- metadata : dict, optional
- A dictionary of metadata to associate with the output.
- mime-type keys in this dictionary will be associated with the individual
- representation formats, if they exist.
+ A list of format type strings (MIME types) to exclude in the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
+ metadata : dict, optional
+ A dictionary of metadata to associate with the output.
+ mime-type keys in this dictionary will be associated with the individual
+ representation formats, if they exist.
transient : dict, optional
A dictionary of transient data to associate with the output.
Data in this dict should not be persisted to files (e.g. notebooks).
@@ -276,8 +276,8 @@ def display(*objs, **kwargs):
from IPython.display import display
- """
- from IPython.core.interactiveshell import InteractiveShell
+ """
+ from IPython.core.interactiveshell import InteractiveShell
if not InteractiveShell.initialized():
# Directly print objects.
@@ -300,26 +300,26 @@ def display(*objs, **kwargs):
raise TypeError('display_id required for update_display')
if transient:
kwargs['transient'] = transient
-
- if not raw:
- format = InteractiveShell.instance().display_formatter.format
-
- for obj in objs:
- if raw:
+
+ if not raw:
+ format = InteractiveShell.instance().display_formatter.format
+
+ for obj in objs:
+ if raw:
publish_display_data(data=obj, metadata=metadata, **kwargs)
- else:
- format_dict, md_dict = format(obj, include=include, exclude=exclude)
- if not format_dict:
- # nothing to display (e.g. _ipython_display_ took over)
- continue
- if metadata:
- # kwarg-specified metadata gets precedence
- _merge(md_dict, metadata)
+ else:
+ format_dict, md_dict = format(obj, include=include, exclude=exclude)
+ if not format_dict:
+ # nothing to display (e.g. _ipython_display_ took over)
+ continue
+ if metadata:
+ # kwarg-specified metadata gets precedence
+ _merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
if display_id:
return DisplayHandle(display_id)
-
-
+
+
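A sketch of the raw/display_id path through display() described above (it
needs an initialized InteractiveShell to actually render anything):

    handle = display({'text/plain': 'working...'}, raw=True, display_id=True)
    # the returned DisplayHandle can re-render the same output in place:
    handle.update({'text/plain': 'done.'}, raw=True)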
# use * for keyword-only display_id arg
def update_display(obj, **kwargs):
"""Update an existing display by id
@@ -395,346 +395,346 @@ class DisplayHandle(object):
update_display(obj, display_id=self.display_id, **kwargs)
-def display_pretty(*objs, **kwargs):
- """Display the pretty (default) representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw text data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/plain', objs, **kwargs)
-
-
-def display_html(*objs, **kwargs):
- """Display the HTML representation of an object.
-
- Note: If raw=False and the object does not have a HTML
- representation, no HTML will be shown.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw HTML data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/html', objs, **kwargs)
-
-
-def display_markdown(*objs, **kwargs):
- """Displays the Markdown representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw markdown data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
-
- _display_mimetype('text/markdown', objs, **kwargs)
-
-
-def display_svg(*objs, **kwargs):
- """Display the SVG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw svg data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/svg+xml', objs, **kwargs)
-
-
-def display_png(*objs, **kwargs):
- """Display the PNG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw png data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/png', objs, **kwargs)
-
-
-def display_jpeg(*objs, **kwargs):
- """Display the JPEG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw JPEG data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/jpeg', objs, **kwargs)
-
-
-def display_latex(*objs, **kwargs):
- """Display the LaTeX representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw latex data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/latex', objs, **kwargs)
-
-
-def display_json(*objs, **kwargs):
- """Display the JSON representation of an object.
-
- Note that not many frontends support displaying JSON.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw json data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/json', objs, **kwargs)
-
-
-def display_javascript(*objs, **kwargs):
- """Display the Javascript representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw javascript data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/javascript', objs, **kwargs)
-
-
-def display_pdf(*objs, **kwargs):
- """Display the PDF representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw javascript data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/pdf', objs, **kwargs)
-
-
-#-----------------------------------------------------------------------------
-# Smart classes
-#-----------------------------------------------------------------------------
-
-
-class DisplayObject(object):
- """An object that wraps data to be displayed."""
-
- _read_flags = 'r'
- _show_mem_addr = False
-
- def __init__(self, data=None, url=None, filename=None):
- """Create a display object given raw data.
-
- When this object is returned by an expression or passed to the
- display function, it will result in the data being displayed
- in the frontend. The MIME type of the data should match the
- subclasses used, so the Png subclass should be used for 'image/png'
- data. If the data is a URL, the data will first be downloaded
- and then displayed. If
-
- Parameters
- ----------
- data : unicode, str or bytes
- The raw data or a URL or file to load the data from
- url : unicode
- A URL to download the data from.
- filename : unicode
- Path to a local file to load the data from.
- """
- if data is not None and isinstance(data, string_types):
- if data.startswith('http') and url is None:
- url = data
- filename = None
- data = None
- elif _safe_exists(data) and filename is None:
- url = None
- filename = data
- data = None
-
- self.data = data
- self.url = url
- self.filename = None if filename is None else unicode_type(filename)
-
- self.reload()
- self._check_data()
-
- def __repr__(self):
- if not self._show_mem_addr:
- cls = self.__class__
- r = "<%s.%s object>" % (cls.__module__, cls.__name__)
- else:
- r = super(DisplayObject, self).__repr__()
- return r
-
- def _check_data(self):
- """Override in subclasses if there's something to check."""
- pass
-
- def reload(self):
- """Reload the raw data from file or URL."""
- if self.filename is not None:
- with open(self.filename, self._read_flags) as f:
- self.data = f.read()
- elif self.url is not None:
- try:
- try:
- from urllib.request import urlopen # Py3
- except ImportError:
- from urllib2 import urlopen
- response = urlopen(self.url)
- self.data = response.read()
- # extract encoding from header, if there is one:
- encoding = None
- for sub in response.headers['content-type'].split(';'):
- sub = sub.strip()
- if sub.startswith('charset'):
- encoding = sub.split('=')[-1].strip()
- break
- # decode data, if an encoding was specified
- if encoding:
- self.data = self.data.decode(encoding, 'replace')
- except:
- self.data = None
-
-class TextDisplayObject(DisplayObject):
- """Validate that display data is text"""
- def _check_data(self):
- if self.data is not None and not isinstance(self.data, string_types):
- raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
-
-class Pretty(TextDisplayObject):
-
+def display_pretty(*objs, **kwargs):
+ """Display the pretty (default) representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw text data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/plain', objs, **kwargs)
+
+
+def display_html(*objs, **kwargs):
+ """Display the HTML representation of an object.
+
+ Note: If raw=False and the object does not have an HTML
+ representation, no HTML will be shown.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw HTML data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/html', objs, **kwargs)
+
+
+def display_markdown(*objs, **kwargs):
+ """Displays the Markdown representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw markdown data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+
+ _display_mimetype('text/markdown', objs, **kwargs)
+
+
+def display_svg(*objs, **kwargs):
+ """Display the SVG representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw svg data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/svg+xml', objs, **kwargs)
+
+
+def display_png(*objs, **kwargs):
+ """Display the PNG representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw png data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/png', objs, **kwargs)
+
+
+def display_jpeg(*objs, **kwargs):
+ """Display the JPEG representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw JPEG data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('image/jpeg', objs, **kwargs)
+
+
+def display_latex(*objs, **kwargs):
+ """Display the LaTeX representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw latex data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('text/latex', objs, **kwargs)
+
+
+def display_json(*objs, **kwargs):
+ """Display the JSON representation of an object.
+
+ Note that not many frontends support displaying JSON.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw json data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/json', objs, **kwargs)
+
+
+def display_javascript(*objs, **kwargs):
+ """Display the Javascript representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw javascript data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/javascript', objs, **kwargs)
+
+
+def display_pdf(*objs, **kwargs):
+ """Display the PDF representation of an object.
+
+ Parameters
+ ----------
+ objs : tuple of objects
+ The Python objects to display, or if raw=True raw PDF data to
+ display.
+ raw : bool
+ Are the data objects raw data or Python objects that need to be
+ formatted before display? [default: False]
+ metadata : dict (optional)
+ Metadata to be associated with the specific mimetype output.
+ """
+ _display_mimetype('application/pdf', objs, **kwargs)
+
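Usage sketch for the display_* helpers above, inside an IPython session;
with raw=True the argument is taken as ready-made data for that MIME type:

    display_markdown('**bold**', raw=True)
    display_json({'status': 'ok'}, raw=True)   # a dict, not a JSON string
    display_html('<i>inline html</i>', raw=True)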
+
+#-----------------------------------------------------------------------------
+# Smart classes
+#-----------------------------------------------------------------------------
+
+
+class DisplayObject(object):
+ """An object that wraps data to be displayed."""
+
+ _read_flags = 'r'
+ _show_mem_addr = False
+
+ def __init__(self, data=None, url=None, filename=None):
+ """Create a display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. The MIME type of the data should match the
+ subclass used, so the Png subclass should be used for 'image/png'
+ data. If the data is a URL, the data will first be downloaded
+ and then displayed.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw data or a URL or file to load the data from
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ """
+ if data is not None and isinstance(data, string_types):
+ if data.startswith('http') and url is None:
+ url = data
+ filename = None
+ data = None
+ elif _safe_exists(data) and filename is None:
+ url = None
+ filename = data
+ data = None
+
+ self.data = data
+ self.url = url
+ self.filename = None if filename is None else unicode_type(filename)
+
+ self.reload()
+ self._check_data()
+
+ def __repr__(self):
+ if not self._show_mem_addr:
+ cls = self.__class__
+ r = "<%s.%s object>" % (cls.__module__, cls.__name__)
+ else:
+ r = super(DisplayObject, self).__repr__()
+ return r
+
+ def _check_data(self):
+ """Override in subclasses if there's something to check."""
+ pass
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.filename is not None:
+ with open(self.filename, self._read_flags) as f:
+ self.data = f.read()
+ elif self.url is not None:
+ try:
+ try:
+ from urllib.request import urlopen # Py3
+ except ImportError:
+ from urllib2 import urlopen
+ response = urlopen(self.url)
+ self.data = response.read()
+ # extract encoding from header, if there is one:
+ encoding = None
+ for sub in response.headers['content-type'].split(';'):
+ sub = sub.strip()
+ if sub.startswith('charset'):
+ encoding = sub.split('=')[-1].strip()
+ break
+ # decode data, if an encoding was specified
+ if encoding:
+ self.data = self.data.decode(encoding, 'replace')
+ except:
+ self.data = None
+
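reload() above pulls the charset out of the Content-Type header by hand; the
parsing step in isolation:

    header = 'text/html; charset=utf-8'   # example header value
    encoding = None
    for sub in header.split(';'):
        sub = sub.strip()
        if sub.startswith('charset'):
            encoding = sub.split('=')[-1].strip()
            break
    assert encoding == 'utf-8'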
+class TextDisplayObject(DisplayObject):
+ """Validate that display data is text"""
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, string_types):
+ raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
+
+class Pretty(TextDisplayObject):
+
def _repr_pretty_(self, pp, cycle):
return pp.text(self.data)
-
-
-class HTML(TextDisplayObject):
-
- def _repr_html_(self):
- return self.data
-
- def __html__(self):
- """
- This method exists to inform other HTML-using modules (e.g. Markupsafe,
- htmltag, etc) that this object is HTML and does not need things like
- special characters (<>&) escaped.
- """
- return self._repr_html_()
-
-
-class Markdown(TextDisplayObject):
-
- def _repr_markdown_(self):
- return self.data
-
-
-class Math(TextDisplayObject):
-
- def _repr_latex_(self):
- s = self.data.strip('$')
- return "$$%s$$" % s
-
-
-class Latex(TextDisplayObject):
-
- def _repr_latex_(self):
- return self.data
-
-
-class SVG(DisplayObject):
-
+
+
+class HTML(TextDisplayObject):
+
+ def _repr_html_(self):
+ return self.data
+
+ def __html__(self):
+ """
+ This method exists to inform other HTML-using modules (e.g. Markupsafe,
+ htmltag, etc) that this object is HTML and does not need things like
+ special characters (<>&) escaped.
+ """
+ return self._repr_html_()
+
+
+class Markdown(TextDisplayObject):
+
+ def _repr_markdown_(self):
+ return self.data
+
+
+class Math(TextDisplayObject):
+
+ def _repr_latex_(self):
+ s = self.data.strip('$')
+ return "$$%s$$" % s
+
+
+class Latex(TextDisplayObject):
+
+ def _repr_latex_(self):
+ return self.data
+
+
+class SVG(DisplayObject):
+
_read_flags = 'rb'
- # wrap data in a property, which extracts the <svg> tag, discarding
- # document headers
- _data = None
-
- @property
- def data(self):
- return self._data
-
- @data.setter
- def data(self, svg):
- if svg is None:
- self._data = None
- return
- # parse into dom object
- from xml.dom import minidom
- svg = cast_bytes_py2(svg)
- x = minidom.parseString(svg)
- # get svg tag (should be 1)
- found_svg = x.getElementsByTagName('svg')
- if found_svg:
- svg = found_svg[0].toxml()
- else:
- # fallback on the input, trust the user
- # but this is probably an error.
- pass
- svg = cast_unicode(svg)
- self._data = svg
-
- def _repr_svg_(self):
- return self.data
-
+ # wrap data in a property, which extracts the <svg> tag, discarding
+ # document headers
+ _data = None
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, svg):
+ if svg is None:
+ self._data = None
+ return
+ # parse into dom object
+ from xml.dom import minidom
+ svg = cast_bytes_py2(svg)
+ x = minidom.parseString(svg)
+ # get svg tag (should be 1)
+ found_svg = x.getElementsByTagName('svg')
+ if found_svg:
+ svg = found_svg[0].toxml()
+ else:
+ # fallback on the input, trust the user
+ # but this is probably an error.
+ pass
+ svg = cast_unicode(svg)
+ self._data = svg
+
+ def _repr_svg_(self):
+ return self.data
+
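The SVG.data setter above re-serializes only the <svg> element, dropping any
XML prolog; the same minidom steps standalone:

    from xml.dom import minidom
    doc = '<?xml version="1.0"?><svg height="10"><rect/></svg>'
    found = minidom.parseString(doc).getElementsByTagName('svg')
    assert found[0].toxml() == '<svg height="10"><rect/></svg>'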
class ProgressBar(DisplayObject):
"""Progressbar supports displaying a progressbar like element
"""
@@ -751,7 +751,7 @@ class ProgressBar(DisplayObject):
self.html_width = '60ex'
self.text_width = 60
self._display_id = hexlify(os.urandom(8)).decode('ascii')
-
+
def __repr__(self):
fraction = self.progress / self.total
filled = '=' * int(fraction * self.text_width)
@@ -797,367 +797,367 @@ class ProgressBar(DisplayObject):
"""Python 2 compatibility"""
return self.__next__()
-class JSON(DisplayObject):
- """JSON expects a JSON-able dict or list
-
- not an already-serialized JSON string.
-
- Scalar types (None, number, string) are not allowed, only dict or list containers.
- """
- # wrap data in a property, which warns about passing already-serialized JSON
- _data = None
- def _check_data(self):
- if self.data is not None and not isinstance(self.data, (dict, list)):
- raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
-
- @property
- def data(self):
- return self._data
-
- @data.setter
- def data(self, data):
- if isinstance(data, string_types):
- warnings.warn("JSON expects JSONable dict or list, not JSON strings")
- data = json.loads(data)
- self._data = data
-
- def _repr_json_(self):
- return self.data
-
-css_t = """$("head").append($("<link/>").attr({
- rel: "stylesheet",
- type: "text/css",
- href: "%s"
-}));
-"""
-
-lib_t1 = """$.getScript("%s", function () {
-"""
-lib_t2 = """});
-"""
-
-class Javascript(TextDisplayObject):
-
- def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
- """Create a Javascript display object given raw data.
-
- When this object is returned by an expression or passed to the
- display function, it will result in the data being displayed
- in the frontend. If the data is a URL, the data will first be
- downloaded and then displayed.
-
- In the Notebook, the containing element will be available as `element`,
- and jQuery will be available. Content appended to `element` will be
- visible in the output area.
-
- Parameters
- ----------
- data : unicode, str or bytes
- The Javascript source code or a URL to download it from.
- url : unicode
- A URL to download the data from.
- filename : unicode
- Path to a local file to load the data from.
- lib : list or str
- A sequence of Javascript library URLs to load asynchronously before
- running the source code. The full URLs of the libraries should
- be given. A single Javascript library URL can also be given as a
- string.
- css: : list or str
- A sequence of css files to load before running the source code.
- The full URLs of the css files should be given. A single css URL
- can also be given as a string.
- """
- if isinstance(lib, string_types):
- lib = [lib]
- elif lib is None:
- lib = []
- if isinstance(css, string_types):
- css = [css]
- elif css is None:
- css = []
- if not isinstance(lib, (list,tuple)):
- raise TypeError('expected sequence, got: %r' % lib)
- if not isinstance(css, (list,tuple)):
- raise TypeError('expected sequence, got: %r' % css)
- self.lib = lib
- self.css = css
- super(Javascript, self).__init__(data=data, url=url, filename=filename)
-
- def _repr_javascript_(self):
- r = ''
- for c in self.css:
- r += css_t % c
- for l in self.lib:
- r += lib_t1 % l
- r += self.data
- r += lib_t2*len(self.lib)
- return r
-
-# constants for identifying png/jpeg data
-_PNG = b'\x89PNG\r\n\x1a\n'
-_JPEG = b'\xff\xd8'
-
-def _pngxy(data):
- """read the (width, height) from a PNG header"""
- ihdr = data.index(b'IHDR')
- # next 8 bytes are width/height
- w4h4 = data[ihdr+4:ihdr+12]
- return struct.unpack('>ii', w4h4)
-
-def _jpegxy(data):
- """read the (width, height) from a JPEG header"""
- # adapted from http://www.64lines.com/jpeg-width-height
-
- idx = 4
- while True:
- block_size = struct.unpack('>H', data[idx:idx+2])[0]
- idx = idx + block_size
- if data[idx:idx+2] == b'\xFF\xC0':
- # found Start of Frame
- iSOF = idx
- break
- else:
- # read another block
- idx += 2
-
- h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
- return w, h
-
-class Image(DisplayObject):
-
- _read_flags = 'rb'
- _FMT_JPEG = u'jpeg'
- _FMT_PNG = u'png'
- _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
-
- def __init__(self, data=None, url=None, filename=None, format=None,
- embed=None, width=None, height=None, retina=False,
- unconfined=False, metadata=None):
- """Create a PNG/JPEG image object given raw data.
-
- When this object is returned by an input cell or passed to the
- display function, it will result in the image being displayed
- in the frontend.
-
- Parameters
- ----------
- data : unicode, str or bytes
- The raw image data or a URL or filename to load the data from.
- This always results in embedded image data.
- url : unicode
- A URL to download the data from. If you specify `url=`,
- the image data will not be embedded unless you also specify `embed=True`.
- filename : unicode
- Path to a local file to load the data from.
- Images from a file are always embedded.
- format : unicode
- The format of the image data (png/jpeg/jpg). If a filename or URL is given
- for format will be inferred from the filename extension.
- embed : bool
- Should the image data be embedded using a data URI (True) or be
- loaded using an <img> tag. Set this to True if you want the image
- to be viewable later with no internet connection in the notebook.
-
- Default is `True`, unless the keyword argument `url` is set, then
- default value is `False`.
-
- Note that QtConsole is not able to display images if `embed` is set to `False`
- width : int
+class JSON(DisplayObject):
+ """JSON expects a JSON-able dict or list
+
+ not an already-serialized JSON string.
+
+ Scalar types (None, number, string) are not allowed, only dict or list containers.
+ """
+ # wrap data in a property, which warns about passing already-serialized JSON
+ _data = None
+ def _check_data(self):
+ if self.data is not None and not isinstance(self.data, (dict, list)):
+ raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
+
+ @property
+ def data(self):
+ return self._data
+
+ @data.setter
+ def data(self, data):
+ if isinstance(data, string_types):
+ warnings.warn("JSON expects JSONable dict or list, not JSON strings")
+ data = json.loads(data)
+ self._data = data
+
+ def _repr_json_(self):
+ return self.data
+
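JSON above insists on a structure, not serialized text; a string payload is
parsed back (with a warning) by the data setter:

    JSON({'a': [1, 2]})       # ok: stored as-is
    JSON('{"a": [1, 2]}')     # warns, then stores json.loads() of the string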
+css_t = """$("head").append($("<link/>").attr({
+ rel: "stylesheet",
+ type: "text/css",
+ href: "%s"
+}));
+"""
+
+lib_t1 = """$.getScript("%s", function () {
+"""
+lib_t2 = """});
+"""
+
+class Javascript(TextDisplayObject):
+
+ def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
+ """Create a Javascript display object given raw data.
+
+ When this object is returned by an expression or passed to the
+ display function, it will result in the data being displayed
+ in the frontend. If the data is a URL, the data will first be
+ downloaded and then displayed.
+
+ In the Notebook, the containing element will be available as `element`,
+ and jQuery will be available. Content appended to `element` will be
+ visible in the output area.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The Javascript source code or a URL to download it from.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ lib : list or str
+ A sequence of Javascript library URLs to load asynchronously before
+ running the source code. The full URLs of the libraries should
+ be given. A single Javascript library URL can also be given as a
+ string.
+ css : list or str
+ A sequence of css files to load before running the source code.
+ The full URLs of the css files should be given. A single css URL
+ can also be given as a string.
+ """
+ if isinstance(lib, string_types):
+ lib = [lib]
+ elif lib is None:
+ lib = []
+ if isinstance(css, string_types):
+ css = [css]
+ elif css is None:
+ css = []
+ if not isinstance(lib, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % lib)
+ if not isinstance(css, (list,tuple)):
+ raise TypeError('expected sequence, got: %r' % css)
+ self.lib = lib
+ self.css = css
+ super(Javascript, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_javascript_(self):
+ r = ''
+ for c in self.css:
+ r += css_t % c
+ for l in self.lib:
+ r += lib_t1 % l
+ r += self.data
+ r += lib_t2*len(self.lib)
+ return r
+
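A sketch of what _repr_javascript_ above assembles: each css entry becomes a
$("head").append prologue and each lib a nested $.getScript callback around
the source (the URLs here are placeholders):

    js = Javascript("console.log('hi');",
                    lib='https://example.org/d3.js',
                    css='https://example.org/style.css')
    print(js._repr_javascript_())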
+# constants for identifying png/jpeg data
+_PNG = b'\x89PNG\r\n\x1a\n'
+_JPEG = b'\xff\xd8'
+
+def _pngxy(data):
+ """read the (width, height) from a PNG header"""
+ ihdr = data.index(b'IHDR')
+ # next 8 bytes are width/height
+ w4h4 = data[ihdr+4:ihdr+12]
+ return struct.unpack('>ii', w4h4)
+
+def _jpegxy(data):
+ """read the (width, height) from a JPEG header"""
+ # adapted from http://www.64lines.com/jpeg-width-height
+
+ idx = 4
+ while True:
+ block_size = struct.unpack('>H', data[idx:idx+2])[0]
+ idx = idx + block_size
+ if data[idx:idx+2] == b'\xFF\xC0':
+ # found Start of Frame
+ iSOF = idx
+ break
+ else:
+ # read another block
+ idx += 2
+
+ h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
+ return w, h
+
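_pngxy above reads the dimensions straight out of the IHDR chunk; checking
it against a hand-built header for a 1x1 image:

    import struct
    header = _PNG + struct.pack('>I', 13) + b'IHDR' + struct.pack('>ii', 1, 1)
    assert _pngxy(header) == (1, 1)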
+class Image(DisplayObject):
+
+ _read_flags = 'rb'
+ _FMT_JPEG = u'jpeg'
+ _FMT_PNG = u'png'
+ _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
+
+ def __init__(self, data=None, url=None, filename=None, format=None,
+ embed=None, width=None, height=None, retina=False,
+ unconfined=False, metadata=None):
+ """Create a PNG/JPEG image object given raw data.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the image being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
+ The raw image data or a URL or filename to load the data from.
+ This always results in embedded image data.
+ url : unicode
+ A URL to download the data from. If you specify `url=`,
+ the image data will not be embedded unless you also specify `embed=True`.
+ filename : unicode
+ Path to a local file to load the data from.
+ Images from a file are always embedded.
+ format : unicode
+ The format of the image data (png/jpeg/jpg). If a filename or URL is
+ given, the format will be inferred from the filename extension.
+ embed : bool
+ Should the image data be embedded using a data URI (True) or be
+ loaded using an <img> tag (False)? Set this to True if you want the
+ image to be viewable later with no internet connection in the notebook.
+
+ Default is `True`, unless the keyword argument `url` is set, in which
+ case the default is `False`.
+
+ Note that the QtConsole is not able to display images if `embed` is set to `False`.
+ width : int
Width in pixels to which to constrain the image in HTML
- height : int
+ height : int
Height in pixels to which to constrain the image in HTML
- retina : bool
- Automatically set the width and height to half of the measured
- width and height.
- This only works for embedded images because it reads the width/height
- from image data.
- For non-embedded images, you can just set the desired display width
- and height directly.
- unconfined : bool
- Set unconfined=True to disable max-width confinement of the image.
- metadata : dict
- Specify extra metadata to attach to the image.
-
- Examples
- --------
- # embedded image data, works in qtconsole and notebook
- # when passed positionally, the first arg can be any of raw image data,
- # a URL, or a filename from which to load image data.
- # The result is always embedded image data for inline images.
- Image('http://www.google.fr/images/srpr/logo3w.png')
- Image('/path/to/image.jpg')
- Image(b'RAW_PNG_DATA...')
-
- # Specifying Image(url=...) does not embed the image data,
- # it only generates an `<img>` tag with a link to the source.
- # This will not work in the qtconsole or offline.
- Image(url='http://www.google.fr/images/srpr/logo3w.png')
-
- """
- if filename is not None:
- ext = self._find_ext(filename)
- elif url is not None:
- ext = self._find_ext(url)
- elif data is None:
- raise ValueError("No image data found. Expecting filename, url, or data.")
- elif isinstance(data, string_types) and (
- data.startswith('http') or _safe_exists(data)
- ):
- ext = self._find_ext(data)
- else:
- ext = None
-
- if format is None:
- if ext is not None:
- if ext == u'jpg' or ext == u'jpeg':
- format = self._FMT_JPEG
+ retina : bool
+ Automatically set the width and height to half of the measured
+ width and height.
+ This only works for embedded images because it reads the width/height
+ from image data.
+ For non-embedded images, you can just set the desired display width
+ and height directly.
+ unconfined : bool
+ Set unconfined=True to disable max-width confinement of the image.
+ metadata : dict
+ Specify extra metadata to attach to the image.
+
+ Examples
+ --------
+ # embedded image data, works in qtconsole and notebook
+ # when passed positionally, the first arg can be any of raw image data,
+ # a URL, or a filename from which to load image data.
+ # The result is always embedded image data for inline images.
+ Image('http://www.google.fr/images/srpr/logo3w.png')
+ Image('/path/to/image.jpg')
+ Image(b'RAW_PNG_DATA...')
+
+ # Specifying Image(url=...) does not embed the image data,
+ # it only generates an `<img>` tag with a link to the source.
+ # This will not work in the qtconsole or offline.
+ Image(url='http://www.google.fr/images/srpr/logo3w.png')
+
+ """
+ if filename is not None:
+ ext = self._find_ext(filename)
+ elif url is not None:
+ ext = self._find_ext(url)
+ elif data is None:
+ raise ValueError("No image data found. Expecting filename, url, or data.")
+ elif isinstance(data, string_types) and (
+ data.startswith('http') or _safe_exists(data)
+ ):
+ ext = self._find_ext(data)
+ else:
+ ext = None
+
+ if format is None:
+ if ext is not None:
+ if ext == u'jpg' or ext == u'jpeg':
+ format = self._FMT_JPEG
elif ext == u'png':
- format = self._FMT_PNG
- else:
- format = ext.lower()
- elif isinstance(data, bytes):
- # infer image type from image data header,
- # only if format has not been specified.
- if data[:2] == _JPEG:
- format = self._FMT_JPEG
-
- # failed to detect format, default png
- if format is None:
- format = 'png'
-
- if format.lower() == 'jpg':
- # jpg->jpeg
- format = self._FMT_JPEG
-
- self.format = unicode_type(format).lower()
- self.embed = embed if embed is not None else (url is None)
-
- if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
- raise ValueError("Cannot embed the '%s' image format" % (self.format))
- self.width = width
- self.height = height
- self.retina = retina
- self.unconfined = unconfined
- self.metadata = metadata
- super(Image, self).__init__(data=data, url=url, filename=filename)
-
- if retina:
- self._retina_shape()
-
- def _retina_shape(self):
- """load pixel-doubled width and height from image data"""
- if not self.embed:
- return
- if self.format == 'png':
- w, h = _pngxy(self.data)
- elif self.format == 'jpeg':
- w, h = _jpegxy(self.data)
- else:
- # retina only supports png
- return
- self.width = w // 2
- self.height = h // 2
-
- def reload(self):
- """Reload the raw data from file or URL."""
- if self.embed:
- super(Image,self).reload()
- if self.retina:
- self._retina_shape()
-
- def _repr_html_(self):
- if not self.embed:
- width = height = klass = ''
- if self.width:
- width = ' width="%d"' % self.width
- if self.height:
- height = ' height="%d"' % self.height
- if self.unconfined:
- klass = ' class="unconfined"'
- return u'<img src="{url}"{width}{height}{klass}/>'.format(
- url=self.url,
- width=width,
- height=height,
- klass=klass,
- )
-
- def _data_and_metadata(self):
- """shortcut for returning metadata with shape information, if defined"""
- md = {}
- if self.width:
- md['width'] = self.width
- if self.height:
- md['height'] = self.height
- if self.unconfined:
- md['unconfined'] = self.unconfined
- if self.metadata:
- md.update(self.metadata)
- if md:
- return self.data, md
- else:
- return self.data
-
- def _repr_png_(self):
- if self.embed and self.format == u'png':
- return self._data_and_metadata()
-
- def _repr_jpeg_(self):
- if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
- return self._data_and_metadata()
-
- def _find_ext(self, s):
- return unicode_type(s.split('.')[-1].lower())
-
-class Video(DisplayObject):
-
+ format = self._FMT_PNG
+ else:
+ format = ext.lower()
+ elif isinstance(data, bytes):
+ # infer image type from image data header,
+ # only if format has not been specified.
+ if data[:2] == _JPEG:
+ format = self._FMT_JPEG
+
+ # failed to detect format, default png
+ if format is None:
+ format = 'png'
+
+ if format.lower() == 'jpg':
+ # jpg->jpeg
+ format = self._FMT_JPEG
+
+ self.format = unicode_type(format).lower()
+ self.embed = embed if embed is not None else (url is None)
+
+ if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
+ raise ValueError("Cannot embed the '%s' image format" % (self.format))
+ self.width = width
+ self.height = height
+ self.retina = retina
+ self.unconfined = unconfined
+ self.metadata = metadata
+ super(Image, self).__init__(data=data, url=url, filename=filename)
+
+ if retina:
+ self._retina_shape()
+
+ def _retina_shape(self):
+ """load pixel-doubled width and height from image data"""
+ if not self.embed:
+ return
+ if self.format == 'png':
+ w, h = _pngxy(self.data)
+ elif self.format == 'jpeg':
+ w, h = _jpegxy(self.data)
+ else:
+ # retina only supports png
+ return
+ self.width = w // 2
+ self.height = h // 2
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ if self.embed:
+ super(Image,self).reload()
+ if self.retina:
+ self._retina_shape()
+
+ def _repr_html_(self):
+ if not self.embed:
+ width = height = klass = ''
+ if self.width:
+ width = ' width="%d"' % self.width
+ if self.height:
+ height = ' height="%d"' % self.height
+ if self.unconfined:
+ klass = ' class="unconfined"'
+ return u'<img src="{url}"{width}{height}{klass}/>'.format(
+ url=self.url,
+ width=width,
+ height=height,
+ klass=klass,
+ )
+
+ def _data_and_metadata(self):
+ """shortcut for returning metadata with shape information, if defined"""
+ md = {}
+ if self.width:
+ md['width'] = self.width
+ if self.height:
+ md['height'] = self.height
+ if self.unconfined:
+ md['unconfined'] = self.unconfined
+ if self.metadata:
+ md.update(self.metadata)
+ if md:
+ return self.data, md
+ else:
+ return self.data
+
+ def _repr_png_(self):
+ if self.embed and self.format == u'png':
+ return self._data_and_metadata()
+
+ def _repr_jpeg_(self):
+ if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
+ return self._data_and_metadata()
+
+ def _find_ext(self, s):
+ return unicode_type(s.split('.')[-1].lower())
+
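A short sketch of the embedding and retina behaviour documented above (logo.png is a hypothetical local file)::

    from IPython.display import Image, display

    img = Image(filename='logo.png', retina=True)  # embedded; width/height halved
    display(img)

    # url= alone embeds nothing and renders a plain <img> tag:
    Image(url='http://www.google.fr/images/srpr/logo3w.png')
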
+class Video(DisplayObject):
+
def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
- """Create a video object given raw data or an URL.
-
- When this object is returned by an input cell or passed to the
- display function, it will result in the video being displayed
- in the frontend.
-
- Parameters
- ----------
- data : unicode, str or bytes
+ """Create a video object given raw data or an URL.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in the video being displayed
+ in the frontend.
+
+ Parameters
+ ----------
+ data : unicode, str or bytes
The raw video data or a URL or filename to load the data from.
Raw data will require passing `embed=True`.
- url : unicode
+ url : unicode
A URL for the video. If you specify `url=`,
the video data will not be embedded.
- filename : unicode
+ filename : unicode
Path to a local file containing the video.
Will be interpreted as a local URL unless `embed=True`.
- embed : bool
+ embed : bool
Should the video be embedded using a data URI (True) or be
loaded using a <video> tag (False).
-
+
Since videos are large, embedding them should be avoided, if possible.
You must confirm embedding as your intention by passing `embed=True`.
-
+
Local files can be displayed with URLs without embedding the content, via::
Video('./video.mp4')
- mimetype : unicode
+ mimetype : unicode
Specify the mimetype for embedded videos.
Default will be guessed from the file extension, if available.
- Examples
- --------
+ Examples
+ --------
- Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
- Video('path/to/video.mp4')
+ Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
+ Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=True)
Video(b'raw-videodata', embed=True)
- """
+ """
if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
- url = data
- data = None
- elif os.path.exists(data):
- filename = data
- data = None
+ url = data
+ data = None
+ elif os.path.exists(data):
+ filename = data
+ data = None
if data and not embed:
msg = ''.join([
@@ -1166,30 +1166,30 @@ class Video(DisplayObject):
"Consider passing Video(url='...')",
])
raise ValueError(msg)
-
- self.mimetype = mimetype
+
+ self.mimetype = mimetype
self.embed = embed
- super(Video, self).__init__(data=data, url=url, filename=filename)
-
- def _repr_html_(self):
- # External URLs and potentially local files are not embedded into the
- # notebook output.
- if not self.embed:
- url = self.url if self.url is not None else self.filename
- output = """<video src="{0}" controls>
- Your browser does not support the <code>video</code> element.
- </video>""".format(url)
- return output
+ super(Video, self).__init__(data=data, url=url, filename=filename)
+
+ def _repr_html_(self):
+ # External URLs and potentially local files are not embedded into the
+ # notebook output.
+ if not self.embed:
+ url = self.url if self.url is not None else self.filename
+ output = """<video src="{0}" controls>
+ Your browser does not support the <code>video</code> element.
+ </video>""".format(url)
+ return output
# Embedded videos are base64-encoded.
mimetype = self.mimetype
- if self.filename is not None:
+ if self.filename is not None:
if not mimetype:
mimetype, _ = mimetypes.guess_type(self.filename)
with open(self.filename, 'rb') as f:
video = f.read()
- else:
+ else:
video = self.data
if isinstance(video, unicode_type):
# unicode input is already b64-encoded
@@ -1197,94 +1197,94 @@ class Video(DisplayObject):
else:
b64_video = base64_encode(video).decode('ascii').rstrip()
- output = """<video controls>
- <source src="data:{0};base64,{1}" type="{0}">
- Your browser does not support the video tag.
+ output = """<video controls>
+ <source src="data:{0};base64,{1}" type="{0}">
+ Your browser does not support the video tag.
</video>""".format(mimetype, b64_video)
- return output
-
- def reload(self):
- # TODO
- pass
-
- def _repr_png_(self):
- # TODO
- pass
- def _repr_jpeg_(self):
- # TODO
- pass
-
-def clear_output(wait=False):
- """Clear the output of the current cell receiving output.
-
- Parameters
- ----------
- wait : bool [default: False]
- Wait to clear the output until new output is available to replace it."""
- from IPython.core.interactiveshell import InteractiveShell
- if InteractiveShell.initialized():
- InteractiveShell.instance().display_pub.clear_output(wait)
- else:
+ return output
+
+ def reload(self):
+ # TODO
+ pass
+
+ def _repr_png_(self):
+ # TODO
+ pass
+ def _repr_jpeg_(self):
+ # TODO
+ pass
+
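A usage sketch matching the docstring above (clip.mp4 is a hypothetical local file)::

    from IPython.display import Video

    Video('clip.mp4')                                    # <video> tag, nothing embedded
    Video('clip.mp4', embed=True, mimetype='video/mp4')  # base64 data URI
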
+def clear_output(wait=False):
+ """Clear the output of the current cell receiving output.
+
+ Parameters
+ ----------
+ wait : bool [default: False]
+ Wait to clear the output until new output is available to replace it."""
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ InteractiveShell.instance().display_pub.clear_output(wait)
+ else:
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
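A typical pattern is to overwrite a cell's output in a loop rather than append to it; a minimal sketch::

    import time
    from IPython.display import clear_output

    for i in range(5):
        clear_output(wait=True)   # defer the clear until new output arrives
        print('step %d' % i)
        time.sleep(0.2)
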
-
-
-@skip_doctest
-def set_matplotlib_formats(*formats, **kwargs):
- """Select figure formats for the inline backend. Optionally pass quality for JPEG.
-
- For example, this enables PNG and JPEG output with a JPEG quality of 90%::
-
- In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
-
- To set this in your config files use the following::
-
- c.InlineBackend.figure_formats = {'png', 'jpeg'}
- c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
-
- Parameters
- ----------
- *formats : strs
- One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
- **kwargs :
- Keyword args will be relayed to ``figure.canvas.print_figure``.
- """
- from IPython.core.interactiveshell import InteractiveShell
- from IPython.core.pylabtools import select_figure_formats
- # build kwargs, starting with InlineBackend config
- kw = {}
- from ipykernel.pylab.config import InlineBackend
- cfg = InlineBackend.instance()
- kw.update(cfg.print_figure_kwargs)
- kw.update(**kwargs)
- shell = InteractiveShell.instance()
- select_figure_formats(shell, formats, **kw)
-
-@skip_doctest
-def set_matplotlib_close(close=True):
- """Set whether the inline backend closes all figures automatically or not.
-
- By default, the inline backend used in the IPython Notebook will close all
- matplotlib figures automatically after each cell is run. This means that
- plots in different cells won't interfere. Sometimes, you may want to make
- a plot in one cell and then refine it in later cells. This can be accomplished
- by::
-
- In [1]: set_matplotlib_close(False)
-
- To set this in your config files use the following::
-
- c.InlineBackend.close_figures = False
-
- Parameters
- ----------
- close : bool
- Should all matplotlib figures be automatically closed after each cell is
- run?
- """
- from ipykernel.pylab.config import InlineBackend
- cfg = InlineBackend.instance()
- cfg.close_figures = close
-
+
+
+@skip_doctest
+def set_matplotlib_formats(*formats, **kwargs):
+ """Select figure formats for the inline backend. Optionally pass quality for JPEG.
+
+ For example, this enables PNG and JPEG output with a JPEG quality of 90%::
+
+ In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.figure_formats = {'png', 'jpeg'}
+ c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
+
+ Parameters
+ ----------
+ *formats : strs
+ One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs :
+ Keyword args will be relayed to ``figure.canvas.print_figure``.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+ from IPython.core.pylabtools import select_figure_formats
+ # build kwargs, starting with InlineBackend config
+ kw = {}
+ from ipykernel.pylab.config import InlineBackend
+ cfg = InlineBackend.instance()
+ kw.update(cfg.print_figure_kwargs)
+ kw.update(**kwargs)
+ shell = InteractiveShell.instance()
+ select_figure_formats(shell, formats, **kw)
+
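Beyond the docstring's example, a common single-format call enables high-DPI inline figures::

    set_matplotlib_formats('retina')
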
+@skip_doctest
+def set_matplotlib_close(close=True):
+ """Set whether the inline backend closes all figures automatically or not.
+
+ By default, the inline backend used in the IPython Notebook will close all
+ matplotlib figures automatically after each cell is run. This means that
+ plots in different cells won't interfere. Sometimes, you may want to make
+ a plot in one cell and then refine it in later cells. This can be accomplished
+ by::
+
+ In [1]: set_matplotlib_close(False)
+
+ To set this in your config files use the following::
+
+ c.InlineBackend.close_figures = False
+
+ Parameters
+ ----------
+ close : bool
+ Should all matplotlib figures be automatically closed after each cell is
+ run?
+ """
+ from ipykernel.pylab.config import InlineBackend
+ cfg = InlineBackend.instance()
+ cfg.close_figures = close
+
diff --git a/contrib/python/ipython/py2/IPython/core/display_trap.py b/contrib/python/ipython/py2/IPython/core/display_trap.py
index 7a48a5e119..9931dfe2df 100644
--- a/contrib/python/ipython/py2/IPython/core/display_trap.py
+++ b/contrib/python/ipython/py2/IPython/core/display_trap.py
@@ -1,70 +1,70 @@
-# encoding: utf-8
-"""
-A context manager for handling sys.displayhook.
-
-Authors:
-
-* Robert Kern
-* Brian Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-
-from traitlets.config.configurable import Configurable
-from traitlets import Any
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-
-class DisplayTrap(Configurable):
- """Object to manage sys.displayhook.
-
- This came from IPython.core.kernel.display_hook, but is simplified
- (no callbacks or formatters) until more of the core is refactored.
- """
-
- hook = Any()
-
- def __init__(self, hook=None):
- super(DisplayTrap, self).__init__(hook=hook, config=None)
- self.old_hook = None
- # We define this to track if a single BuiltinTrap is nested.
- # Only turn off the trap when the outermost call to __exit__ is made.
- self._nested_level = 0
-
- def __enter__(self):
- if self._nested_level == 0:
- self.set()
- self._nested_level += 1
- return self
-
- def __exit__(self, type, value, traceback):
- if self._nested_level == 1:
- self.unset()
- self._nested_level -= 1
- # Returning False will cause exceptions to propagate
- return False
-
- def set(self):
- """Set the hook."""
- if sys.displayhook is not self.hook:
- self.old_hook = sys.displayhook
- sys.displayhook = self.hook
-
- def unset(self):
- """Unset the hook."""
- sys.displayhook = self.old_hook
-
+# encoding: utf-8
+"""
+A context manager for handling sys.displayhook.
+
+Authors:
+
+* Robert Kern
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+from traitlets.config.configurable import Configurable
+from traitlets import Any
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class DisplayTrap(Configurable):
+ """Object to manage sys.displayhook.
+
+ This came from IPython.core.kernel.display_hook, but is simplified
+ (no callbacks or formatters) until more of the core is refactored.
+ """
+
+ hook = Any()
+
+ def __init__(self, hook=None):
+ super(DisplayTrap, self).__init__(hook=hook, config=None)
+ self.old_hook = None
+ # We define this to track if a single BuiltinTrap is nested.
+ # Only turn off the trap when the outermost call to __exit__ is made.
+ self._nested_level = 0
+
+ def __enter__(self):
+ if self._nested_level == 0:
+ self.set()
+ self._nested_level += 1
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self._nested_level == 1:
+ self.unset()
+ self._nested_level -= 1
+ # Returning False will cause exceptions to propagate
+ return False
+
+ def set(self):
+ """Set the hook."""
+ if sys.displayhook is not self.hook:
+ self.old_hook = sys.displayhook
+ sys.displayhook = self.hook
+
+ def unset(self):
+ """Unset the hook."""
+ sys.displayhook = self.old_hook
+
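A minimal sketch of the context-manager behaviour above (my_hook is illustrative, not part of the API)::

    import sys
    from IPython.core.display_trap import DisplayTrap

    def my_hook(value):
        if value is not None:
            sys.stdout.write('>> %r\n' % (value,))

    with DisplayTrap(hook=my_hook):
        sys.displayhook(42)   # routed to my_hook while the trap is set
    # the previous sys.displayhook is restored on exit
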
diff --git a/contrib/python/ipython/py2/IPython/core/displayhook.py b/contrib/python/ipython/py2/IPython/core/displayhook.py
index e2a6b0547d..cce7c83d16 100644
--- a/contrib/python/ipython/py2/IPython/core/displayhook.py
+++ b/contrib/python/ipython/py2/IPython/core/displayhook.py
@@ -1,298 +1,298 @@
-# -*- coding: utf-8 -*-
-"""Displayhook for IPython.
-
-This defines a callable class that IPython uses for `sys.displayhook`.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import sys
-import io as _io
-import tokenize
-
-from traitlets.config.configurable import Configurable
-from IPython.utils.py3compat import builtin_mod, cast_unicode_py2
-from traitlets import Instance, Float
+# -*- coding: utf-8 -*-
+"""Displayhook for IPython.
+
+This defines a callable class that IPython uses for `sys.displayhook`.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import sys
+import io as _io
+import tokenize
+
+from traitlets.config.configurable import Configurable
+from IPython.utils.py3compat import builtin_mod, cast_unicode_py2
+from traitlets import Instance, Float
from warnings import warn
-
-# TODO: Move the various attributes (cache_size, [others now moved]). Some
-# of these are also attributes of InteractiveShell. They should be on ONE object
-# only and the other objects should ask that one object for their values.
-
-class DisplayHook(Configurable):
- """The custom IPython displayhook to replace sys.displayhook.
-
- This class does many things, but the basic idea is that it is a callable
- that gets called whenever user code returns a value.
- """
-
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
- allow_none=True)
- exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
- allow_none=True)
- cull_fraction = Float(0.2)
-
- def __init__(self, shell=None, cache_size=1000, **kwargs):
- super(DisplayHook, self).__init__(shell=shell, **kwargs)
- cache_size_min = 3
- if cache_size <= 0:
- self.do_full_cache = 0
- cache_size = 0
- elif cache_size < cache_size_min:
- self.do_full_cache = 0
- cache_size = 0
- warn('caching was disabled (min value for cache size is %s).' %
+
+# TODO: Move the various attributes (cache_size, [others now moved]). Some
+# of these are also attributes of InteractiveShell. They should be on ONE object
+# only and the other objects should ask that one object for their values.
+
+class DisplayHook(Configurable):
+ """The custom IPython displayhook to replace sys.displayhook.
+
+ This class does many things, but the basic idea is that it is a callable
+ that gets called whenever user code returns a value.
+ """
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
+ allow_none=True)
+ cull_fraction = Float(0.2)
+
+ def __init__(self, shell=None, cache_size=1000, **kwargs):
+ super(DisplayHook, self).__init__(shell=shell, **kwargs)
+ cache_size_min = 3
+ if cache_size <= 0:
+ self.do_full_cache = 0
+ cache_size = 0
+ elif cache_size < cache_size_min:
+ self.do_full_cache = 0
+ cache_size = 0
+ warn('caching was disabled (min value for cache size is %s).' %
cache_size_min,stacklevel=3)
- else:
- self.do_full_cache = 1
-
- self.cache_size = cache_size
-
- # we need a reference to the user-level namespace
- self.shell = shell
-
- self._,self.__,self.___ = '','',''
-
- # these are deliberately global:
- to_user_ns = {'_':self._,'__':self.__,'___':self.___}
- self.shell.user_ns.update(to_user_ns)
-
- @property
- def prompt_count(self):
- return self.shell.execution_count
-
- #-------------------------------------------------------------------------
- # Methods used in __call__. Override these methods to modify the behavior
- # of the displayhook.
- #-------------------------------------------------------------------------
-
- def check_for_underscore(self):
- """Check if the user has set the '_' variable by hand."""
- # If something injected a '_' variable in __builtin__, delete
- # ipython's automatic one so we don't clobber that. gettext() in
- # particular uses _, so we need to stay away from it.
- if '_' in builtin_mod.__dict__:
- try:
- del self.shell.user_ns['_']
- except KeyError:
- pass
-
- def quiet(self):
- """Should we silence the display hook because of ';'?"""
- # do not print output if input ends in ';'
-
- try:
- cell = cast_unicode_py2(self.shell.history_manager.input_hist_parsed[-1])
- except IndexError:
- # some uses of ipshellembed may fail here
- return False
-
- sio = _io.StringIO(cell)
- tokens = list(tokenize.generate_tokens(sio.readline))
-
- for token in reversed(tokens):
+ else:
+ self.do_full_cache = 1
+
+ self.cache_size = cache_size
+
+ # we need a reference to the user-level namespace
+ self.shell = shell
+
+ self._,self.__,self.___ = '','',''
+
+ # these are deliberately global:
+ to_user_ns = {'_':self._,'__':self.__,'___':self.___}
+ self.shell.user_ns.update(to_user_ns)
+
+ @property
+ def prompt_count(self):
+ return self.shell.execution_count
+
+ #-------------------------------------------------------------------------
+ # Methods used in __call__. Override these methods to modify the behavior
+ # of the displayhook.
+ #-------------------------------------------------------------------------
+
+ def check_for_underscore(self):
+ """Check if the user has set the '_' variable by hand."""
+ # If something injected a '_' variable in __builtin__, delete
+ # ipython's automatic one so we don't clobber that. gettext() in
+ # particular uses _, so we need to stay away from it.
+ if '_' in builtin_mod.__dict__:
+ try:
+ del self.shell.user_ns['_']
+ except KeyError:
+ pass
+
+ def quiet(self):
+ """Should we silence the display hook because of ';'?"""
+ # do not print output if input ends in ';'
+
+ try:
+ cell = cast_unicode_py2(self.shell.history_manager.input_hist_parsed[-1])
+ except IndexError:
+ # some uses of ipshellembed may fail here
+ return False
+
+ sio = _io.StringIO(cell)
+ tokens = list(tokenize.generate_tokens(sio.readline))
+
+ for token in reversed(tokens):
if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
- continue
- if (token[0] == tokenize.OP) and (token[1] == ';'):
- return True
- else:
- return False
-
- def start_displayhook(self):
- """Start the displayhook, initializing resources."""
- pass
-
- def write_output_prompt(self):
- """Write the output prompt.
-
- The default implementation simply writes the prompt to
+ continue
+ if (token[0] == tokenize.OP) and (token[1] == ';'):
+ return True
+ else:
+ return False
+
+ def start_displayhook(self):
+ """Start the displayhook, initializing resources."""
+ pass
+
+ def write_output_prompt(self):
+ """Write the output prompt.
+
+ The default implementation simply writes the prompt to
``sys.stdout``.
- """
- # Use write, not print which adds an extra space.
+ """
+ # Use write, not print which adds an extra space.
sys.stdout.write(self.shell.separate_out)
outprompt = 'Out[{}]: '.format(self.shell.execution_count)
- if self.do_full_cache:
+ if self.do_full_cache:
sys.stdout.write(outprompt)
-
- def compute_format_data(self, result):
- """Compute format data of the object to be displayed.
-
- The format data is a generalization of the :func:`repr` of an object.
- In the default implementation the format data is a :class:`dict` of
- key/value pairs where the keys are valid MIME types and the values
- are JSON'able data structures containing the raw data for that MIME
- type. It is up to frontends to pick a MIME type to use and
- display that data in an appropriate manner.
-
- This method only computes the format data for the object and should
- NOT actually print or write that to a stream.
-
- Parameters
- ----------
- result : object
- The Python object passed to the display hook, whose format will be
- computed.
-
- Returns
- -------
- (format_dict, md_dict) : dict
- format_dict is a :class:`dict` whose keys are valid MIME types and values are
- JSON'able raw data for that MIME type. It is recommended that
- all return values always include the "text/plain"
- MIME type representation of the object.
- md_dict is a :class:`dict` with the same MIME type keys
- of metadata associated with each output.
-
- """
- return self.shell.display_formatter.format(result)
-
+
+ def compute_format_data(self, result):
+ """Compute format data of the object to be displayed.
+
+ The format data is a generalization of the :func:`repr` of an object.
+ In the default implementation the format data is a :class:`dict` of
+ key/value pairs where the keys are valid MIME types and the values
+ are JSON'able data structures containing the raw data for that MIME
+ type. It is up to frontends to pick a MIME type to use and
+ display that data in an appropriate manner.
+
+ This method only computes the format data for the object and should
+ NOT actually print or write that to a stream.
+
+ Parameters
+ ----------
+ result : object
+ The Python object passed to the display hook, whose format will be
+ computed.
+
+ Returns
+ -------
+ (format_dict, md_dict) : dict
+ format_dict is a :class:`dict` whose keys are valid MIME types and values are
+ JSON'able raw data for that MIME type. It is recommended that
+ all return values always include the "text/plain"
+ MIME type representation of the object.
+ md_dict is a :class:`dict` with the same MIME type keys
+ of metadata associated with each output.
+
+ """
+ return self.shell.display_formatter.format(result)
+
# This can be set to True by the write_output_prompt method in a subclass
prompt_end_newline = False
- def write_format_data(self, format_dict, md_dict=None):
- """Write the format data dict to the frontend.
-
- This default version of this method simply writes the plain text
+ def write_format_data(self, format_dict, md_dict=None):
+ """Write the format data dict to the frontend.
+
+ This default version of this method simply writes the plain text
representation of the object to ``sys.stdout``. Subclasses should
- override this method to send the entire `format_dict` to the
- frontends.
-
- Parameters
- ----------
- format_dict : dict
- The format dict for the object passed to `sys.displayhook`.
- md_dict : dict (optional)
- The metadata dict to be associated with the display data.
- """
- if 'text/plain' not in format_dict:
- # nothing to do
- return
- # We want to print because we want to always make sure we have a
- # newline, even if all the prompt separators are ''. This is the
- # standard IPython behavior.
- result_repr = format_dict['text/plain']
- if '\n' in result_repr:
- # So that multi-line strings line up with the left column of
- # the screen, instead of having the output prompt mess up
- # their first line.
- # We use the prompt template instead of the expanded prompt
- # because the expansion may add ANSI escapes that will interfere
- # with our ability to determine whether or not we should add
- # a newline.
+ override this method to send the entire `format_dict` to the
+ frontends.
+
+ Parameters
+ ----------
+ format_dict : dict
+ The format dict for the object passed to `sys.displayhook`.
+ md_dict : dict (optional)
+ The metadata dict to be associated with the display data.
+ """
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ # We want to print because we want to always make sure we have a
+ # newline, even if all the prompt separators are ''. This is the
+ # standard IPython behavior.
+ result_repr = format_dict['text/plain']
+ if '\n' in result_repr:
+ # So that multi-line strings line up with the left column of
+ # the screen, instead of having the output prompt mess up
+ # their first line.
+ # We use the prompt template instead of the expanded prompt
+ # because the expansion may add ANSI escapes that will interfere
+ # with our ability to determine whether or not we should add
+ # a newline.
if not self.prompt_end_newline:
- # But avoid extraneous empty lines.
- result_repr = '\n' + result_repr
-
+ # But avoid extraneous empty lines.
+ result_repr = '\n' + result_repr
+
print(result_repr)
-
- def update_user_ns(self, result):
- """Update user_ns with various things like _, __, _1, etc."""
-
- # Avoid recursive reference when displaying _oh/Out
- if result is not self.shell.user_ns['_oh']:
- if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
- self.cull_cache()
- # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise
- # we cause buggy behavior for things like gettext).
-
- if '_' not in builtin_mod.__dict__:
- self.___ = self.__
- self.__ = self._
- self._ = result
- self.shell.push({'_':self._,
- '__':self.__,
- '___':self.___}, interactive=False)
-
- # hackish access to top-level namespace to create _1,_2... dynamically
- to_main = {}
- if self.do_full_cache:
- new_result = '_'+repr(self.prompt_count)
- to_main[new_result] = result
- self.shell.push(to_main, interactive=False)
- self.shell.user_ns['_oh'][self.prompt_count] = result
-
- def fill_exec_result(self, result):
- if self.exec_result is not None:
- self.exec_result.result = result
-
- def log_output(self, format_dict):
- """Log the output."""
- if 'text/plain' not in format_dict:
- # nothing to do
- return
- if self.shell.logger.log_output:
- self.shell.logger.log_write(format_dict['text/plain'], 'output')
- self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
- format_dict['text/plain']
-
- def finish_displayhook(self):
- """Finish up all displayhook activities."""
+
+ def update_user_ns(self, result):
+ """Update user_ns with various things like _, __, _1, etc."""
+
+ # Avoid recursive reference when displaying _oh/Out
+ if result is not self.shell.user_ns['_oh']:
+ if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
+ self.cull_cache()
+ # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise
+ # we cause buggy behavior for things like gettext).
+
+ if '_' not in builtin_mod.__dict__:
+ self.___ = self.__
+ self.__ = self._
+ self._ = result
+ self.shell.push({'_':self._,
+ '__':self.__,
+ '___':self.___}, interactive=False)
+
+ # hackish access to top-level namespace to create _1,_2... dynamically
+ to_main = {}
+ if self.do_full_cache:
+ new_result = '_'+repr(self.prompt_count)
+ to_main[new_result] = result
+ self.shell.push(to_main, interactive=False)
+ self.shell.user_ns['_oh'][self.prompt_count] = result
+
+ def fill_exec_result(self, result):
+ if self.exec_result is not None:
+ self.exec_result.result = result
+
+ def log_output(self, format_dict):
+ """Log the output."""
+ if 'text/plain' not in format_dict:
+ # nothing to do
+ return
+ if self.shell.logger.log_output:
+ self.shell.logger.log_write(format_dict['text/plain'], 'output')
+ self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
+ format_dict['text/plain']
+
+ def finish_displayhook(self):
+ """Finish up all displayhook activities."""
sys.stdout.write(self.shell.separate_out2)
sys.stdout.flush()
-
- def __call__(self, result=None):
- """Printing with history cache management.
-
- This is invoked every time the interpreter needs to print, and is
- activated by setting the variable sys.displayhook to it.
- """
- self.check_for_underscore()
- if result is not None and not self.quiet():
- self.start_displayhook()
- self.write_output_prompt()
- format_dict, md_dict = self.compute_format_data(result)
- self.update_user_ns(result)
- self.fill_exec_result(result)
- if format_dict:
- self.write_format_data(format_dict, md_dict)
- self.log_output(format_dict)
- self.finish_displayhook()
-
- def cull_cache(self):
- """Output cache is full, cull the oldest entries"""
- oh = self.shell.user_ns.get('_oh', {})
- sz = len(oh)
- cull_count = max(int(sz * self.cull_fraction), 2)
- warn('Output cache limit (currently {sz} entries) hit.\n'
- 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
-
- for i, n in enumerate(sorted(oh)):
- if i >= cull_count:
- break
- self.shell.user_ns.pop('_%i' % n, None)
- oh.pop(n, None)
-
-
- def flush(self):
- if not self.do_full_cache:
- raise ValueError("You shouldn't have reached the cache flush "
- "if full caching is not enabled!")
- # delete auto-generated vars from global namespace
-
- for n in range(1,self.prompt_count + 1):
- key = '_'+repr(n)
- try:
- del self.shell.user_ns[key]
- except: pass
- # In some embedded circumstances, the user_ns doesn't have the
- # '_oh' key set up.
- oh = self.shell.user_ns.get('_oh', None)
- if oh is not None:
- oh.clear()
-
- # Release our own references to objects:
- self._, self.__, self.___ = '', '', ''
-
- if '_' not in builtin_mod.__dict__:
- self.shell.user_ns.update({'_':None,'__':None, '___':None})
- import gc
- # TODO: Is this really needed?
- # IronPython blocks here forever
- if sys.platform != "cli":
- gc.collect()
+
+ def __call__(self, result=None):
+ """Printing with history cache management.
+
+ This is invoked every time the interpreter needs to print, and is
+ activated by setting the variable sys.displayhook to it.
+ """
+ self.check_for_underscore()
+ if result is not None and not self.quiet():
+ self.start_displayhook()
+ self.write_output_prompt()
+ format_dict, md_dict = self.compute_format_data(result)
+ self.update_user_ns(result)
+ self.fill_exec_result(result)
+ if format_dict:
+ self.write_format_data(format_dict, md_dict)
+ self.log_output(format_dict)
+ self.finish_displayhook()
+
+ def cull_cache(self):
+ """Output cache is full, cull the oldest entries"""
+ oh = self.shell.user_ns.get('_oh', {})
+ sz = len(oh)
+ cull_count = max(int(sz * self.cull_fraction), 2)
+ warn('Output cache limit (currently {sz} entries) hit.\n'
+ 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
+
+ for i, n in enumerate(sorted(oh)):
+ if i >= cull_count:
+ break
+ self.shell.user_ns.pop('_%i' % n, None)
+ oh.pop(n, None)
+
+
+ def flush(self):
+ if not self.do_full_cache:
+ raise ValueError("You shouldn't have reached the cache flush "
+ "if full caching is not enabled!")
+ # delete auto-generated vars from global namespace
+
+ for n in range(1,self.prompt_count + 1):
+ key = '_'+repr(n)
+ try:
+ del self.shell.user_ns[key]
+ except: pass
+ # In some embedded circumstances, the user_ns doesn't have the
+ # '_oh' key set up.
+ oh = self.shell.user_ns.get('_oh', None)
+ if oh is not None:
+ oh.clear()
+
+ # Release our own references to objects:
+ self._, self.__, self.___ = '', '', ''
+
+ if '_' not in builtin_mod.__dict__:
+ self.shell.user_ns.update({'_':None,'__':None, '___':None})
+ import gc
+ # TODO: Is this really needed?
+ # IronPython blocks here forever
+ if sys.platform != "cli":
+ gc.collect()
class CapturingDisplayHook(object):
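The behaviour above is easiest to see in a transcript: the Out cache and underscore variables come from update_user_ns, and the trailing semicolon exercises quiet()::

    In [1]: 2 + 2
    Out[1]: 4

    In [2]: _ + 1      # '_' holds the previous result
    Out[2]: 5

    In [3]: 10 * 10;   # trailing ';' silences the displayhook
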
diff --git a/contrib/python/ipython/py2/IPython/core/displaypub.py b/contrib/python/ipython/py2/IPython/core/displaypub.py
index b417aab40f..82a859ae15 100644
--- a/contrib/python/ipython/py2/IPython/core/displaypub.py
+++ b/contrib/python/ipython/py2/IPython/core/displaypub.py
@@ -1,95 +1,95 @@
-"""An interface for publishing rich data to frontends.
-
-There are two components of the display system:
-
-* Display formatters, which take a Python object and compute the
- representation of the object in various formats (text, HTML, SVG, etc.).
-* The display publisher that is used to send the representation data to the
- various frontends.
-
-This module defines the logic for display publishing. The display publisher uses
-the ``display_data`` message type that is defined in the IPython messaging
-spec.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
+"""An interface for publishing rich data to frontends.
+
+There are two components of the display system:
+
+* Display formatters, which take a Python object and compute the
+ representation of the object in various formats (text, HTML, SVG, etc.).
+* The display publisher that is used to send the representation data to the
+ various frontends.
+
+This module defines the logic for display publishing. The display publisher uses
+the ``display_data`` message type that is defined in the IPython messaging
+spec.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
import sys
-from traitlets.config.configurable import Configurable
-from traitlets import List
-
-# This used to be defined here - it is imported for backwards compatibility
-from .display import publish_display_data
-
-#-----------------------------------------------------------------------------
-# Main payload class
-#-----------------------------------------------------------------------------
-
-class DisplayPublisher(Configurable):
- """A traited class that publishes display data to frontends.
-
- Instances of this class are created by the main IPython object and should
- be accessed there.
- """
-
- def _validate_data(self, data, metadata=None):
- """Validate the display data.
-
- Parameters
- ----------
- data : dict
- The format data dictionary.
- metadata : dict
- Any metadata for the data.
- """
-
- if not isinstance(data, dict):
- raise TypeError('data must be a dict, got: %r' % data)
- if metadata is not None:
- if not isinstance(metadata, dict):
- raise TypeError('metadata must be a dict, got: %r' % data)
-
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+# This used to be defined here - it is imported for backwards compatibility
+from .display import publish_display_data
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+class DisplayPublisher(Configurable):
+ """A traited class that publishes display data to frontends.
+
+ Instances of this class are created by the main IPython object and should
+ be accessed there.
+ """
+
+ def _validate_data(self, data, metadata=None):
+ """Validate the display data.
+
+ Parameters
+ ----------
+ data : dict
+ The format data dictionary.
+ metadata : dict
+ Any metadata for the data.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('data must be a dict, got: %r' % data)
+ if metadata is not None:
+ if not isinstance(metadata, dict):
+ raise TypeError('metadata must be a dict, got: %r' % data)
+
# use * to indicate transient, update are keyword-only
def publish(self, data, metadata=None, source=None, **kwargs):
- """Publish data and metadata to all frontends.
-
- See the ``display_data`` message in the messaging documentation for
- more details about this message type.
-
- The following MIME types are currently implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- data : dict
- A dictionary having keys that are valid MIME types (like
- 'text/plain' or 'image/svg+xml') and values that are the data for
- that MIME type. The data itself must be a JSON'able data
- structure. Minimally, all data should include the 'text/plain' data,
- which can be displayed by all frontends. If more than the plain
- text is given, it is up to the frontend to decide which
- representation to use.
- metadata : dict
- A dictionary for metadata related to the data. This can contain
- arbitrary key, value pairs that frontends can use to interpret
- the data. Metadata specific to each mime-type can be specified
- in the metadata dict with the same mime-type keys as
- the data itself.
- source : str, deprecated
- Unused.
+ """Publish data and metadata to all frontends.
+
+ See the ``display_data`` message in the messaging documentation for
+ more details about this message type.
+
+ The following MIME types are currently implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ data : dict
+ A dictionary having keys that are valid MIME types (like
+ 'text/plain' or 'image/svg+xml') and values that are the data for
+ that MIME type. The data itself must be a JSON'able data
+ structure. Minimally, all data should include the 'text/plain' data,
+ which can be displayed by all frontends. If more than the plain
+ text is given, it is up to the frontend to decide which
+ representation to use.
+ metadata : dict
+ A dictionary for metadata related to the data. This can contain
+ arbitrary key, value pairs that frontends can use to interpret
+ the data. Metadata specific to each mime-type can be specified
+ in the metadata dict with the same mime-type keys as
+ the data itself.
+ source : str, deprecated
+ Unused.
transient : dict, keyword-only
A dictionary for transient data.
Data in this dictionary should not be persisted as part of saving this output.
@@ -97,8 +97,8 @@ class DisplayPublisher(Configurable):
update : bool, keyword-only, default: False
If True, only update existing outputs with the same display_id,
rather than creating a new output.
- """
-
+ """
+
# These are kwargs only on Python 3, not used there.
# For consistency and avoid code divergence we leave them here to
# simplify potential backport
@@ -106,21 +106,21 @@ class DisplayPublisher(Configurable):
update = kwargs.pop('update', False)
# The default is to simply write the plain text data using sys.stdout.
- if 'text/plain' in data:
+ if 'text/plain' in data:
print(data['text/plain'])
-
- def clear_output(self, wait=False):
- """Clear the output of the cell receiving output."""
+
+ def clear_output(self, wait=False):
+ """Clear the output of the cell receiving output."""
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
-
-
-class CapturingDisplayPublisher(DisplayPublisher):
- """A DisplayPublisher that stores"""
- outputs = List()
-
+
+
+class CapturingDisplayPublisher(DisplayPublisher):
+ """A DisplayPublisher that stores"""
+ outputs = List()
+
def publish(self, data, metadata=None, source=None, **kwargs):
# These are kwargs only on Python 3, not used there.
@@ -132,8 +132,8 @@ class CapturingDisplayPublisher(DisplayPublisher):
self.outputs.append({'data':data, 'metadata':metadata,
'transient':transient, 'update':update})
- def clear_output(self, wait=False):
- super(CapturingDisplayPublisher, self).clear_output(wait)
+ def clear_output(self, wait=False):
+ super(CapturingDisplayPublisher, self).clear_output(wait)
- # empty the list, *do not* reassign a new list
+ # empty the list, *do not* reassign a new list
self.outputs.clear()
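A minimal sketch of feeding this publisher: publish_display_data (imported above for backwards compatibility) sends a MIME bundle, and outside a rich frontend the base publisher simply prints the 'text/plain' entry::

    from IPython.core.display import publish_display_data

    publish_display_data({'text/plain': 'hello',
                          'text/html': '<b>hello</b>'})
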
diff --git a/contrib/python/ipython/py2/IPython/core/error.py b/contrib/python/ipython/py2/IPython/core/error.py
index 66d67a6ba6..684cbc8da6 100644
--- a/contrib/python/ipython/py2/IPython/core/error.py
+++ b/contrib/python/ipython/py2/IPython/core/error.py
@@ -1,60 +1,60 @@
-# encoding: utf-8
-"""
-Global exception classes for IPython.core.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-* Min Ragan-Kelley
-
-Notes
------
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Exception classes
-#-----------------------------------------------------------------------------
-
-class IPythonCoreError(Exception):
- pass
-
-
-class TryNext(IPythonCoreError):
- """Try next hook exception.
-
- Raise this in your hook function to indicate that the next hook handler
- should be used to handle the operation.
- """
-
-class UsageError(IPythonCoreError):
- """Error in magic function arguments, etc.
-
- Something that probably won't warrant a full traceback, but should
- nevertheless interrupt a macro / batch file.
- """
-
-class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
- """raw_input was requested in a context where it is not supported
-
- For use in IPython kernels, where only some frontends may support
- stdin requests.
- """
-
-class InputRejected(Exception):
- """Input rejected by ast transformer.
-
- Raise this in your NodeTransformer to indicate that InteractiveShell should
- not execute the supplied input.
- """
+# encoding: utf-8
+"""
+Global exception classes for IPython.core.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+* Min Ragan-Kelley
+
+Notes
+-----
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Exception classes
+#-----------------------------------------------------------------------------
+
+class IPythonCoreError(Exception):
+ pass
+
+
+class TryNext(IPythonCoreError):
+ """Try next hook exception.
+
+ Raise this in your hook function to indicate that the next hook handler
+ should be used to handle the operation.
+ """
+
+class UsageError(IPythonCoreError):
+ """Error in magic function arguments, etc.
+
+ Something that probably won't warrant a full traceback, but should
+ nevertheless interrupt a macro / batch file.
+ """
+
+class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
+ """raw_input was requested in a context where it is not supported
+
+ For use in IPython kernels, where only some frontends may support
+ stdin requests.
+ """
+
+class InputRejected(Exception):
+ """Input rejected by ast transformer.
+
+ Raise this in your NodeTransformer to indicate that InteractiveShell should
+ not execute the supplied input.
+ """
diff --git a/contrib/python/ipython/py2/IPython/core/events.py b/contrib/python/ipython/py2/IPython/core/events.py
index dc289ee9a1..bfd09fec6a 100644
--- a/contrib/python/ipython/py2/IPython/core/events.py
+++ b/contrib/python/ipython/py2/IPython/core/events.py
@@ -1,131 +1,131 @@
-"""Infrastructure for registering and firing callbacks on application events.
-
-Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
-be called at specific times, or a collection of alternative methods to try,
-callbacks are designed to be used by extension authors. A number of callbacks
-can be registered for the same event without needing to be aware of one another.
-
-The functions defined in this module are no-ops indicating the names of available
-events and the arguments which will be passed to them.
-
-.. note::
-
- This API is experimental in IPython 2.0, and may be revised in future versions.
-"""
-from __future__ import print_function
-
-class EventManager(object):
- """Manage a collection of events and a sequence of callbacks for each.
-
- This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
- instances as an ``events`` attribute.
-
- .. note::
-
- This API is experimental in IPython 2.0, and may be revised in future versions.
- """
- def __init__(self, shell, available_events):
- """Initialise the :class:`CallbackManager`.
-
- Parameters
- ----------
- shell
- The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
- available_callbacks
- An iterable of names for callback events.
- """
- self.shell = shell
- self.callbacks = {n:[] for n in available_events}
-
- def register(self, event, function):
- """Register a new event callback
-
- Parameters
- ----------
- event : str
- The event for which to register this callback.
- function : callable
- A function to be called on the given event. It should take the same
- parameters as the appropriate callback prototype.
-
- Raises
- ------
- TypeError
- If ``function`` is not callable.
- KeyError
- If ``event`` is not one of the known events.
- """
- if not callable(function):
- raise TypeError('Need a callable, got %r' % function)
- self.callbacks[event].append(function)
-
- def unregister(self, event, function):
- """Remove a callback from the given event."""
- self.callbacks[event].remove(function)
-
- def trigger(self, event, *args, **kwargs):
- """Call callbacks for ``event``.
-
- Any additional arguments are passed to all callbacks registered for this
- event. Exceptions raised by callbacks are caught, and a message printed.
- """
+"""Infrastructure for registering and firing callbacks on application events.
+
+Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
+be called at specific times, or a collection of alternative methods to try,
+callbacks are designed to be used by extension authors. A number of callbacks
+can be registered for the same event without needing to be aware of one another.
+
+The functions defined in this module are no-ops indicating the names of available
+events and the arguments which will be passed to them.
+
+.. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+"""
+from __future__ import print_function
+
+class EventManager(object):
+ """Manage a collection of events and a sequence of callbacks for each.
+
+ This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
+ instances as an ``events`` attribute.
+
+ .. note::
+
+ This API is experimental in IPython 2.0, and may be revised in future versions.
+ """
+ def __init__(self, shell, available_events):
+ """Initialise the :class:`CallbackManager`.
+
+ Parameters
+ ----------
+ shell
+ The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
+ available_callbacks
+ An iterable of names for callback events.
+ """
+ self.shell = shell
+ self.callbacks = {n:[] for n in available_events}
+
+ def register(self, event, function):
+ """Register a new event callback
+
+ Parameters
+ ----------
+ event : str
+ The event for which to register this callback.
+ function : callable
+ A function to be called on the given event. It should take the same
+ parameters as the appropriate callback prototype.
+
+ Raises
+ ------
+ TypeError
+ If ``function`` is not callable.
+ KeyError
+ If ``event`` is not one of the known events.
+ """
+ if not callable(function):
+ raise TypeError('Need a callable, got %r' % function)
+ self.callbacks[event].append(function)
+
+ def unregister(self, event, function):
+ """Remove a callback from the given event."""
+ self.callbacks[event].remove(function)
+
+ def trigger(self, event, *args, **kwargs):
+ """Call callbacks for ``event``.
+
+ Any additional arguments are passed to all callbacks registered for this
+ event. Exceptions raised by callbacks are caught, and a message printed.
+ """
for func in self.callbacks[event][:]:
- try:
- func(*args, **kwargs)
- except Exception:
- print("Error in callback {} (for {}):".format(func, event))
- self.shell.showtraceback()
-
-# event_name -> prototype mapping
-available_events = {}
-
-def _define_event(callback_proto):
- available_events[callback_proto.__name__] = callback_proto
- return callback_proto
-
-# ------------------------------------------------------------------------------
-# Callback prototypes
-#
-# No-op functions which describe the names of available events and the
-# signatures of callbacks for those events.
-# ------------------------------------------------------------------------------
-
-@_define_event
-def pre_execute():
- """Fires before code is executed in response to user/frontend action.
-
- This includes comm and widget messages and silent execution, as well as user
- code cells."""
- pass
-
-@_define_event
-def pre_run_cell():
- """Fires before user-entered code runs."""
- pass
-
-@_define_event
-def post_execute():
- """Fires after code is executed in response to user/frontend action.
-
- This includes comm and widget messages and silent execution, as well as user
- code cells."""
- pass
-
-@_define_event
-def post_run_cell():
- """Fires after user-entered code runs."""
- pass
-
-@_define_event
-def shell_initialized(ip):
- """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
-
- This is before extensions and startup scripts are loaded, so it can only be
- set by subclassing.
-
- Parameters
- ----------
- ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
- The newly initialised shell.
- """
- pass
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ print("Error in callback {} (for {}):".format(func, event))
+ self.shell.showtraceback()
+
+# event_name -> prototype mapping
+available_events = {}
+
+def _define_event(callback_proto):
+ available_events[callback_proto.__name__] = callback_proto
+ return callback_proto
+
+# ------------------------------------------------------------------------------
+# Callback prototypes
+#
+# No-op functions which describe the names of available events and the
+# signatures of callbacks for those events.
+# ------------------------------------------------------------------------------
+
+@_define_event
+def pre_execute():
+ """Fires before code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells."""
+ pass
+
+@_define_event
+def pre_run_cell():
+ """Fires before user-entered code runs."""
+ pass
+
+@_define_event
+def post_execute():
+ """Fires after code is executed in response to user/frontend action.
+
+ This includes comm and widget messages and silent execution, as well as user
+ code cells."""
+ pass
+
+@_define_event
+def post_run_cell():
+ """Fires after user-entered code runs."""
+ pass
+
+@_define_event
+def shell_initialized(ip):
+ """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
+
+ This is before extensions and startup scripts are loaded, so it can only be
+ set by subclassing.
+
+ Parameters
+ ----------
+ ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
+ The newly initialised shell.
+ """
+ pass
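
For orientation, a minimal sketch of how an extension author might use this
API from a live session (assuming `get_ipython()` returns the running shell;
the callback matches the zero-argument `pre_run_cell` prototype above):

    from IPython import get_ipython

    def announce_cell():
        # Matches the pre_run_cell prototype above: no arguments.
        print("about to run a cell")

    ip = get_ipython()  # the running InteractiveShell
    ip.events.register('pre_run_cell', announce_cell)
    # ... and to remove it again later:
    ip.events.unregister('pre_run_cell', announce_cell)

Exceptions raised by the callback are caught by `trigger`, so a misbehaving
callback reports a traceback rather than breaking the shell.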
diff --git a/contrib/python/ipython/py2/IPython/core/excolors.py b/contrib/python/ipython/py2/IPython/core/excolors.py
index 7546b4b263..487bde18c8 100644
--- a/contrib/python/ipython/py2/IPython/core/excolors.py
+++ b/contrib/python/ipython/py2/IPython/core/excolors.py
@@ -1,133 +1,133 @@
-# -*- coding: utf-8 -*-
-"""
-Color schemes for exception handling code in IPython.
-"""
-
+# -*- coding: utf-8 -*-
+"""
+Color schemes for exception handling code in IPython.
+"""
+
import os
-import warnings
-
-#*****************************************************************************
-# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-from IPython.utils.coloransi import ColorSchemeTable, TermColors, ColorScheme
-
-def exception_colors():
- """Return a color table with fields for exception reporting.
-
- The table is an instance of ColorSchemeTable with schemes added for
+import warnings
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+from IPython.utils.coloransi import ColorSchemeTable, TermColors, ColorScheme
+
+def exception_colors():
+ """Return a color table with fields for exception reporting.
+
+ The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
- in.
-
- Examples:
-
- >>> ec = exception_colors()
- >>> ec.active_scheme_name
- ''
- >>> print(ec.active_colors)
- None
-
- Now we activate a color scheme:
- >>> ec.set_active_scheme('NoColor')
- >>> ec.active_scheme_name
- 'NoColor'
- >>> sorted(ec.active_colors.keys())
- ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
- 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
- 'val', 'valEm']
- """
-
- ex_colors = ColorSchemeTable()
-
- # Populate it with color schemes
- C = TermColors # shorthand and local lookup
- ex_colors.add_scheme(ColorScheme(
- 'NoColor',
- # The color to be used for the top line
- topline = C.NoColor,
-
- # The colors to be used in the traceback
- filename = C.NoColor,
- lineno = C.NoColor,
- name = C.NoColor,
- vName = C.NoColor,
- val = C.NoColor,
- em = C.NoColor,
-
- # Emphasized colors for the last frame of the traceback
- normalEm = C.NoColor,
- filenameEm = C.NoColor,
- linenoEm = C.NoColor,
- nameEm = C.NoColor,
- valEm = C.NoColor,
-
- # Colors for printing the exception
- excName = C.NoColor,
- line = C.NoColor,
- caret = C.NoColor,
- Normal = C.NoColor
- ))
-
- # make some schemes as instances so we can copy them for modification easily
- ex_colors.add_scheme(ColorScheme(
- 'Linux',
- # The color to be used for the top line
- topline = C.LightRed,
-
- # The colors to be used in the traceback
- filename = C.Green,
- lineno = C.Green,
- name = C.Purple,
- vName = C.Cyan,
- val = C.Green,
- em = C.LightCyan,
-
- # Emphasized colors for the last frame of the traceback
- normalEm = C.LightCyan,
- filenameEm = C.LightGreen,
- linenoEm = C.LightGreen,
- nameEm = C.LightPurple,
- valEm = C.LightBlue,
-
- # Colors for printing the exception
- excName = C.LightRed,
- line = C.Yellow,
- caret = C.White,
- Normal = C.Normal
- ))
-
- # For light backgrounds, swap dark/light colors
- ex_colors.add_scheme(ColorScheme(
- 'LightBG',
- # The color to be used for the top line
- topline = C.Red,
-
- # The colors to be used in the traceback
- filename = C.LightGreen,
- lineno = C.LightGreen,
- name = C.LightPurple,
- vName = C.Cyan,
- val = C.LightGreen,
- em = C.Cyan,
-
- # Emphasized colors for the last frame of the traceback
- normalEm = C.Cyan,
- filenameEm = C.Green,
- linenoEm = C.Green,
- nameEm = C.Purple,
- valEm = C.Blue,
-
- # Colors for printing the exception
- excName = C.Red,
- #line = C.Brown, # brown often is displayed as yellow
- line = C.Red,
- caret = C.Normal,
- Normal = C.Normal,
- ))
-
+ in.
+
+ Examples:
+
+ >>> ec = exception_colors()
+ >>> ec.active_scheme_name
+ ''
+ >>> print(ec.active_colors)
+ None
+
+ Now we activate a color scheme:
+ >>> ec.set_active_scheme('NoColor')
+ >>> ec.active_scheme_name
+ 'NoColor'
+ >>> sorted(ec.active_colors.keys())
+ ['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
+ 'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
+ 'val', 'valEm']
+ """
+
+ ex_colors = ColorSchemeTable()
+
+ # Populate it with color schemes
+ C = TermColors # shorthand and local lookup
+ ex_colors.add_scheme(ColorScheme(
+ 'NoColor',
+ # The color to be used for the top line
+ topline = C.NoColor,
+
+ # The colors to be used in the traceback
+ filename = C.NoColor,
+ lineno = C.NoColor,
+ name = C.NoColor,
+ vName = C.NoColor,
+ val = C.NoColor,
+ em = C.NoColor,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.NoColor,
+ filenameEm = C.NoColor,
+ linenoEm = C.NoColor,
+ nameEm = C.NoColor,
+ valEm = C.NoColor,
+
+ # Colors for printing the exception
+ excName = C.NoColor,
+ line = C.NoColor,
+ caret = C.NoColor,
+ Normal = C.NoColor
+ ))
+
+ # make some schemes as instances so we can copy them for modification easily
+ ex_colors.add_scheme(ColorScheme(
+ 'Linux',
+ # The color to be used for the top line
+ topline = C.LightRed,
+
+ # The colors to be used in the traceback
+ filename = C.Green,
+ lineno = C.Green,
+ name = C.Purple,
+ vName = C.Cyan,
+ val = C.Green,
+ em = C.LightCyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.LightCyan,
+ filenameEm = C.LightGreen,
+ linenoEm = C.LightGreen,
+ nameEm = C.LightPurple,
+ valEm = C.LightBlue,
+
+ # Colors for printing the exception
+ excName = C.LightRed,
+ line = C.Yellow,
+ caret = C.White,
+ Normal = C.Normal
+ ))
+
+ # For light backgrounds, swap dark/light colors
+ ex_colors.add_scheme(ColorScheme(
+ 'LightBG',
+ # The color to be used for the top line
+ topline = C.Red,
+
+ # The colors to be used in the traceback
+ filename = C.LightGreen,
+ lineno = C.LightGreen,
+ name = C.LightPurple,
+ vName = C.Cyan,
+ val = C.LightGreen,
+ em = C.Cyan,
+
+ # Emphasized colors for the last frame of the traceback
+ normalEm = C.Cyan,
+ filenameEm = C.Green,
+ linenoEm = C.Green,
+ nameEm = C.Purple,
+ valEm = C.Blue,
+
+ # Colors for printing the exception
+ excName = C.Red,
+ #line = C.Brown, # brown often is displayed as yellow
+ line = C.Red,
+ caret = C.Normal,
+ Normal = C.Normal,
+ ))
+
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
@@ -163,22 +163,22 @@ def exception_colors():
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
- return ex_colors
-
-class Deprec(object):
-
- def __init__(self, wrapped_obj):
- self.wrapped=wrapped_obj
-
- def __getattr__(self, name):
- val = getattr(self.wrapped, name)
+ return ex_colors
+
+class Deprec(object):
+
+ def __init__(self, wrapped_obj):
+ self.wrapped=wrapped_obj
+
+ def __getattr__(self, name):
+ val = getattr(self.wrapped, name)
warnings.warn("Using ExceptionColors global is deprecated and will be removed in IPython 6.0",
DeprecationWarning, stacklevel=2)
- # using getattr after warnings break ipydoctest in weird way for 3.5
- return val
-
-# For backwards compatibility, keep around a single global object. Note that
-# this should NOT be used, the factory function should be used instead, since
-# these objects are stateful and it's very easy to get strange bugs if any code
-# modifies the module-level object's state.
-ExceptionColors = Deprec(exception_colors())
+ # using getattr after warnings breaks ipydoctest in a weird way on 3.5
+ return val
+
+# For backwards compatibility, keep around a single global object. Note that
+# this should NOT be used, the factory function should be used instead, since
+# these objects are stateful and it's very easy to get strange bugs if any code
+# modifies the module-level object's state.
+ExceptionColors = Deprec(exception_colors())
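
The `Deprec` class above is a small deprecation-proxy pattern: attribute
access on the wrapper is delegated to the wrapped object, emitting a
`DeprecationWarning` along the way. A self-contained sketch of the same idea
(the names here are illustrative, not part of IPython):

    import warnings

    class DeprecatedProxy(object):
        """Delegate attribute access to a wrapped object, warning on each use."""
        def __init__(self, wrapped_obj, message):
            self.wrapped = wrapped_obj
            self.message = message

        def __getattr__(self, name):
            # __getattr__ fires only for names not found on the proxy itself,
            # so self.wrapped and self.message resolve normally.
            warnings.warn(self.message, DeprecationWarning, stacklevel=2)
            return getattr(self.wrapped, name)

Note that `Deprec` fetches the attribute *before* warning because, per its
comment, the reverse order broke ipydoctest on Python 3.5; this sketch
simplifies that ordering.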
diff --git a/contrib/python/ipython/py2/IPython/core/extensions.py b/contrib/python/ipython/py2/IPython/core/extensions.py
index e20b485e97..58855466f1 100644
--- a/contrib/python/ipython/py2/IPython/core/extensions.py
+++ b/contrib/python/ipython/py2/IPython/core/extensions.py
@@ -1,173 +1,173 @@
-# encoding: utf-8
-"""A class for managing IPython extensions."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import os
-from shutil import copyfile
-import sys
-
-from traitlets.config.configurable import Configurable
-from IPython.utils.path import ensure_dir_exists
-from traitlets import Instance
-
-try:
- from importlib import reload
-except ImportError :
- ## deprecated since 3.4
- from imp import reload
-
-#-----------------------------------------------------------------------------
-# Main class
-#-----------------------------------------------------------------------------
-
-class ExtensionManager(Configurable):
- """A class to manage IPython extensions.
-
- An IPython extension is an importable Python module that has
- a function with the signature::
-
- def load_ipython_extension(ipython):
- # Do things with ipython
-
- This function is called after your extension is imported and the
- currently active :class:`InteractiveShell` instance is passed as
- the only argument. You can do anything you want with IPython at
- that point, including defining new magic and aliases, adding new
- components, etc.
+# encoding: utf-8
+"""A class for managing IPython extensions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+from shutil import copyfile
+import sys
+
+from traitlets.config.configurable import Configurable
+from IPython.utils.path import ensure_dir_exists
+from traitlets import Instance
+
+try:
+ from importlib import reload
+except ImportError:
+ # the imp module is deprecated since Python 3.4
+ from imp import reload
+
+#-----------------------------------------------------------------------------
+# Main class
+#-----------------------------------------------------------------------------
+
+class ExtensionManager(Configurable):
+ """A class to manage IPython extensions.
+
+ An IPython extension is an importable Python module that has
+ a function with the signature::
+
+ def load_ipython_extension(ipython):
+ # Do things with ipython
+
+ This function is called after your extension is imported and the
+ currently active :class:`InteractiveShell` instance is passed as
+ the only argument. You can do anything you want with IPython at
+ that point, including defining new magic and aliases, adding new
+ components, etc.
- You can also optionally define an :func:`unload_ipython_extension(ipython)`
- function, which will be called if the user unloads or reloads the extension.
- The extension manager will only call :func:`load_ipython_extension` again
- if the extension is reloaded.
-
- You can put your extension modules anywhere you want, as long as
- they can be imported by Python's standard import mechanism. However,
- to make it easy to write extensions, you can also put your extensions
- in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
- is added to ``sys.path`` automatically.
- """
-
+ You can also optionally define an :func:`unload_ipython_extension(ipython)`
+ function, which will be called if the user unloads or reloads the extension.
+ The extension manager will only call :func:`load_ipython_extension` again
+ if the extension is reloaded.
+
+ You can put your extension modules anywhere you want, as long as
+ they can be imported by Python's standard import mechanism. However,
+ to make it easy to write extensions, you can also put your extensions
+ in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
+ is added to ``sys.path`` automatically.
+ """
+
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
-
- def __init__(self, shell=None, **kwargs):
- super(ExtensionManager, self).__init__(shell=shell, **kwargs)
+
+ def __init__(self, shell=None, **kwargs):
+ super(ExtensionManager, self).__init__(shell=shell, **kwargs)
self.shell.observe(
self._on_ipython_dir_changed, names=('ipython_dir',)
- )
- self.loaded = set()
-
- @property
- def ipython_extension_dir(self):
- return os.path.join(self.shell.ipython_dir, u'extensions')
-
+ )
+ self.loaded = set()
+
+ @property
+ def ipython_extension_dir(self):
+ return os.path.join(self.shell.ipython_dir, u'extensions')
+
def _on_ipython_dir_changed(self, change):
- ensure_dir_exists(self.ipython_extension_dir)
-
- def load_extension(self, module_str):
- """Load an IPython extension by its module name.
-
- Returns the string "already loaded" if the extension is already loaded,
- "no load function" if the module doesn't have a load_ipython_extension
- function, or None if it succeeded.
- """
- if module_str in self.loaded:
- return "already loaded"
+ ensure_dir_exists(self.ipython_extension_dir)
+
+ def load_extension(self, module_str):
+ """Load an IPython extension by its module name.
+
+ Returns the string "already loaded" if the extension is already loaded,
+ "no load function" if the module doesn't have a load_ipython_extension
+ function, or None if it succeeded.
+ """
+ if module_str in self.loaded:
+ return "already loaded"
- with self.shell.builtin_trap:
- if module_str not in sys.modules:
+ with self.shell.builtin_trap:
+ if module_str not in sys.modules:
try:
sys.modules[module_str] = __import__('IPython.extensions.' + module_str)
except ImportError:
- __import__(module_str)
- mod = sys.modules[module_str]
- if self._call_load_ipython_extension(mod):
- self.loaded.add(module_str)
- else:
- return "no load function"
-
- def unload_extension(self, module_str):
- """Unload an IPython extension by its module name.
-
- This function looks up the extension's name in ``sys.modules`` and
- simply calls ``mod.unload_ipython_extension(self)``.
+ __import__(module_str)
+ mod = sys.modules[module_str]
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ return "no load function"
+
+ def unload_extension(self, module_str):
+ """Unload an IPython extension by its module name.
+
+ This function looks up the extension's name in ``sys.modules`` and
+ simply calls ``mod.unload_ipython_extension(self.shell)``.
- Returns the string "no unload function" if the extension doesn't define
- a function to unload itself, "not loaded" if the extension isn't loaded,
- otherwise None.
- """
- if module_str not in self.loaded:
- return "not loaded"
+ Returns the string "no unload function" if the extension doesn't define
+ a function to unload itself, "not loaded" if the extension isn't loaded,
+ otherwise None.
+ """
+ if module_str not in self.loaded:
+ return "not loaded"
- if module_str in sys.modules:
- mod = sys.modules[module_str]
- if self._call_unload_ipython_extension(mod):
- self.loaded.discard(module_str)
- else:
- return "no unload function"
-
- def reload_extension(self, module_str):
- """Reload an IPython extension by calling reload.
-
- If the module has not been loaded before,
- :meth:`InteractiveShell.load_extension` is called. Otherwise
- :func:`reload` is called and then the :func:`load_ipython_extension`
- function of the module, if it exists is called.
- """
- from IPython.utils.syspathcontext import prepended_to_syspath
-
- if (module_str in self.loaded) and (module_str in sys.modules):
- self.unload_extension(module_str)
- mod = sys.modules[module_str]
- with prepended_to_syspath(self.ipython_extension_dir):
- reload(mod)
- if self._call_load_ipython_extension(mod):
- self.loaded.add(module_str)
- else:
- self.load_extension(module_str)
-
- def _call_load_ipython_extension(self, mod):
- if hasattr(mod, 'load_ipython_extension'):
- mod.load_ipython_extension(self.shell)
- return True
-
- def _call_unload_ipython_extension(self, mod):
- if hasattr(mod, 'unload_ipython_extension'):
- mod.unload_ipython_extension(self.shell)
- return True
-
- def install_extension(self, url, filename=None):
+ if module_str in sys.modules:
+ mod = sys.modules[module_str]
+ if self._call_unload_ipython_extension(mod):
+ self.loaded.discard(module_str)
+ else:
+ return "no unload function"
+
+ def reload_extension(self, module_str):
+ """Reload an IPython extension by calling reload.
+
+ If the module has not been loaded before,
+ :meth:`InteractiveShell.load_extension` is called. Otherwise
+ :func:`reload` is called and then the :func:`load_ipython_extension`
+ function of the module, if it exists is called.
+ """
+ from IPython.utils.syspathcontext import prepended_to_syspath
+
+ if (module_str in self.loaded) and (module_str in sys.modules):
+ self.unload_extension(module_str)
+ mod = sys.modules[module_str]
+ with prepended_to_syspath(self.ipython_extension_dir):
+ reload(mod)
+ if self._call_load_ipython_extension(mod):
+ self.loaded.add(module_str)
+ else:
+ self.load_extension(module_str)
+
+ def _call_load_ipython_extension(self, mod):
+ if hasattr(mod, 'load_ipython_extension'):
+ mod.load_ipython_extension(self.shell)
+ return True
+
+ def _call_unload_ipython_extension(self, mod):
+ if hasattr(mod, 'unload_ipython_extension'):
+ mod.unload_ipython_extension(self.shell)
+ return True
+
+ def install_extension(self, url, filename=None):
"""Download and install an IPython extension.
-
- If filename is given, the file will be so named (inside the extension
- directory). Otherwise, the name from the URL will be used. The file must
- have a .py or .zip extension; otherwise, a ValueError will be raised.
-
- Returns the full path to the installed file.
- """
- # Ensure the extension directory exists
- ensure_dir_exists(self.ipython_extension_dir)
-
- if os.path.isfile(url):
- src_filename = os.path.basename(url)
- copy = copyfile
- else:
- # Deferred imports
- try:
- from urllib.parse import urlparse # Py3
- from urllib.request import urlretrieve
- except ImportError:
- from urlparse import urlparse
- from urllib import urlretrieve
- src_filename = urlparse(url).path.split('/')[-1]
- copy = urlretrieve
-
- if filename is None:
- filename = src_filename
- if os.path.splitext(filename)[1] not in ('.py', '.zip'):
- raise ValueError("The file must have a .py or .zip extension", filename)
-
- filename = os.path.join(self.ipython_extension_dir, filename)
- copy(url, filename)
- return filename
+
+ If filename is given, the file will be so named (inside the extension
+ directory). Otherwise, the name from the URL will be used. The file must
+ have a .py or .zip extension; otherwise, a ValueError will be raised.
+
+ Returns the full path to the installed file.
+ """
+ # Ensure the extension directory exists
+ ensure_dir_exists(self.ipython_extension_dir)
+
+ if os.path.isfile(url):
+ src_filename = os.path.basename(url)
+ copy = copyfile
+ else:
+ # Deferred imports
+ try:
+ from urllib.parse import urlparse # Py3
+ from urllib.request import urlretrieve
+ except ImportError:
+ from urlparse import urlparse
+ from urllib import urlretrieve
+ src_filename = urlparse(url).path.split('/')[-1]
+ copy = urlretrieve
+
+ if filename is None:
+ filename = src_filename
+ if os.path.splitext(filename)[1] not in ('.py', '.zip'):
+ raise ValueError("The file must have a .py or .zip extension", filename)
+
+ filename = os.path.join(self.ipython_extension_dir, filename)
+ copy(url, filename)
+ return filename
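
To make the contract described in the `ExtensionManager` docstring concrete,
a minimal extension module might look like this sketch (the module name
`hello_ext` and the injected variable are hypothetical):

    # hello_ext.py -- place anywhere importable, e.g. in
    # os.path.join(ipython_dir, 'extensions')

    def load_ipython_extension(ipython):
        # 'ipython' is the active InteractiveShell instance.
        ipython.push({'answer': 42})  # inject a name into the user namespace
        print("hello_ext loaded")

    def unload_ipython_extension(ipython):
        # Optional: called when the user unloads or reloads the extension.
        print("hello_ext unloaded")

In a session, `%load_ext hello_ext` routes through `load_extension` above,
and `%unload_ext hello_ext` through `unload_extension`.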
diff --git a/contrib/python/ipython/py2/IPython/core/formatters.py b/contrib/python/ipython/py2/IPython/core/formatters.py
index 31ed25a4b0..d990619f27 100644
--- a/contrib/python/ipython/py2/IPython/core/formatters.py
+++ b/contrib/python/ipython/py2/IPython/core/formatters.py
@@ -1,137 +1,137 @@
-# -*- coding: utf-8 -*-
-"""Display formatters.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.core.formatters
- :parts: 3
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import abc
-import json
-import sys
-import traceback
-import warnings
-
-from decorator import decorator
-
-from traitlets.config.configurable import Configurable
-from IPython.core.getipython import get_ipython
-from IPython.utils.sentinel import Sentinel
+# -*- coding: utf-8 -*-
+"""Display formatters.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.formatters
+ :parts: 3
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import abc
+import json
+import sys
+import traceback
+import warnings
+
+from decorator import decorator
+
+from traitlets.config.configurable import Configurable
+from IPython.core.getipython import get_ipython
+from IPython.utils.sentinel import Sentinel
from IPython.utils.dir2 import get_real_method
-from IPython.lib import pretty
-from traitlets import (
- Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
- ForwardDeclaredInstance,
+from IPython.lib import pretty
+from traitlets import (
+ Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
+ ForwardDeclaredInstance,
default, observe,
-)
-from IPython.utils.py3compat import (
- with_metaclass, string_types, unicode_type,
-)
-
-
-class DisplayFormatter(Configurable):
-
+)
+from IPython.utils.py3compat import (
+ with_metaclass, string_types, unicode_type,
+)
+
+
+class DisplayFormatter(Configurable):
+
active_types = List(Unicode(),
- help="""List of currently active mime-types to display.
- You can use this to set a white-list for formats to display.
-
- Most users will not need to change this value.
+ help="""List of currently active mime-types to display.
+ You can use this to set a white-list for formats to display.
+
+ Most users will not need to change this value.
""").tag(config=True)
@default('active_types')
- def _active_types_default(self):
- return self.format_types
+ def _active_types_default(self):
+ return self.format_types
@observe('active_types')
def _active_types_changed(self, change):
- for key, formatter in self.formatters.items():
+ for key, formatter in self.formatters.items():
if key in change['new']:
- formatter.enabled = True
- else:
- formatter.enabled = False
+ formatter.enabled = True
+ else:
+ formatter.enabled = False
- ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
+ ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
@default('ipython_display_formatter')
def _default_formatter(self):
- return IPythonDisplayFormatter(parent=self)
+ return IPythonDisplayFormatter(parent=self)
mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
@default('mimebundle_formatter')
def _default_mime_formatter(self):
return MimeBundleFormatter(parent=self)
- # A dict of formatter whose keys are format types (MIME types) and whose
- # values are subclasses of BaseFormatter.
- formatters = Dict()
+ # A dict of formatters whose keys are format types (MIME types) and whose
+ # values are subclasses of BaseFormatter.
+ formatters = Dict()
@default('formatters')
- def _formatters_default(self):
- """Activate the default formatters."""
- formatter_classes = [
- PlainTextFormatter,
- HTMLFormatter,
- MarkdownFormatter,
- SVGFormatter,
- PNGFormatter,
- PDFFormatter,
- JPEGFormatter,
- LatexFormatter,
- JSONFormatter,
- JavascriptFormatter
- ]
- d = {}
- for cls in formatter_classes:
- f = cls(parent=self)
- d[f.format_type] = f
- return d
-
- def format(self, obj, include=None, exclude=None):
- """Return a format data dict for an object.
-
- By default all format types will be computed.
-
+ def _formatters_default(self):
+ """Activate the default formatters."""
+ formatter_classes = [
+ PlainTextFormatter,
+ HTMLFormatter,
+ MarkdownFormatter,
+ SVGFormatter,
+ PNGFormatter,
+ PDFFormatter,
+ JPEGFormatter,
+ LatexFormatter,
+ JSONFormatter,
+ JavascriptFormatter
+ ]
+ d = {}
+ for cls in formatter_classes:
+ f = cls(parent=self)
+ d[f.format_type] = f
+ return d
+
+ def format(self, obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
The following MIME types are usually implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * application/pdf
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- obj : object
- The Python object whose format data will be computed.
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * application/pdf
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ obj : object
+ The Python object whose format data will be computed.
include : list, tuple or set; optional
- A list of format type strings (MIME types) to include in the
- format data dict. If this is set *only* the format types included
- in this list will be computed.
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set *only* the format types included
+ in this list will be computed.
exclude : list, tuple or set; optional
- A list of format type string (MIME types) to exclude in the format
- data dict. If this is set all format types will be computed,
- except for those included in this argument.
+ A list of format type string (MIME types) to exclude in the format
+ data dict. If this is set all format types will be computed,
+ except for those included in this argument.
Mimetypes present in exclude will take precedence over the ones in include
-
- Returns
- -------
- (format_dict, metadata_dict) : tuple of two dicts
-
- format_dict is a dictionary of key/value pairs, one of each format that was
- generated for the object. The keys are the format types, which
- will usually be MIME type strings and the values and JSON'able
- data structure containing the raw data for the representation in
- that format.
-
- metadata_dict is a dictionary of metadata about each mime-type output.
- Its keys will be a strict subset of the keys in format_dict.
+
+ Returns
+ -------
+ (format_dict, metadata_dict) : tuple of two dicts
+
+ format_dict is a dictionary of key/value pairs, one for each format that was
+ generated for the object. The keys are the format types, which
+ will usually be MIME type strings, and the values are JSON'able
+ data structures containing the raw data for the representation in
+ that format.
+
+ metadata_dict is a dictionary of metadata about each mime-type output.
+ Its keys will be a strict subset of the keys in format_dict.
Notes
-----
@@ -141,13 +141,13 @@ class DisplayFormatter(Configurable):
precedence and the corresponding `_repr_*_` for this mimetype will
not be called.
- """
- format_dict = {}
- md_dict = {}
-
- if self.ipython_display_formatter(obj):
- # object handled itself, don't proceed
- return {}, {}
+ """
+ format_dict = {}
+ md_dict = {}
+
+ if self.ipython_display_formatter(obj):
+ # object handled itself, don't proceed
+ return {}, {}
format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
@@ -159,733 +159,733 @@ class DisplayFormatter(Configurable):
format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
- for format_type, formatter in self.formatters.items():
+ for format_type, formatter in self.formatters.items():
if format_type in format_dict:
# already got it from mimebundle, don't render again
continue
- if include and format_type not in include:
- continue
- if exclude and format_type in exclude:
- continue
-
- md = None
- try:
- data = formatter(obj)
- except:
- # FIXME: log the exception
- raise
-
- # formatters can return raw data or (data, metadata)
- if isinstance(data, tuple) and len(data) == 2:
- data, md = data
-
- if data is not None:
- format_dict[format_type] = data
- if md is not None:
- md_dict[format_type] = md
- return format_dict, md_dict
-
- @property
- def format_types(self):
- """Return the format types (MIME types) of the active formatters."""
- return list(self.formatters.keys())
-
-
-#-----------------------------------------------------------------------------
-# Formatters for specific format types (text, html, svg, etc.)
-#-----------------------------------------------------------------------------
-
-
-def _safe_repr(obj):
- """Try to return a repr of an object
-
- always returns a string, at least.
- """
- try:
- return repr(obj)
- except Exception as e:
- return "un-repr-able object (%r)" % e
-
-
-class FormatterWarning(UserWarning):
- """Warning class for errors in formatters"""
-
-@decorator
-def catch_format_error(method, self, *args, **kwargs):
- """show traceback on failed format call"""
- try:
- r = method(self, *args, **kwargs)
- except NotImplementedError:
- # don't warn on NotImplementedErrors
+ if include and format_type not in include:
+ continue
+ if exclude and format_type in exclude:
+ continue
+
+ md = None
+ try:
+ data = formatter(obj)
+ except:
+ # FIXME: log the exception
+ raise
+
+ # formatters can return raw data or (data, metadata)
+ if isinstance(data, tuple) and len(data) == 2:
+ data, md = data
+
+ if data is not None:
+ format_dict[format_type] = data
+ if md is not None:
+ md_dict[format_type] = md
+ return format_dict, md_dict
+
+ @property
+ def format_types(self):
+ """Return the format types (MIME types) of the active formatters."""
+ return list(self.formatters.keys())
+
+
+#-----------------------------------------------------------------------------
+# Formatters for specific format types (text, html, svg, etc.)
+#-----------------------------------------------------------------------------
+
+
+def _safe_repr(obj):
+ """Try to return a repr of an object
+
+ always returns a string, at least.
+ """
+ try:
+ return repr(obj)
+ except Exception as e:
+ return "un-repr-able object (%r)" % e
+
+
+class FormatterWarning(UserWarning):
+ """Warning class for errors in formatters"""
+
+@decorator
+def catch_format_error(method, self, *args, **kwargs):
+ """show traceback on failed format call"""
+ try:
+ r = method(self, *args, **kwargs)
+ except NotImplementedError:
+ # don't warn on NotImplementedErrors
return self._check_return(None, args[0])
- except Exception:
- exc_info = sys.exc_info()
- ip = get_ipython()
- if ip is not None:
- ip.showtraceback(exc_info)
- else:
- traceback.print_exception(*exc_info)
+ except Exception:
+ exc_info = sys.exc_info()
+ ip = get_ipython()
+ if ip is not None:
+ ip.showtraceback(exc_info)
+ else:
+ traceback.print_exception(*exc_info)
return self._check_return(None, args[0])
- return self._check_return(r, args[0])
-
-
-class FormatterABC(with_metaclass(abc.ABCMeta, object)):
- """ Abstract base class for Formatters.
-
- A formatter is a callable class that is responsible for computing the
- raw format data for a particular format type (MIME type). For example,
- an HTML formatter would have a format type of `text/html` and would return
- the HTML representation of the object when called.
- """
-
- # The format type of the data returned, usually a MIME type.
- format_type = 'text/plain'
-
- # Is the formatter enabled...
- enabled = True
-
- @abc.abstractmethod
- def __call__(self, obj):
- """Return a JSON'able representation of the object.
-
- If the object cannot be formatted by this formatter,
- warn and return None.
- """
- return repr(obj)
-
-
-def _mod_name_key(typ):
- """Return a (__module__, __name__) tuple for a type.
-
- Used as key in Formatter.deferred_printers.
- """
- module = getattr(typ, '__module__', None)
- name = getattr(typ, '__name__', None)
- return (module, name)
-
-
-def _get_type(obj):
- """Return the type of an instance (old and new-style)"""
- return getattr(obj, '__class__', None) or type(obj)
-
-
-_raise_key_error = Sentinel('_raise_key_error', __name__,
-"""
-Special value to raise a KeyError
-
-Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
-""")
-
-
-class BaseFormatter(Configurable):
- """A base formatter class that is configurable.
-
- This formatter should usually be used as the base class of all formatters.
- It is a traited :class:`Configurable` class and includes an extensible
- API for users to determine how their objects are formatted. The following
- logic is used to find a function to format an given object.
-
- 1. The object is introspected to see if it has a method with the name
- :attr:`print_method`. If is does, that object is passed to that method
- for formatting.
- 2. If no print method is found, three internal dictionaries are consulted
- to find print method: :attr:`singleton_printers`, :attr:`type_printers`
- and :attr:`deferred_printers`.
-
- Users should use these dictionaries to register functions that will be
- used to compute the format data for their objects (if those objects don't
- have the special print methods). The easiest way of using these
- dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
- methods.
-
- If no function/callable is found to compute the format data, ``None`` is
- returned and this format type is not used.
- """
-
- format_type = Unicode('text/plain')
- _return_type = string_types
-
+ return self._check_return(r, args[0])
+
+
+class FormatterABC(with_metaclass(abc.ABCMeta, object)):
+ """ Abstract base class for Formatters.
+
+ A formatter is a callable class that is responsible for computing the
+ raw format data for a particular format type (MIME type). For example,
+ an HTML formatter would have a format type of `text/html` and would return
+ the HTML representation of the object when called.
+ """
+
+ # The format type of the data returned, usually a MIME type.
+ format_type = 'text/plain'
+
+ # Is the formatter enabled...
+ enabled = True
+
+ @abc.abstractmethod
+ def __call__(self, obj):
+ """Return a JSON'able representation of the object.
+
+ If the object cannot be formatted by this formatter,
+ warn and return None.
+ """
+ return repr(obj)
+
+
+def _mod_name_key(typ):
+ """Return a (__module__, __name__) tuple for a type.
+
+ Used as key in Formatter.deferred_printers.
+ """
+ module = getattr(typ, '__module__', None)
+ name = getattr(typ, '__name__', None)
+ return (module, name)
+
+
+def _get_type(obj):
+ """Return the type of an instance (old and new-style)"""
+ return getattr(obj, '__class__', None) or type(obj)
+
+
+_raise_key_error = Sentinel('_raise_key_error', __name__,
+"""
+Special value to raise a KeyError
+
+Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
+""")
+
+
+class BaseFormatter(Configurable):
+ """A base formatter class that is configurable.
+
+ This formatter should usually be used as the base class of all formatters.
+ It is a traited :class:`Configurable` class and includes an extensible
+ API for users to determine how their objects are formatted. The following
+ logic is used to find a function to format a given object.
+
+ 1. The object is introspected to see if it has a method with the name
+ :attr:`print_method`. If it does, that object is passed to that method
+ for formatting.
+ 2. If no print method is found, three internal dictionaries are consulted
+ to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
+ and :attr:`deferred_printers`.
+
+ Users should use these dictionaries to register functions that will be
+ used to compute the format data for their objects (if those objects don't
+ have the special print methods). The easiest way of using these
+ dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
+ methods.
+
+ If no function/callable is found to compute the format data, ``None`` is
+ returned and this format type is not used.
+ """
+
+ format_type = Unicode('text/plain')
+ _return_type = string_types
+
enabled = Bool(True).tag(config=True)
-
- print_method = ObjectName('__repr__')
-
- # The singleton printers.
- # Maps the IDs of the builtin singleton objects to the format functions.
+
+ print_method = ObjectName('__repr__')
+
+ # The singleton printers.
+ # Maps the IDs of the builtin singleton objects to the format functions.
singleton_printers = Dict().tag(config=True)
-
- # The type-specific printers.
- # Map type objects to the format functions.
+
+ # The type-specific printers.
+ # Map type objects to the format functions.
type_printers = Dict().tag(config=True)
-
- # The deferred-import type-specific printers.
- # Map (modulename, classname) pairs to the format functions.
+
+ # The deferred-import type-specific printers.
+ # Map (modulename, classname) pairs to the format functions.
deferred_printers = Dict().tag(config=True)
-
- @catch_format_error
- def __call__(self, obj):
- """Compute the format for an object."""
- if self.enabled:
- # lookup registered printer
- try:
- printer = self.lookup(obj)
- except KeyError:
- pass
- else:
- return printer(obj)
- # Finally look for special method names
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ return printer(obj)
+ # Finally look for special method names
method = get_real_method(obj, self.print_method)
- if method is not None:
- return method()
- return None
- else:
- return None
-
- def __contains__(self, typ):
- """map in to lookup_by_type"""
- try:
- self.lookup_by_type(typ)
- except KeyError:
- return False
- else:
- return True
-
- def _check_return(self, r, obj):
- """Check that a return value is appropriate
-
- Return the value if so, None otherwise, warning if invalid.
- """
- if r is None or isinstance(r, self._return_type) or \
- (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
- return r
- else:
- warnings.warn(
- "%s formatter returned invalid type %s (expected %s) for object: %s" % \
- (self.format_type, type(r), self._return_type, _safe_repr(obj)),
- FormatterWarning
- )
-
- def lookup(self, obj):
- """Look up the formatter for a given instance.
-
- Parameters
- ----------
- obj : object instance
-
- Returns
- -------
- f : callable
- The registered formatting callable for the type.
-
- Raises
- ------
- KeyError if the type has not been registered.
- """
- # look for singleton first
- obj_id = id(obj)
- if obj_id in self.singleton_printers:
- return self.singleton_printers[obj_id]
- # then lookup by type
- return self.lookup_by_type(_get_type(obj))
-
- def lookup_by_type(self, typ):
- """Look up the registered formatter for a type.
-
- Parameters
- ----------
- typ : type or '__module__.__name__' string for a type
-
- Returns
- -------
- f : callable
- The registered formatting callable for the type.
-
- Raises
- ------
- KeyError if the type has not been registered.
- """
- if isinstance(typ, string_types):
- typ_key = tuple(typ.rsplit('.',1))
- if typ_key not in self.deferred_printers:
- # We may have it cached in the type map. We will have to
- # iterate over all of the types to check.
- for cls in self.type_printers:
- if _mod_name_key(cls) == typ_key:
- return self.type_printers[cls]
- else:
- return self.deferred_printers[typ_key]
- else:
- for cls in pretty._get_mro(typ):
- if cls in self.type_printers or self._in_deferred_types(cls):
- return self.type_printers[cls]
-
- # If we have reached here, the lookup failed.
- raise KeyError("No registered printer for {0!r}".format(typ))
-
- def for_type(self, typ, func=None):
- """Add a format function for a given type.
-
- Parameters
- -----------
- typ : type or '__module__.__name__' string for a type
- The class of the object that will be formatted using `func`.
- func : callable
- A callable for computing the format data.
- `func` will be called with the object to be formatted,
- and will return the raw data in this formatter's format.
- Subclasses may use a different call signature for the
- `func` argument.
-
- If `func` is None or not specified, there will be no change,
- only returning the current value.
-
- Returns
- -------
- oldfunc : callable
- The currently registered callable.
- If you are registering a new formatter,
- this will be the previous value (to enable restoring later).
- """
- # if string given, interpret as 'pkg.module.class_name'
- if isinstance(typ, string_types):
- type_module, type_name = typ.rsplit('.', 1)
- return self.for_type_by_name(type_module, type_name, func)
-
- try:
- oldfunc = self.lookup_by_type(typ)
- except KeyError:
- oldfunc = None
-
- if func is not None:
- self.type_printers[typ] = func
-
- return oldfunc
-
- def for_type_by_name(self, type_module, type_name, func=None):
- """Add a format function for a type specified by the full dotted
- module and name of the type, rather than the type of the object.
-
- Parameters
- ----------
- type_module : str
- The full dotted name of the module the type is defined in, like
- ``numpy``.
- type_name : str
- The name of the type (the class name), like ``dtype``
- func : callable
- A callable for computing the format data.
- `func` will be called with the object to be formatted,
- and will return the raw data in this formatter's format.
- Subclasses may use a different call signature for the
- `func` argument.
-
- If `func` is None or unspecified, there will be no change,
- only returning the current value.
-
- Returns
- -------
- oldfunc : callable
- The currently registered callable.
- If you are registering a new formatter,
- this will be the previous value (to enable restoring later).
- """
- key = (type_module, type_name)
-
- try:
- oldfunc = self.lookup_by_type("%s.%s" % key)
- except KeyError:
- oldfunc = None
-
- if func is not None:
- self.deferred_printers[key] = func
- return oldfunc
-
- def pop(self, typ, default=_raise_key_error):
- """Pop a formatter for the given type.
-
- Parameters
- ----------
- typ : type or '__module__.__name__' string for a type
- default : object
- value to be returned if no formatter is registered for typ.
-
- Returns
- -------
- obj : object
- The last registered object for the type.
-
- Raises
- ------
- KeyError if the type is not registered and default is not specified.
- """
-
- if isinstance(typ, string_types):
- typ_key = tuple(typ.rsplit('.',1))
- if typ_key not in self.deferred_printers:
- # We may have it cached in the type map. We will have to
- # iterate over all of the types to check.
- for cls in self.type_printers:
- if _mod_name_key(cls) == typ_key:
- old = self.type_printers.pop(cls)
- break
- else:
- old = default
- else:
- old = self.deferred_printers.pop(typ_key)
- else:
- if typ in self.type_printers:
- old = self.type_printers.pop(typ)
- else:
- old = self.deferred_printers.pop(_mod_name_key(typ), default)
- if old is _raise_key_error:
- raise KeyError("No registered value for {0!r}".format(typ))
- return old
-
- def _in_deferred_types(self, cls):
- """
- Check if the given class is specified in the deferred type registry.
-
- Successful matches will be moved to the regular type registry for future use.
- """
- mod = getattr(cls, '__module__', None)
- name = getattr(cls, '__name__', None)
- key = (mod, name)
- if key in self.deferred_printers:
- # Move the printer over to the regular registry.
- printer = self.deferred_printers.pop(key)
- self.type_printers[cls] = printer
- return True
- return False
-
-
-class PlainTextFormatter(BaseFormatter):
- """The default pretty-printer.
-
- This uses :mod:`IPython.lib.pretty` to compute the format data of
- the object. If the object cannot be pretty printed, :func:`repr` is used.
- See the documentation of :mod:`IPython.lib.pretty` for details on
- how to write pretty printers. Here is a simple example::
-
- def dtype_pprinter(obj, p, cycle):
- if cycle:
- return p.text('dtype(...)')
- if hasattr(obj, 'fields'):
- if obj.fields is None:
- p.text(repr(obj))
- else:
- p.begin_group(7, 'dtype([')
- for i, field in enumerate(obj.descr):
- if i > 0:
- p.text(',')
- p.breakable()
- p.pretty(field)
- p.end_group(7, '])')
- """
-
- # The format type of data returned.
- format_type = Unicode('text/plain')
-
- # This subclass ignores this attribute as it always need to return
- # something.
+ if method is not None:
+ return method()
+ return None
+ else:
+ return None
+
+ def __contains__(self, typ):
+ """map in to lookup_by_type"""
+ try:
+ self.lookup_by_type(typ)
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None or isinstance(r, self._return_type) or \
+ (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
+ return r
+ else:
+ warnings.warn(
+ "%s formatter returned invalid type %s (expected %s) for object: %s" % \
+ (self.format_type, type(r), self._return_type, _safe_repr(obj)),
+ FormatterWarning
+ )
+
+ def lookup(self, obj):
+ """Look up the formatter for a given instance.
+
+ Parameters
+ ----------
+ obj : object instance
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ # look for singleton first
+ obj_id = id(obj)
+ if obj_id in self.singleton_printers:
+ return self.singleton_printers[obj_id]
+ # then lookup by type
+ return self.lookup_by_type(_get_type(obj))
+
+ def lookup_by_type(self, typ):
+ """Look up the registered formatter for a type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+
+ Returns
+ -------
+ f : callable
+ The registered formatting callable for the type.
+
+ Raises
+ ------
+ KeyError if the type has not been registered.
+ """
+ if isinstance(typ, string_types):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ return self.type_printers[cls]
+ else:
+ return self.deferred_printers[typ_key]
+ else:
+ for cls in pretty._get_mro(typ):
+ if cls in self.type_printers or self._in_deferred_types(cls):
+ return self.type_printers[cls]
+
+ # If we have reached here, the lookup failed.
+ raise KeyError("No registered printer for {0!r}".format(typ))
+
+ def for_type(self, typ, func=None):
+ """Add a format function for a given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ The class of the object that will be formatted using `func`.
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or not specified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ # if string given, interpret as 'pkg.module.class_name'
+ if isinstance(typ, string_types):
+ type_module, type_name = typ.rsplit('.', 1)
+ return self.for_type_by_name(type_module, type_name, func)
+
+ try:
+ oldfunc = self.lookup_by_type(typ)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.type_printers[typ] = func
+
+ return oldfunc
+
+ def for_type_by_name(self, type_module, type_name, func=None):
+ """Add a format function for a type specified by the full dotted
+ module and name of the type, rather than the type of the object.
+
+ Parameters
+ ----------
+ type_module : str
+ The full dotted name of the module the type is defined in, like
+ ``numpy``.
+ type_name : str
+ The name of the type (the class name), like ``dtype``
+ func : callable
+ A callable for computing the format data.
+ `func` will be called with the object to be formatted,
+ and will return the raw data in this formatter's format.
+ Subclasses may use a different call signature for the
+ `func` argument.
+
+ If `func` is None or unspecified, there will be no change,
+ only returning the current value.
+
+ Returns
+ -------
+ oldfunc : callable
+ The currently registered callable.
+ If you are registering a new formatter,
+ this will be the previous value (to enable restoring later).
+ """
+ key = (type_module, type_name)
+
+ try:
+ oldfunc = self.lookup_by_type("%s.%s" % key)
+ except KeyError:
+ oldfunc = None
+
+ if func is not None:
+ self.deferred_printers[key] = func
+ return oldfunc
+
+ def pop(self, typ, default=_raise_key_error):
+ """Pop a formatter for the given type.
+
+ Parameters
+ ----------
+ typ : type or '__module__.__name__' string for a type
+ default : object
+ value to be returned if no formatter is registered for typ.
+
+ Returns
+ -------
+ obj : object
+ The last registered object for the type.
+
+ Raises
+ ------
+ KeyError if the type is not registered and default is not specified.
+ """
+
+ if isinstance(typ, string_types):
+ typ_key = tuple(typ.rsplit('.',1))
+ if typ_key not in self.deferred_printers:
+ # We may have it cached in the type map. We will have to
+ # iterate over all of the types to check.
+ for cls in self.type_printers:
+ if _mod_name_key(cls) == typ_key:
+ old = self.type_printers.pop(cls)
+ break
+ else:
+ old = default
+ else:
+ old = self.deferred_printers.pop(typ_key)
+ else:
+ if typ in self.type_printers:
+ old = self.type_printers.pop(typ)
+ else:
+ old = self.deferred_printers.pop(_mod_name_key(typ), default)
+ if old is _raise_key_error:
+ raise KeyError("No registered value for {0!r}".format(typ))
+ return old
+
+ def _in_deferred_types(self, cls):
+ """
+ Check if the given class is specified in the deferred type registry.
+
+ Successful matches will be moved to the regular type registry for future use.
+ """
+ mod = getattr(cls, '__module__', None)
+ name = getattr(cls, '__name__', None)
+ key = (mod, name)
+ if key in self.deferred_printers:
+ # Move the printer over to the regular registry.
+ printer = self.deferred_printers.pop(key)
+ self.type_printers[cls] = printer
+ return True
+ return False
+
+
+class PlainTextFormatter(BaseFormatter):
+ """The default pretty-printer.
+
+ This uses :mod:`IPython.lib.pretty` to compute the format data of
+ the object. If the object cannot be pretty printed, :func:`repr` is used.
+ See the documentation of :mod:`IPython.lib.pretty` for details on
+ how to write pretty printers. Here is a simple example::
+
+ def dtype_pprinter(obj, p, cycle):
+ if cycle:
+ return p.text('dtype(...)')
+ if hasattr(obj, 'fields'):
+ if obj.fields is None:
+ p.text(repr(obj))
+ else:
+ p.begin_group(7, 'dtype([')
+ for i, field in enumerate(obj.descr):
+ if i > 0:
+ p.text(',')
+ p.breakable()
+ p.pretty(field)
+ p.end_group(7, '])')
+ """
+
+ # The format type of data returned.
+ format_type = Unicode('text/plain')
+
+ # This subclass ignores this attribute as it always needs to return
+ # something.
enabled = Bool(True).tag(config=False)
-
+
max_seq_length = Integer(pretty.MAX_SEQ_LENGTH,
- help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
-
- Set to 0 to disable truncation.
- """
+ help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
+
+ Set to 0 to disable truncation.
+ """
).tag(config=True)
-
- # Look for a _repr_pretty_ methods to use for pretty printing.
- print_method = ObjectName('_repr_pretty_')
-
- # Whether to pretty-print or not.
+
+ # Look for a _repr_pretty_ method to use for pretty printing.
+ print_method = ObjectName('_repr_pretty_')
+
+ # Whether to pretty-print or not.
pprint = Bool(True).tag(config=True)
-
- # Whether to be verbose or not.
+
+ # Whether to be verbose or not.
verbose = Bool(False).tag(config=True)
-
- # The maximum width.
+
+ # The maximum width.
max_width = Integer(79).tag(config=True)
-
- # The newline character.
+
+ # The newline character.
newline = Unicode('\n').tag(config=True)
-
- # format-string for pprinting floats
- float_format = Unicode('%r')
- # setter for float precision, either int or direct format-string
+
+ # format-string for pprinting floats
+ float_format = Unicode('%r')
+ # setter for float precision, either int or direct format-string
float_precision = CUnicode('').tag(config=True)
-
+
@observe('float_precision')
def _float_precision_changed(self, change):
- """float_precision changed, set float_format accordingly.
-
- float_precision can be set by int or str.
- This will set float_format, after interpreting input.
- If numpy has been imported, numpy print precision will also be set.
-
- integer `n` sets format to '%.nf', otherwise, format set directly.
-
- An empty string returns to defaults (repr for float, 8 for numpy).
-
- This parameter can be set via the '%precision' magic.
- """
-
+ """float_precision changed, set float_format accordingly.
+
+ float_precision can be set by int or str.
+ This will set float_format, after interpreting input.
+ If numpy has been imported, numpy print precision will also be set.
+
+ An integer `n` sets the format to '%.nf'; otherwise, the format string is set directly.
+
+ An empty string returns to defaults (repr for float, 8 for numpy).
+
+ This parameter can be set via the '%precision' magic.
+ """
+
new = change['new']
- if '%' in new:
- # got explicit format string
- fmt = new
- try:
- fmt%3.14159
- except Exception:
- raise ValueError("Precision must be int or format string, not %r"%new)
- elif new:
- # otherwise, should be an int
- try:
- i = int(new)
- assert i >= 0
- except ValueError:
- raise ValueError("Precision must be int or format string, not %r"%new)
- except AssertionError:
- raise ValueError("int precision must be non-negative, not %r"%i)
-
- fmt = '%%.%if'%i
- if 'numpy' in sys.modules:
- # set numpy precision if it has been imported
- import numpy
- numpy.set_printoptions(precision=i)
- else:
- # default back to repr
- fmt = '%r'
- if 'numpy' in sys.modules:
- import numpy
- # numpy default is 8
- numpy.set_printoptions(precision=8)
- self.float_format = fmt
-
- # Use the default pretty printers from IPython.lib.pretty.
+ if '%' in new:
+ # got explicit format string
+ fmt = new
+ try:
+ fmt%3.14159
+ except Exception:
+ raise ValueError("Precision must be int or format string, not %r"%new)
+ elif new:
+ # otherwise, should be an int
+ try:
+ i = int(new)
+ assert i >= 0
+ except ValueError:
+ raise ValueError("Precision must be int or format string, not %r"%new)
+ except AssertionError:
+ raise ValueError("int precision must be non-negative, not %r"%i)
+
+ fmt = '%%.%if'%i
+ if 'numpy' in sys.modules:
+ # set numpy precision if it has been imported
+ import numpy
+ numpy.set_printoptions(precision=i)
+ else:
+ # default back to repr
+ fmt = '%r'
+ if 'numpy' in sys.modules:
+ import numpy
+ # numpy default is 8
+ numpy.set_printoptions(precision=8)
+ self.float_format = fmt
+
+ # Use the default pretty printers from IPython.lib.pretty.
@default('singleton_printers')
- def _singleton_printers_default(self):
- return pretty._singleton_pprinters.copy()
-
+ def _singleton_printers_default(self):
+ return pretty._singleton_pprinters.copy()
+
@default('type_printers')
- def _type_printers_default(self):
- d = pretty._type_pprinters.copy()
- d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
- return d
-
+ def _type_printers_default(self):
+ d = pretty._type_pprinters.copy()
+ d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
+ return d
+
@default('deferred_printers')
- def _deferred_printers_default(self):
- return pretty._deferred_type_pprinters.copy()
-
- #### FormatterABC interface ####
-
- @catch_format_error
- def __call__(self, obj):
- """Compute the pretty representation of the object."""
- if not self.pprint:
- return repr(obj)
- else:
- # handle str and unicode on Python 2
- # io.StringIO only accepts unicode,
- # cStringIO doesn't handle unicode on py2,
- # StringIO allows str, unicode but only ascii str
- stream = pretty.CUnicodeIO()
- printer = pretty.RepresentationPrinter(stream, self.verbose,
- self.max_width, self.newline,
- max_seq_length=self.max_seq_length,
- singleton_pprinters=self.singleton_printers,
- type_pprinters=self.type_printers,
- deferred_pprinters=self.deferred_printers)
- printer.pretty(obj)
- printer.flush()
- return stream.getvalue()
-
-
-class HTMLFormatter(BaseFormatter):
- """An HTML formatter.
-
- To define the callables that compute the HTML representation of your
- objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be a valid HTML snippet that
- could be injected into an existing DOM. It should *not* include the
- ``<html>`` or ``<body>`` tags.
- """
- format_type = Unicode('text/html')
-
- print_method = ObjectName('_repr_html_')
-
-
-class MarkdownFormatter(BaseFormatter):
- """A Markdown formatter.
-
- To define the callables that compute the Markdown representation of your
- objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be valid Markdown.
- """
- format_type = Unicode('text/markdown')
-
- print_method = ObjectName('_repr_markdown_')
-
-class SVGFormatter(BaseFormatter):
- """An SVG formatter.
-
- To define the callables that compute the SVG representation of your
- objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be valid SVG enclosed in
- ``<svg>`` tags that could be injected into an existing DOM. It should
- *not* include the ``<html>`` or ``<body>`` tags.
- """
- format_type = Unicode('image/svg+xml')
-
- print_method = ObjectName('_repr_svg_')
-
-
-class PNGFormatter(BaseFormatter):
- """A PNG formatter.
-
- To define the callables that compute the PNG representation of your
- objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be raw PNG data, *not*
- base64 encoded.
- """
- format_type = Unicode('image/png')
-
- print_method = ObjectName('_repr_png_')
-
- _return_type = (bytes, unicode_type)
-
-
-class JPEGFormatter(BaseFormatter):
- """A JPEG formatter.
-
- To define the callables that compute the JPEG representation of your
- objects, define a :meth:`_repr_jpeg_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be raw JPEG data, *not*
- base64 encoded.
- """
- format_type = Unicode('image/jpeg')
-
- print_method = ObjectName('_repr_jpeg_')
-
- _return_type = (bytes, unicode_type)
-
-
-class LatexFormatter(BaseFormatter):
- """A LaTeX formatter.
-
- To define the callables that compute the LaTeX representation of your
- objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be a valid LaTeX equation,
- enclosed in either ``$``, ``$$``, or another LaTeX equation
- environment.
- """
- format_type = Unicode('text/latex')
-
- print_method = ObjectName('_repr_latex_')
-
-
-class JSONFormatter(BaseFormatter):
- """A JSON string formatter.
-
- To define the callables that compute the JSONable representation of
- your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be a JSONable list or dict.
- JSON scalars (None, number, string) are not allowed, only dict or list containers.
- """
- format_type = Unicode('application/json')
- _return_type = (list, dict)
-
- print_method = ObjectName('_repr_json_')
-
- def _check_return(self, r, obj):
- """Check that a return value is appropriate
-
- Return the value if so, None otherwise, warning if invalid.
- """
- if r is None:
- return
- md = None
- if isinstance(r, tuple):
- # unpack data, metadata tuple for type checking on first element
- r, md = r
-
- # handle deprecated JSON-as-string form from IPython < 3
- if isinstance(r, string_types):
- warnings.warn("JSON expects JSONable list/dict containers, not JSON strings",
- FormatterWarning)
- r = json.loads(r)
-
- if md is not None:
- # put the tuple back together
- r = (r, md)
- return super(JSONFormatter, self)._check_return(r, obj)
-
-
-class JavascriptFormatter(BaseFormatter):
- """A Javascript formatter.
-
- To define the callables that compute the Javascript representation of
- your objects, define a :meth:`_repr_javascript_` method or use the
- :meth:`for_type` or :meth:`for_type_by_name` methods to register functions
- that handle this.
-
- The return value of this formatter should be valid Javascript code and
- should *not* be enclosed in ``<script>`` tags.
- """
- format_type = Unicode('application/javascript')
-
- print_method = ObjectName('_repr_javascript_')
-
-
-class PDFFormatter(BaseFormatter):
- """A PDF formatter.
-
- To define the callables that compute the PDF representation of your
- objects, define a :meth:`_repr_pdf_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this.
-
- The return value of this formatter should be raw PDF data, *not*
- base64 encoded.
- """
- format_type = Unicode('application/pdf')
-
- print_method = ObjectName('_repr_pdf_')
-
- _return_type = (bytes, unicode_type)
-
-class IPythonDisplayFormatter(BaseFormatter):
+ def _deferred_printers_default(self):
+ return pretty._deferred_type_pprinters.copy()
+
+ #### FormatterABC interface ####
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the pretty representation of the object."""
+ if not self.pprint:
+ return repr(obj)
+ else:
+ # handle str and unicode on Python 2
+ # io.StringIO only accepts unicode,
+ # cStringIO doesn't handle unicode on py2,
+ # StringIO allows str, unicode but only ascii str
+ stream = pretty.CUnicodeIO()
+ printer = pretty.RepresentationPrinter(stream, self.verbose,
+ self.max_width, self.newline,
+ max_seq_length=self.max_seq_length,
+ singleton_pprinters=self.singleton_printers,
+ type_pprinters=self.type_printers,
+ deferred_pprinters=self.deferred_printers)
+ printer.pretty(obj)
+ printer.flush()
+ return stream.getvalue()
+
+
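For orientation, a hedged sketch of how the traits and registries above can be used from a live session; ``get_ipython()``, ``display_formatter.formatters`` and ``for_type`` are real IPython APIs, while the ``Point`` class and its printer are invented for this example::

    # assumes a running IPython session
    ip = get_ipython()
    plain = ip.display_formatter.formatters['text/plain']
    plain.float_precision = 3            # floats now render via '%.3f'

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def point_pprinter(obj, p, cycle):
        # p is an IPython.lib.pretty.RepresentationPrinter
        p.text('Point(%.1f, %.1f)' % (obj.x, obj.y))

    plain.for_type(Point, point_pprinter)
    Point(1, 2)                          # now displays as Point(1.0, 2.0)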
+class HTMLFormatter(BaseFormatter):
+ """An HTML formatter.
+
+ To define the callables that compute the HTML representation of your
+ objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid HTML snippet that
+ could be injected into an existing DOM. It should *not* include the
+ ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('text/html')
+
+ print_method = ObjectName('_repr_html_')
+
+
+class MarkdownFormatter(BaseFormatter):
+ """A Markdown formatter.
+
+ To define the callables that compute the Markdown representation of your
+ objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be valid Markdown.
+ """
+ format_type = Unicode('text/markdown')
+
+ print_method = ObjectName('_repr_markdown_')
+
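A minimal sketch of the ``_repr_*_`` protocol these formatter classes look for; the ``Table`` class here is invented for illustration::

    class Table(object):
        def __init__(self, rows):
            self.rows = rows

        def _repr_html_(self):
            # picked up by HTMLFormatter: an HTML snippet, no <html>/<body> tags
            cells = ''.join('<tr><td>%s</td></tr>' % r for r in self.rows)
            return '<table>%s</table>' % cells

        def _repr_markdown_(self):
            # picked up by MarkdownFormatter: valid Markdown
            return '\n'.join('- %s' % r for r in self.rows)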
+class SVGFormatter(BaseFormatter):
+ """An SVG formatter.
+
+ To define the callables that compute the SVG representation of your
+ objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be valid SVG enclosed in
+ ``<svg>`` tags that could be injected into an existing DOM. It should
+ *not* include the ``<html>`` or ``<body>`` tags.
+ """
+ format_type = Unicode('image/svg+xml')
+
+ print_method = ObjectName('_repr_svg_')
+
+
+class PNGFormatter(BaseFormatter):
+ """A PNG formatter.
+
+ To define the callables that compute the PNG representation of your
+ objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PNG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/png')
+
+ print_method = ObjectName('_repr_png_')
+
+ _return_type = (bytes, unicode_type)
+
+
+class JPEGFormatter(BaseFormatter):
+ """A JPEG formatter.
+
+ To define the callables that compute the JPEG representation of your
+ objects, define a :meth:`_repr_jpeg_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw JPEG data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('image/jpeg')
+
+ print_method = ObjectName('_repr_jpeg_')
+
+ _return_type = (bytes, unicode_type)
+
+
+class LatexFormatter(BaseFormatter):
+ """A LaTeX formatter.
+
+ To define the callables that compute the LaTeX representation of your
+ objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a valid LaTeX equation,
+ enclosed in either ``$``, ``$$``, or another LaTeX equation
+ environment.
+ """
+ format_type = Unicode('text/latex')
+
+ print_method = ObjectName('_repr_latex_')
+
+
+class JSONFormatter(BaseFormatter):
+ """A JSON string formatter.
+
+ To define the callables that compute the JSONable representation of
+ your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be a JSONable list or dict.
+ JSON scalars (None, number, string) are not allowed, only dict or list containers.
+ """
+ format_type = Unicode('application/json')
+ _return_type = (list, dict)
+
+ print_method = ObjectName('_repr_json_')
+
+ def _check_return(self, r, obj):
+ """Check that a return value is appropriate
+
+ Return the value if so, None otherwise, warning if invalid.
+ """
+ if r is None:
+ return
+ md = None
+ if isinstance(r, tuple):
+ # unpack data, metadata tuple for type checking on first element
+ r, md = r
+
+ # handle deprecated JSON-as-string form from IPython < 3
+ if isinstance(r, string_types):
+ warnings.warn("JSON expects JSONable list/dict containers, not JSON strings",
+ FormatterWarning)
+ r = json.loads(r)
+
+ if md is not None:
+ # put the tuple back together
+ r = (r, md)
+ return super(JSONFormatter, self)._check_return(r, obj)
+
+
+class JavascriptFormatter(BaseFormatter):
+ """A Javascript formatter.
+
+ To define the callables that compute the Javascript representation of
+ your objects, define a :meth:`_repr_javascript_` method or use the
+ :meth:`for_type` or :meth:`for_type_by_name` methods to register functions
+ that handle this.
+
+ The return value of this formatter should be valid Javascript code and
+ should *not* be enclosed in ``<script>`` tags.
+ """
+ format_type = Unicode('application/javascript')
+
+ print_method = ObjectName('_repr_javascript_')
+
+
+class PDFFormatter(BaseFormatter):
+ """A PDF formatter.
+
+ To define the callables that compute the PDF representation of your
+ objects, define a :meth:`_repr_pdf_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this.
+
+ The return value of this formatter should be raw PDF data, *not*
+ base64 encoded.
+ """
+ format_type = Unicode('application/pdf')
+
+ print_method = ObjectName('_repr_pdf_')
+
+ _return_type = (bytes, unicode_type)
+
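A sketch of return values that satisfy the formatters above: JSONFormatter wants a JSONable container, optionally as a ``(data, metadata)`` tuple (which ``_check_return`` unpacks), while the image and PDF formatters want raw bytes. The ``Record`` class and the file name are invented::

    class Record(object):
        def _repr_json_(self):
            # a JSONable dict, plus an optional metadata dict
            return {'id': 1, 'name': 'example'}, {'expanded': False}

        def _repr_png_(self):
            # raw PNG bytes, *not* base64-encoded
            with open('chart.png', 'rb') as f:   # hypothetical file
                return f.read()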
+class IPythonDisplayFormatter(BaseFormatter):
"""An escape-hatch Formatter for objects that know how to display themselves.
-
- To define the callables that compute the representation of your
- objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
- or :meth:`for_type_by_name` methods to register functions that handle
- this. Unlike mime-type displays, this method should not return anything,
- instead calling any appropriate display methods itself.
-
- This display formatter has highest priority.
- If it fires, no other display formatter will be called.
+
+ To define the callables that compute the representation of your
+ objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
+ or :meth:`for_type_by_name` methods to register functions that handle
+ this. Unlike mime-type displays, this method should not return anything,
+ instead calling any appropriate display methods itself.
+
+ This display formatter has highest priority.
+ If it fires, no other display formatter will be called.
Prior to IPython 6.1, `_ipython_display_` was the only way to display custom mime-types
without registering a new Formatter.
@@ -893,29 +893,29 @@ class IPythonDisplayFormatter(BaseFormatter):
IPython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
so `_ipython_display_` should only be used for objects that require unusual
display patterns, such as multiple display calls.
- """
- print_method = ObjectName('_ipython_display_')
- _return_type = (type(None), bool)
-
- @catch_format_error
- def __call__(self, obj):
- """Compute the format for an object."""
- if self.enabled:
- # lookup registered printer
- try:
- printer = self.lookup(obj)
- except KeyError:
- pass
- else:
- printer(obj)
- return True
- # Finally look for special method names
+ """
+ print_method = ObjectName('_ipython_display_')
+ _return_type = (type(None), bool)
+
+ @catch_format_error
+ def __call__(self, obj):
+ """Compute the format for an object."""
+ if self.enabled:
+ # lookup registered printer
+ try:
+ printer = self.lookup(obj)
+ except KeyError:
+ pass
+ else:
+ printer(obj)
+ return True
+ # Finally look for special method names
method = get_real_method(obj, self.print_method)
- if method is not None:
- method()
- return True
-
-
+ if method is not None:
+ method()
+ return True
+
+
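An object using this escape hatch displays itself and returns nothing; ``IPython.display.display`` is the real API, the ``Dashboard`` class is invented::

    from IPython.display import display, HTML, Markdown

    class Dashboard(object):
        def _ipython_display_(self):
            # multiple display calls; the return value is ignored
            display(HTML('<h3>Dashboard</h3>'))
            display(Markdown('*status*: ok'))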
class MimeBundleFormatter(BaseFormatter):
"""A Formatter for arbitrary mime-types.
@@ -973,65 +973,65 @@ class MimeBundleFormatter(BaseFormatter):
return None
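For contrast with ``_ipython_display_``, a sketch of the ``_repr_mimebundle_`` hook mentioned above (introduced in IPython 6.1); the ``Report`` class is invented::

    class Report(object):
        def _repr_mimebundle_(self, include=None, exclude=None):
            # return a dict keyed by MIME type; the frontend picks what to show
            return {'text/plain': 'Report()',
                    'text/html': '<b>Report</b>'}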
-FormatterABC.register(BaseFormatter)
-FormatterABC.register(PlainTextFormatter)
-FormatterABC.register(HTMLFormatter)
-FormatterABC.register(MarkdownFormatter)
-FormatterABC.register(SVGFormatter)
-FormatterABC.register(PNGFormatter)
-FormatterABC.register(PDFFormatter)
-FormatterABC.register(JPEGFormatter)
-FormatterABC.register(LatexFormatter)
-FormatterABC.register(JSONFormatter)
-FormatterABC.register(JavascriptFormatter)
-FormatterABC.register(IPythonDisplayFormatter)
+FormatterABC.register(BaseFormatter)
+FormatterABC.register(PlainTextFormatter)
+FormatterABC.register(HTMLFormatter)
+FormatterABC.register(MarkdownFormatter)
+FormatterABC.register(SVGFormatter)
+FormatterABC.register(PNGFormatter)
+FormatterABC.register(PDFFormatter)
+FormatterABC.register(JPEGFormatter)
+FormatterABC.register(LatexFormatter)
+FormatterABC.register(JSONFormatter)
+FormatterABC.register(JavascriptFormatter)
+FormatterABC.register(IPythonDisplayFormatter)
FormatterABC.register(MimeBundleFormatter)
-
-
-def format_display_data(obj, include=None, exclude=None):
- """Return a format data dict for an object.
-
- By default all format types will be computed.
-
- The following MIME types are currently implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * application/pdf
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- obj : object
- The Python object whose format data will be computed.
- include : list or tuple, optional
- A list of format type strings (MIME types) to include in the
- format data dict. If this is set, *only* the format types included
- in this list will be computed.
- exclude : list or tuple, optional
- A list of format type strings (MIME types) to exclude from the
- format data dict. If this is set, all format types will be computed,
- except for those included in this argument.
-
- Returns
- -------
- format_dict : dict
- A dictionary of key/value pairs, one for each format that was
- generated for the object. The keys are the format types, which
- will usually be MIME type strings, and the values are JSON'able
- data structures containing the raw data for the representation in
- that format.
- """
- from IPython.core.interactiveshell import InteractiveShell
-
+
+
+def format_display_data(obj, include=None, exclude=None):
+ """Return a format data dict for an object.
+
+ By default all format types will be computed.
+
+ The following MIME types are currently implemented:
+
+ * text/plain
+ * text/html
+ * text/markdown
+ * text/latex
+ * application/json
+ * application/javascript
+ * application/pdf
+ * image/png
+ * image/jpeg
+ * image/svg+xml
+
+ Parameters
+ ----------
+ obj : object
+ The Python object whose format data will be computed.
+ include : list or tuple, optional
+ A list of format type strings (MIME types) to include in the
+ format data dict. If this is set, *only* the format types included
+ in this list will be computed.
+ exclude : list or tuple, optional
+ A list of format type strings (MIME types) to exclude from the
+ format data dict. If this is set, all format types will be computed,
+ except for those included in this argument.
+
+ Returns
+ -------
+ format_dict : dict
+ A dictionary of key/value pairs, one for each format that was
+ generated for the object. The keys are the format types, which
+ will usually be MIME type strings, and the values are JSON'able
+ data structures containing the raw data for the representation in
+ that format.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+
return InteractiveShell.instance().display_formatter.format(
- obj,
- include,
- exclude
- )
+ obj,
+ include,
+ exclude
+ )
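A usage sketch for the helper above, assuming a running IPython session. As the code shows, it forwards to ``DisplayFormatter.format``, which returns the format dict together with a metadata dict::

    fmt = format_display_data(3.14159, include=['text/plain'])
    # -> ({'text/plain': u'3.14159'}, {}); only the requested
    #    MIME types are computed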
diff --git a/contrib/python/ipython/py2/IPython/core/getipython.py b/contrib/python/ipython/py2/IPython/core/getipython.py
index 9a127418ad..e6d8a4c91d 100644
--- a/contrib/python/ipython/py2/IPython/core/getipython.py
+++ b/contrib/python/ipython/py2/IPython/core/getipython.py
@@ -1,24 +1,24 @@
-# encoding: utf-8
-"""Simple function to call to get the current InteractiveShell instance
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2013 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-
-def get_ipython():
- """Get the global InteractiveShell instance.
-
- Returns None if no InteractiveShell instance is registered.
- """
- from IPython.core.interactiveshell import InteractiveShell
- if InteractiveShell.initialized():
- return InteractiveShell.instance()
+# encoding: utf-8
+"""Simple function to call to get the current InteractiveShell instance
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+def get_ipython():
+ """Get the global InteractiveShell instance.
+
+ Returns None if no InteractiveShell instance is registered.
+ """
+ from IPython.core.interactiveshell import InteractiveShell
+ if InteractiveShell.initialized():
+ return InteractiveShell.instance()
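A defensive-use sketch: code that may or may not be running under IPython can probe for the shell and, for example, drive the ``%precision`` magic mentioned earlier::

    ip = get_ipython()
    if ip is None:
        print('not running under IPython')
    else:
        ip.run_line_magic('precision', '3')   # same effect as %precision 3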
diff --git a/contrib/python/ipython/py2/IPython/core/history.py b/contrib/python/ipython/py2/IPython/core/history.py
index df5965695b..2e7fdbc845 100644
--- a/contrib/python/ipython/py2/IPython/core/history.py
+++ b/contrib/python/ipython/py2/IPython/core/history.py
@@ -1,92 +1,92 @@
-""" History related magics and functionality """
-
+""" History related magics and functionality """
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-from __future__ import print_function
-
-import atexit
-import datetime
-import os
-import re
-try:
- import sqlite3
-except ImportError:
- try:
- from pysqlite2 import dbapi2 as sqlite3
- except ImportError:
- sqlite3 = None
-import threading
-
+from __future__ import print_function
+
+import atexit
+import datetime
+import os
+import re
+try:
+ import sqlite3
+except ImportError:
+ try:
+ from pysqlite2 import dbapi2 as sqlite3
+ except ImportError:
+ sqlite3 = None
+import threading
+
from traitlets.config.configurable import LoggingConfigurable
-from decorator import decorator
-from IPython.utils.decorators import undoc
+from decorator import decorator
+from IPython.utils.decorators import undoc
from IPython.paths import locate_profile
-from IPython.utils import py3compat
-from traitlets import (
- Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
+from IPython.utils import py3compat
+from traitlets import (
+ Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
default, observe,
-)
+)
from warnings import warn
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-@undoc
-class DummyDB(object):
- """Dummy DB that will act as a black hole for history.
-
- Only used in the absence of sqlite"""
- def execute(self, *args, **kwargs):
- return []
-
- def commit(self, *args, **kwargs):
- pass
-
- def __enter__(self, *args, **kwargs):
- pass
-
- def __exit__(self, *args, **kwargs):
- pass
-
-
-@decorator
-def needs_sqlite(f, self, *a, **kw):
- """Decorator: return an empty list in the absence of sqlite."""
- if sqlite3 is None or not self.enabled:
- return []
- else:
- return f(self, *a, **kw)
-
-
-if sqlite3 is not None:
- DatabaseError = sqlite3.DatabaseError
- OperationalError = sqlite3.OperationalError
-else:
- @undoc
- class DatabaseError(Exception):
- "Dummy exception when sqlite could not be imported. Should never occur."
-
- @undoc
- class OperationalError(Exception):
- "Dummy exception when sqlite could not be imported. Should never occur."
-
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+@undoc
+class DummyDB(object):
+ """Dummy DB that will act as a black hole for history.
+
+ Only used in the absence of sqlite"""
+ def execute(self, *args, **kwargs):
+ return []
+
+ def commit(self, *args, **kwargs):
+ pass
+
+ def __enter__(self, *args, **kwargs):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ pass
+
+
+@decorator
+def needs_sqlite(f, self, *a, **kw):
+ """Decorator: return an empty list in the absence of sqlite."""
+ if sqlite3 is None or not self.enabled:
+ return []
+ else:
+ return f(self, *a, **kw)
+
+
+if sqlite3 is not None:
+ DatabaseError = sqlite3.DatabaseError
+ OperationalError = sqlite3.OperationalError
+else:
+ @undoc
+ class DatabaseError(Exception):
+ "Dummy exception when sqlite could not be imported. Should never occur."
+
+ @undoc
+ class OperationalError(Exception):
+ "Dummy exception when sqlite could not be imported. Should never occur."
+
# use 16kB as threshold for whether a corrupt history db should be saved
# that should be at least 100 entries or so
_SAVE_DB_SIZE = 16384
-@decorator
-def catch_corrupt_db(f, self, *a, **kw):
- """A decorator which wraps HistoryAccessor method calls to catch errors from
- a corrupt SQLite database, move the old database out of the way, and create
- a new one.
+@decorator
+def catch_corrupt_db(f, self, *a, **kw):
+ """A decorator which wraps HistoryAccessor method calls to catch errors from
+ a corrupt SQLite database, move the old database out of the way, and create
+ a new one.
We avoid clobbering larger databases because this may be triggered by filesystem issues,
not just a corrupt file.
- """
- try:
- return f(self, *a, **kw)
+ """
+ try:
+ return f(self, *a, **kw)
except (DatabaseError, OperationalError) as e:
self._corrupt_db_counter += 1
self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
@@ -114,799 +114,799 @@ def catch_corrupt_db(f, self, *a, **kw):
newpath = base + '-corrupt' + ext
os.rename(self.hist_file, newpath)
self.log.error("History file was moved to %s and a new file created.", newpath)
- self.init_db()
- return []
- else:
+ self.init_db()
+ return []
+ else:
# Failed with :memory:, something serious is wrong
- raise
-
+ raise
+
class HistoryAccessorBase(LoggingConfigurable):
- """An abstract class for History Accessors """
-
- def get_tail(self, n=10, raw=True, output=False, include_latest=False):
- raise NotImplementedError
-
- def search(self, pattern="*", raw=True, search_raw=True,
- output=False, n=None, unique=False):
- raise NotImplementedError
-
- def get_range(self, session, start=1, stop=None, raw=True,output=False):
- raise NotImplementedError
-
- def get_range_by_str(self, rangestr, raw=True, output=False):
- raise NotImplementedError
-
-
-class HistoryAccessor(HistoryAccessorBase):
- """Access the history database without adding to it.
-
- This is intended for use by standalone history tools. IPython shells use
- HistoryManager, below, which is a subclass of this."""
-
+ """An abstract class for History Accessors """
+
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ raise NotImplementedError
+
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ raise NotImplementedError
+
+ def get_range(self, session, start=1, stop=None, raw=True,output=False):
+ raise NotImplementedError
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ raise NotImplementedError
+
+
+class HistoryAccessor(HistoryAccessorBase):
+ """Access the history database without adding to it.
+
+ This is intended for use by standalone history tools. IPython shells use
+ HistoryManager, below, which is a subclass of this."""
+
# counter for init_db retries, so we don't keep trying over and over
_corrupt_db_counter = 0
# after two failures, fallback on :memory:
_corrupt_db_limit = 2
- # String holding the path to the history file
+ # String holding the path to the history file
hist_file = Unicode(
- help="""Path to file to use for SQLite history database.
-
- By default, IPython will put the history database in the IPython
- profile directory. If you would rather share one history among
- profiles, you can set this value in each, so that they are consistent.
-
- Due to an issue with fcntl, SQLite is known to misbehave on some NFS
- mounts. If you see IPython hanging, try setting this to something on a
- local disk, e.g::
-
- ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
+ help="""Path to file to use for SQLite history database.
+
+ By default, IPython will put the history database in the IPython
+ profile directory. If you would rather share one history among
+ profiles, you can set this value in each, so that they are consistent.
+
+ Due to an issue with fcntl, SQLite is known to misbehave on some NFS
+ mounts. If you see IPython hanging, try setting this to something on a
+ local disk, e.g::
+
+ ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
you can also use the specific value `:memory:` (including the colon
at both ends but not the backticks), to avoid creating a history file.
-
+
""").tag(config=True)
-
+
enabled = Bool(True,
- help="""enable the SQLite history
-
- set enabled=False to disable the SQLite history,
- in which case there will be no stored history, no SQLite connection,
- and no background saving thread. This may be necessary in some
- threaded environments where IPython is embedded.
- """
+ help="""enable the SQLite history
+
+ set enabled=False to disable the SQLite history,
+ in which case there will be no stored history, no SQLite connection,
+ and no background saving thread. This may be necessary in some
+ threaded environments where IPython is embedded.
+ """
).tag(config=True)
-
+
connection_options = Dict(
- help="""Options for configuring the SQLite connection
-
- These options are passed as keyword args to sqlite3.connect
- when establishing database connections.
- """
+ help="""Options for configuring the SQLite connection
+
+ These options are passed as keyword args to sqlite3.connect
+ when establishing database connections.
+ """
).tag(config=True)
-
- # The SQLite database
- db = Any()
+
+ # The SQLite database
+ db = Any()
@observe('db')
def _db_changed(self, change):
- """validate the db, since it can be an Instance of two different types"""
+ """validate the db, since it can be an Instance of two different types"""
new = change['new']
- connection_types = (DummyDB,)
- if sqlite3 is not None:
- connection_types = (DummyDB, sqlite3.Connection)
- if not isinstance(new, connection_types):
- msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
- (self.__class__.__name__, new)
- raise TraitError(msg)
-
- def __init__(self, profile='default', hist_file=u'', **traits):
- """Create a new history accessor.
-
- Parameters
- ----------
- profile : str
- The name of the profile from which to open history.
- hist_file : str
- Path to an SQLite history database stored by IPython. If specified,
- hist_file overrides profile.
- config : :class:`~traitlets.config.loader.Config`
- Config object. hist_file can also be set through this.
- """
- # We need a pointer back to the shell for various tasks.
- super(HistoryAccessor, self).__init__(**traits)
- # defer setting hist_file from kwarg until after init,
- # otherwise the default kwarg value would clobber any value
- # set by config
- if hist_file:
- self.hist_file = hist_file
-
- if self.hist_file == u'':
- # No one has set the hist_file, yet.
- self.hist_file = self._get_hist_file_name(profile)
-
- if sqlite3 is None and self.enabled:
- warn("IPython History requires SQLite, your history will not be saved")
- self.enabled = False
-
- self.init_db()
-
- def _get_hist_file_name(self, profile='default'):
- """Find the history file for the given profile name.
-
- This is overridden by the HistoryManager subclass, to use the shell's
- active profile.
-
- Parameters
- ----------
- profile : str
- The name of a profile which has a history file.
- """
- return os.path.join(locate_profile(profile), 'history.sqlite')
-
- @catch_corrupt_db
- def init_db(self):
- """Connect to the database, and create tables if necessary."""
- if not self.enabled:
- self.db = DummyDB()
- return
-
- # use detect_types so that timestamps return datetime objects
- kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
- kwargs.update(self.connection_options)
- self.db = sqlite3.connect(self.hist_file, **kwargs)
- self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
- primary key autoincrement, start timestamp,
- end timestamp, num_cmds integer, remark text)""")
- self.db.execute("""CREATE TABLE IF NOT EXISTS history
- (session integer, line integer, source text, source_raw text,
- PRIMARY KEY (session, line))""")
- # Output history is optional, but ensure the table's there so it can be
- # enabled later.
- self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
- (session integer, line integer, output text,
- PRIMARY KEY (session, line))""")
- self.db.commit()
+ connection_types = (DummyDB,)
+ if sqlite3 is not None:
+ connection_types = (DummyDB, sqlite3.Connection)
+ if not isinstance(new, connection_types):
+ msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
+ (self.__class__.__name__, new)
+ raise TraitError(msg)
+
+ def __init__(self, profile='default', hist_file=u'', **traits):
+ """Create a new history accessor.
+
+ Parameters
+ ----------
+ profile : str
+ The name of the profile from which to open history.
+ hist_file : str
+ Path to an SQLite history database stored by IPython. If specified,
+ hist_file overrides profile.
+ config : :class:`~traitlets.config.loader.Config`
+ Config object. hist_file can also be set through this.
+ """
+ # We need a pointer back to the shell for various tasks.
+ super(HistoryAccessor, self).__init__(**traits)
+ # defer setting hist_file from kwarg until after init,
+ # otherwise the default kwarg value would clobber any value
+ # set by config
+ if hist_file:
+ self.hist_file = hist_file
+
+ if self.hist_file == u'':
+ # No one has set the hist_file, yet.
+ self.hist_file = self._get_hist_file_name(profile)
+
+ if sqlite3 is None and self.enabled:
+ warn("IPython History requires SQLite, your history will not be saved")
+ self.enabled = False
+
+ self.init_db()
+
+ def _get_hist_file_name(self, profile='default'):
+ """Find the history file for the given profile name.
+
+ This is overridden by the HistoryManager subclass, to use the shell's
+ active profile.
+
+ Parameters
+ ----------
+ profile : str
+ The name of a profile which has a history file.
+ """
+ return os.path.join(locate_profile(profile), 'history.sqlite')
+
+ @catch_corrupt_db
+ def init_db(self):
+ """Connect to the database, and create tables if necessary."""
+ if not self.enabled:
+ self.db = DummyDB()
+ return
+
+ # use detect_types so that timestamps return datetime objects
+ kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
+ kwargs.update(self.connection_options)
+ self.db = sqlite3.connect(self.hist_file, **kwargs)
+ self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)""")
+ self.db.execute("""CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))""")
+ # Output history is optional, but ensure the table's there so it can be
+ # enabled later.
+ self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))""")
+ self.db.commit()
# success! reset corrupt db count
self._corrupt_db_counter = 0
-
- def writeout_cache(self):
- """Overridden by HistoryManager to dump the cache before certain
- database lookups."""
- pass
-
- ## -------------------------------
- ## Methods for retrieving history:
- ## -------------------------------
- def _run_sql(self, sql, params, raw=True, output=False):
- """Prepares and runs an SQL query for the history database.
-
- Parameters
- ----------
- sql : str
- Any filtering expressions to go after SELECT ... FROM ...
- params : tuple
- Parameters passed to the SQL query (to replace "?")
- raw, output : bool
- See :meth:`get_range`
-
- Returns
- -------
- Tuples as :meth:`get_range`
- """
- toget = 'source_raw' if raw else 'source'
- sqlfrom = "history"
- if output:
- sqlfrom = "history LEFT JOIN output_history USING (session, line)"
- toget = "history.%s, output_history.output" % toget
- cur = self.db.execute("SELECT session, line, %s FROM %s " %\
- (toget, sqlfrom) + sql, params)
- if output: # Regroup into 3-tuples, and parse JSON
+
+ def writeout_cache(self):
+ """Overridden by HistoryManager to dump the cache before certain
+ database lookups."""
+ pass
+
+ ## -------------------------------
+ ## Methods for retrieving history:
+ ## -------------------------------
+ def _run_sql(self, sql, params, raw=True, output=False):
+ """Prepares and runs an SQL query for the history database.
+
+ Parameters
+ ----------
+ sql : str
+ Any filtering expressions to go after SELECT ... FROM ...
+ params : tuple
+ Parameters passed to the SQL query (to replace "?")
+ raw, output : bool
+ See :meth:`get_range`
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ toget = 'source_raw' if raw else 'source'
+ sqlfrom = "history"
+ if output:
+ sqlfrom = "history LEFT JOIN output_history USING (session, line)"
+ toget = "history.%s, output_history.output" % toget
+ cur = self.db.execute("SELECT session, line, %s FROM %s " %\
+ (toget, sqlfrom) + sql, params)
+ if output: # Regroup into 3-tuples, and parse JSON
return ((ses, lin, (py3compat.cast_unicode_py2(inp), py3compat.cast_unicode_py2(out)))
for ses, lin, inp, out in cur)
- return cur
-
- @needs_sqlite
- @catch_corrupt_db
- def get_session_info(self, session):
- """Get info about a session.
-
- Parameters
- ----------
-
- session : int
- Session number to retrieve.
-
- Returns
- -------
-
- session_id : int
- Session ID number
- start : datetime
- Timestamp for the start of the session.
- end : datetime
- Timestamp for the end of the session, or None if IPython crashed.
- num_cmds : int
- Number of commands run, or None if IPython crashed.
- remark : unicode
- A manually set description.
- """
- query = "SELECT * from sessions where session == ?"
- return self.db.execute(query, (session,)).fetchone()
-
- @catch_corrupt_db
- def get_last_session_id(self):
- """Get the last session ID currently in the database.
-
- Within IPython, this should be the same as the value stored in
- :attr:`HistoryManager.session_number`.
- """
- for record in self.get_tail(n=1, include_latest=True):
- return record[0]
-
- @catch_corrupt_db
- def get_tail(self, n=10, raw=True, output=False, include_latest=False):
- """Get the last n lines from the history database.
-
- Parameters
- ----------
- n : int
- The number of lines to get
- raw, output : bool
- See :meth:`get_range`
- include_latest : bool
- If False (default), n+1 lines are fetched, and the latest one
- is discarded. This is intended for use where the function is
- called from a user command, so that the command itself is not returned.
-
- Returns
- -------
- Tuples as :meth:`get_range`
- """
- self.writeout_cache()
- if not include_latest:
- n += 1
- cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
- (n,), raw=raw, output=output)
- if not include_latest:
- return reversed(list(cur)[1:])
- return reversed(list(cur))
-
- @catch_corrupt_db
- def search(self, pattern="*", raw=True, search_raw=True,
- output=False, n=None, unique=False):
- """Search the database using unix glob-style matching (wildcards
- * and ?).
-
- Parameters
- ----------
- pattern : str
- The wildcarded pattern to match when searching
- search_raw : bool
- If True, search the raw input, otherwise, the parsed input
- raw, output : bool
- See :meth:`get_range`
- n : None or int
- If an integer is given, it defines the limit of
- returned entries.
- unique : bool
- If True, return only unique entries.
-
- Returns
- -------
- Tuples as :meth:`get_range`
- """
- tosearch = "source_raw" if search_raw else "source"
- if output:
- tosearch = "history." + tosearch
- self.writeout_cache()
- sqlform = "WHERE %s GLOB ?" % tosearch
- params = (pattern,)
- if unique:
- sqlform += ' GROUP BY {0}'.format(tosearch)
- if n is not None:
- sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
- params += (n,)
- elif unique:
- sqlform += " ORDER BY session, line"
- cur = self._run_sql(sqlform, params, raw=raw, output=output)
- if n is not None:
- return reversed(list(cur))
- return cur
-
- @catch_corrupt_db
- def get_range(self, session, start=1, stop=None, raw=True,output=False):
- """Retrieve input by session.
-
- Parameters
- ----------
- session : int
- Session number to retrieve.
- start : int
- First line to retrieve.
- stop : int
- End of line range (excluded from output itself). If None, retrieve
- to the end of the session.
- raw : bool
- If True, return untranslated input
- output : bool
- If True, attempt to include output. This will be 'real' Python
- objects for the current session, or text reprs from previous
- sessions if db_log_output was enabled at the time. Where no output
- is found, None is used.
-
- Returns
- -------
- entries
- An iterator over the desired lines. Each line is a 3-tuple, either
- (session, line, input) if output is False, or
- (session, line, (input, output)) if output is True.
- """
- if stop:
- lineclause = "line >= ? AND line < ?"
- params = (session, start, stop)
- else:
- lineclause = "line>=?"
- params = (session, start)
-
- return self._run_sql("WHERE session==? AND %s" % lineclause,
- params, raw=raw, output=output)
-
- def get_range_by_str(self, rangestr, raw=True, output=False):
- """Get lines of history from a string of ranges, as used by magic
- commands %hist, %save, %macro, etc.
-
- Parameters
- ----------
- rangestr : str
- A string specifying ranges, e.g. "5 ~2/1-4". See
- :func:`magic_history` for full details.
- raw, output : bool
- As :meth:`get_range`
-
- Returns
- -------
- Tuples as :meth:`get_range`
- """
- for sess, s, e in extract_hist_ranges(rangestr):
- for line in self.get_range(sess, s, e, raw=raw, output=output):
- yield line
-
-
-class HistoryManager(HistoryAccessor):
- """A class to organize all history-related functionality in one place.
- """
- # Public interface
-
- # An instance of the IPython shell we are attached to
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
- allow_none=True)
- # Lists to hold processed and raw history. These start with a blank entry
- # so that we can index them starting from 1
- input_hist_parsed = List([""])
- input_hist_raw = List([""])
- # A list of directories visited during session
- dir_hist = List()
+ return cur
+
+ @needs_sqlite
+ @catch_corrupt_db
+ def get_session_info(self, session):
+ """Get info about a session.
+
+ Parameters
+ ----------
+
+ session : int
+ Session number to retrieve.
+
+ Returns
+ -------
+
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ query = "SELECT * from sessions where session == ?"
+ return self.db.execute(query, (session,)).fetchone()
+
+ @catch_corrupt_db
+ def get_last_session_id(self):
+ """Get the last session ID currently in the database.
+
+ Within IPython, this should be the same as the value stored in
+ :attr:`HistoryManager.session_number`.
+ """
+ for record in self.get_tail(n=1, include_latest=True):
+ return record[0]
+
+ @catch_corrupt_db
+ def get_tail(self, n=10, raw=True, output=False, include_latest=False):
+ """Get the last n lines from the history database.
+
+ Parameters
+ ----------
+ n : int
+ The number of lines to get
+ raw, output : bool
+ See :meth:`get_range`
+ include_latest : bool
+ If False (default), n+1 lines are fetched, and the latest one
+ is discarded. This is intended for use where the function is
+ called from a user command, so that the command itself is not returned.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ self.writeout_cache()
+ if not include_latest:
+ n += 1
+ cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
+ (n,), raw=raw, output=output)
+ if not include_latest:
+ return reversed(list(cur)[1:])
+ return reversed(list(cur))
+
+ @catch_corrupt_db
+ def search(self, pattern="*", raw=True, search_raw=True,
+ output=False, n=None, unique=False):
+ """Search the database using unix glob-style matching (wildcards
+ * and ?).
+
+ Parameters
+ ----------
+ pattern : str
+ The wildcarded pattern to match when searching
+ search_raw : bool
+ If True, search the raw input, otherwise, the parsed input
+ raw, output : bool
+ See :meth:`get_range`
+ n : None or int
+ If an integer is given, it defines the limit of
+ returned entries.
+ unique : bool
+ If True, return only unique entries.
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ tosearch = "source_raw" if search_raw else "source"
+ if output:
+ tosearch = "history." + tosearch
+ self.writeout_cache()
+ sqlform = "WHERE %s GLOB ?" % tosearch
+ params = (pattern,)
+ if unique:
+ sqlform += ' GROUP BY {0}'.format(tosearch)
+ if n is not None:
+ sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
+ params += (n,)
+ elif unique:
+ sqlform += " ORDER BY session, line"
+ cur = self._run_sql(sqlform, params, raw=raw, output=output)
+ if n is not None:
+ return reversed(list(cur))
+ return cur
+
+ @catch_corrupt_db
+ def get_range(self, session, start=1, stop=None, raw=True,output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if stop:
+ lineclause = "line >= ? AND line < ?"
+ params = (session, start, stop)
+ else:
+ lineclause = "line>=?"
+ params = (session, start)
+
+ return self._run_sql("WHERE session==? AND %s" % lineclause,
+ params, raw=raw, output=output)
+
+ def get_range_by_str(self, rangestr, raw=True, output=False):
+ """Get lines of history from a string of ranges, as used by magic
+ commands %hist, %save, %macro, etc.
+
+ Parameters
+ ----------
+ rangestr : str
+ A string specifying ranges, e.g. "5 ~2/1-4". See
+ :func:`magic_history` for full details.
+ raw, output : bool
+ As :meth:`get_range`
+
+ Returns
+ -------
+ Tuples as :meth:`get_range`
+ """
+ for sess, s, e in extract_hist_ranges(rangestr):
+ for line in self.get_range(sess, s, e, raw=raw, output=output):
+ yield line
+
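A usage sketch for the standalone accessor, assuming a 'default' profile whose history database already exists; the glob pattern and range string follow the docstrings above::

    hist = HistoryAccessor(profile='default')

    # last three inputs matching a glob pattern
    for session, line, source in hist.search('import*', n=3):
        print('%d/%d: %s' % (session, line, source))

    # lines from the previous session, as with %history ~1/1-5
    for session, line, source in hist.get_range_by_str('~1/1-5'):
        print('%d/%d: %s' % (session, line, source))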
+
+class HistoryManager(HistoryAccessor):
+ """A class to organize all history-related functionality in one place.
+ """
+ # Public interface
+
+ # An instance of the IPython shell we are attached to
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
+ # Lists to hold processed and raw history. These start with a blank entry
+ # so that we can index them starting from 1
+ input_hist_parsed = List([""])
+ input_hist_raw = List([""])
+ # A list of directories visited during session
+ dir_hist = List()
@default('dir_hist')
- def _dir_hist_default(self):
- try:
- return [py3compat.getcwd()]
- except OSError:
- return []
-
- # A dict of output history, keyed with ints from the shell's
- # execution count.
- output_hist = Dict()
- # The text/plain repr of outputs.
- output_hist_reprs = Dict()
-
- # The number of the current session in the history database
- session_number = Integer()
-
+ def _dir_hist_default(self):
+ try:
+ return [py3compat.getcwd()]
+ except OSError:
+ return []
+
+ # A dict of output history, keyed with ints from the shell's
+ # execution count.
+ output_hist = Dict()
+ # The text/plain repr of outputs.
+ output_hist_reprs = Dict()
+
+ # The number of the current session in the history database
+ session_number = Integer()
+
db_log_output = Bool(False,
- help="Should the history database include output? (default: no)"
+ help="Should the history database include output? (default: no)"
).tag(config=True)
db_cache_size = Integer(0,
- help="Write to database every x commands (higher values save disk access & power).\n"
- "Values of 1 or less effectively disable caching."
+ help="Write to database every x commands (higher values save disk access & power).\n"
+ "Values of 1 or less effectively disable caching."
).tag(config=True)
- # The input and output caches
- db_input_cache = List()
- db_output_cache = List()
-
- # History saving in separate thread
- save_thread = Instance('IPython.core.history.HistorySavingThread',
- allow_none=True)
- try: # Event is a function returning an instance of _Event...
- save_flag = Instance(threading._Event, allow_none=True)
- except AttributeError: # ...until Python 3.3, when it's a class.
- save_flag = Instance(threading.Event, allow_none=True)
-
- # Private interface
- # Variables used to store the three last inputs from the user. On each new
- # history update, we populate the user's namespace with these, shifted as
- # necessary.
- _i00 = Unicode(u'')
- _i = Unicode(u'')
- _ii = Unicode(u'')
- _iii = Unicode(u'')
-
- # A regex matching all forms of the exit command, so that we don't store
- # them in the history (it's annoying to rewind the first entry and land on
- # an exit call).
- _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
-
- def __init__(self, shell=None, config=None, **traits):
- """Create a new history manager associated with a shell instance.
- """
- # We need a pointer back to the shell for various tasks.
- super(HistoryManager, self).__init__(shell=shell, config=config,
- **traits)
- self.save_flag = threading.Event()
- self.db_input_cache_lock = threading.Lock()
- self.db_output_cache_lock = threading.Lock()
-
- try:
- self.new_session()
- except OperationalError:
- self.log.error("Failed to create history session in %s. History will not be saved.",
- self.hist_file, exc_info=True)
- self.hist_file = ':memory:'
-
- if self.enabled and self.hist_file != ':memory:':
- self.save_thread = HistorySavingThread(self)
- self.save_thread.start()
-
- def _get_hist_file_name(self, profile=None):
- """Get default history file name based on the Shell's profile.
-
- The profile parameter is ignored, but must exist for compatibility with
- the parent class."""
- profile_dir = self.shell.profile_dir.location
- return os.path.join(profile_dir, 'history.sqlite')
-
- @needs_sqlite
- def new_session(self, conn=None):
- """Get a new session number."""
- if conn is None:
- conn = self.db
-
- with conn:
- cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
- NULL, "") """, (datetime.datetime.now(),))
- self.session_number = cur.lastrowid
-
- def end_session(self):
- """Close the database session, filling in the end time and line count."""
- self.writeout_cache()
- with self.db:
- self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
- session==?""", (datetime.datetime.now(),
- len(self.input_hist_parsed)-1, self.session_number))
- self.session_number = 0
-
- def name_session(self, name):
- """Give the current session a name in the history database."""
- with self.db:
- self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
- (name, self.session_number))
-
- def reset(self, new_session=True):
- """Clear the session history, releasing all object references, and
- optionally open a new session."""
- self.output_hist.clear()
- # The directory history can't be completely empty
- self.dir_hist[:] = [py3compat.getcwd()]
-
- if new_session:
- if self.session_number:
- self.end_session()
- self.input_hist_parsed[:] = [""]
- self.input_hist_raw[:] = [""]
- self.new_session()
-
- # ------------------------------
- # Methods for retrieving history
- # ------------------------------
- def get_session_info(self, session=0):
- """Get info about a session.
-
- Parameters
- ----------
-
- session : int
- Session number to retrieve. The current session is 0, and negative
- numbers count back from current session, so -1 is the previous session.
-
- Returns
- -------
-
- session_id : int
- Session ID number
- start : datetime
- Timestamp for the start of the session.
- end : datetime
- Timestamp for the end of the session, or None if IPython crashed.
- num_cmds : int
- Number of commands run, or None if IPython crashed.
- remark : unicode
- A manually set description.
- """
- if session <= 0:
- session += self.session_number
-
- return super(HistoryManager, self).get_session_info(session=session)
-
- def _get_range_session(self, start=1, stop=None, raw=True, output=False):
- """Get input and output history from the current session. Called by
- get_range, and takes similar parameters."""
- input_hist = self.input_hist_raw if raw else self.input_hist_parsed
-
- n = len(input_hist)
- if start < 0:
- start += n
- if not stop or (stop > n):
- stop = n
- elif stop < 0:
- stop += n
-
- for i in range(start, stop):
- if output:
- line = (input_hist[i], self.output_hist_reprs.get(i))
- else:
- line = input_hist[i]
- yield (0, i, line)
-
- def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
- """Retrieve input by session.
-
- Parameters
- ----------
- session : int
- Session number to retrieve. The current session is 0, and negative
- numbers count back from current session, so -1 is previous session.
- start : int
- First line to retrieve.
- stop : int
- End of line range (excluded from output itself). If None, retrieve
- to the end of the session.
- raw : bool
- If True, return untranslated input
- output : bool
- If True, attempt to include output. This will be 'real' Python
- objects for the current session, or text reprs from previous
- sessions if db_log_output was enabled at the time. Where no output
- is found, None is used.
-
- Returns
- -------
- entries
- An iterator over the desired lines. Each line is a 3-tuple, either
- (session, line, input) if output is False, or
- (session, line, (input, output)) if output is True.
- """
- if session <= 0:
- session += self.session_number
- if session==self.session_number: # Current session
- return self._get_range_session(start, stop, raw, output)
- return super(HistoryManager, self).get_range(session, start, stop, raw,
- output)
-
- ## ----------------------------
- ## Methods for storing history:
- ## ----------------------------
- def store_inputs(self, line_num, source, source_raw=None):
- """Store source and raw input in history and create input cache
- variables ``_i*``.
-
- Parameters
- ----------
- line_num : int
- The prompt number of this input.
-
- source : str
- Python input.
-
- source_raw : str, optional
- If given, this is the raw input without any IPython transformations
- applied to it. If not given, ``source`` is used.
- """
- if source_raw is None:
- source_raw = source
- source = source.rstrip('\n')
- source_raw = source_raw.rstrip('\n')
-
- # do not store exit/quit commands
- if self._exit_re.match(source_raw.strip()):
- return
-
- self.input_hist_parsed.append(source)
- self.input_hist_raw.append(source_raw)
-
- with self.db_input_cache_lock:
- self.db_input_cache.append((line_num, source, source_raw))
- # Trigger to flush cache and write to DB.
- if len(self.db_input_cache) >= self.db_cache_size:
- self.save_flag.set()
-
- # update the auto _i variables
- self._iii = self._ii
- self._ii = self._i
- self._i = self._i00
- self._i00 = source_raw
-
- # hackish access to user namespace to create _i1,_i2... dynamically
- new_i = '_i%s' % line_num
- to_main = {'_i': self._i,
- '_ii': self._ii,
- '_iii': self._iii,
- new_i : self._i00 }
-
- if self.shell is not None:
- self.shell.push(to_main, interactive=False)
-
- def store_output(self, line_num):
- """If database output logging is enabled, this saves all the
- outputs from the indicated prompt number to the database. It's
- called by run_cell after code has been executed.
-
- Parameters
- ----------
- line_num : int
- The line number from which to save outputs
- """
- if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
- return
- output = self.output_hist_reprs[line_num]
-
- with self.db_output_cache_lock:
- self.db_output_cache.append((line_num, output))
- if self.db_cache_size <= 1:
- self.save_flag.set()
-
- def _writeout_input_cache(self, conn):
- with conn:
- for line in self.db_input_cache:
- conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
- (self.session_number,)+line)
-
- def _writeout_output_cache(self, conn):
- with conn:
- for line in self.db_output_cache:
- conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
- (self.session_number,)+line)
-
- @needs_sqlite
- def writeout_cache(self, conn=None):
- """Write any entries in the cache to the database."""
- if conn is None:
- conn = self.db
-
- with self.db_input_cache_lock:
- try:
- self._writeout_input_cache(conn)
- except sqlite3.IntegrityError:
- self.new_session(conn)
- print("ERROR! Session/line number was not unique in",
- "database. History logging moved to new session",
- self.session_number)
- try:
- # Try writing to the new session. If this fails, don't
- # recurse
- self._writeout_input_cache(conn)
- except sqlite3.IntegrityError:
- pass
- finally:
- self.db_input_cache = []
-
- with self.db_output_cache_lock:
- try:
- self._writeout_output_cache(conn)
- except sqlite3.IntegrityError:
- print("!! Session/line number for output was not unique",
- "in database. Output will not be stored.")
- finally:
- self.db_output_cache = []
-
-
-class HistorySavingThread(threading.Thread):
- """This thread takes care of writing history to the database, so that
- the UI isn't held up while that happens.
-
- It waits for the HistoryManager's save_flag to be set, then writes out
- the history cache. The main thread is responsible for setting the flag when
- the cache size reaches a defined threshold."""
- daemon = True
- stop_now = False
- enabled = True
- def __init__(self, history_manager):
- super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
- self.history_manager = history_manager
- self.enabled = history_manager.enabled
- atexit.register(self.stop)
-
- @needs_sqlite
- def run(self):
- # We need a separate db connection per thread:
- try:
- self.db = sqlite3.connect(self.history_manager.hist_file,
- **self.history_manager.connection_options
- )
- while True:
- self.history_manager.save_flag.wait()
- if self.stop_now:
- self.db.close()
- return
- self.history_manager.save_flag.clear()
- self.history_manager.writeout_cache(self.db)
- except Exception as e:
- print(("The history saving thread hit an unexpected error (%s)."
- "History will not be written to the database.") % repr(e))
-
- def stop(self):
- """This can be called from the main thread to safely stop this thread.
-
- Note that it does not attempt to write out remaining history before
- exiting. That should be done by calling the HistoryManager's
- end_session method."""
- self.stop_now = True
- self.history_manager.save_flag.set()
- self.join()
-
-
-# To match, e.g. ~5/8-~2/3
-range_re = re.compile(r"""
-((?P<startsess>~?\d+)/)?
-(?P<start>\d+)?
-((?P<sep>[\-:])
- ((?P<endsess>~?\d+)/)?
- (?P<end>\d+))?
-$""", re.VERBOSE)
-
-
-def extract_hist_ranges(ranges_str):
- """Turn a string of history ranges into 3-tuples of (session, start, stop).
-
- Examples
- --------
- >>> list(extract_hist_ranges("~8/5-~7/4 2"))
- [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
- """
- for range_str in ranges_str.split():
- rmatch = range_re.match(range_str)
- if not rmatch:
- continue
- start = rmatch.group("start")
- if start:
- start = int(start)
- end = rmatch.group("end")
- # If no end specified, get (a, a + 1)
- end = int(end) if end else start + 1
- else: # start not specified
- if not rmatch.group('startsess'): # no startsess
- continue
- start = 1
- end = None # provide the entire session hist
-
- if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
- end += 1
- startsess = rmatch.group("startsess") or "0"
- endsess = rmatch.group("endsess") or startsess
- startsess = int(startsess.replace("~","-"))
- endsess = int(endsess.replace("~","-"))
-        assert endsess >= startsess, "start session must be earlier than or equal to end session"
-
- if endsess == startsess:
- yield (startsess, start, end)
- continue
- # Multiple sessions in one range:
- yield (startsess, start, None)
- for sess in range(startsess+1, endsess):
- yield (sess, 1, None)
- yield (endsess, 1, end)
-
-
-def _format_lineno(session, line):
- """Helper function to format line numbers properly."""
- if session == 0:
- return str(line)
- return "%s#%s" % (session, line)
+ # The input and output caches
+ db_input_cache = List()
+ db_output_cache = List()
+
+ # History saving in separate thread
+ save_thread = Instance('IPython.core.history.HistorySavingThread',
+ allow_none=True)
+ try: # Event is a function returning an instance of _Event...
+ save_flag = Instance(threading._Event, allow_none=True)
+ except AttributeError: # ...until Python 3.3, when it's a class.
+ save_flag = Instance(threading.Event, allow_none=True)
+
+ # Private interface
+ # Variables used to store the three last inputs from the user. On each new
+ # history update, we populate the user's namespace with these, shifted as
+ # necessary.
+ _i00 = Unicode(u'')
+ _i = Unicode(u'')
+ _ii = Unicode(u'')
+ _iii = Unicode(u'')
+
+ # A regex matching all forms of the exit command, so that we don't store
+ # them in the history (it's annoying to rewind the first entry and land on
+ # an exit call).
+ _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
+
+ def __init__(self, shell=None, config=None, **traits):
+ """Create a new history manager associated with a shell instance.
+ """
+ # We need a pointer back to the shell for various tasks.
+ super(HistoryManager, self).__init__(shell=shell, config=config,
+ **traits)
+ self.save_flag = threading.Event()
+ self.db_input_cache_lock = threading.Lock()
+ self.db_output_cache_lock = threading.Lock()
+
+ try:
+ self.new_session()
+ except OperationalError:
+ self.log.error("Failed to create history session in %s. History will not be saved.",
+ self.hist_file, exc_info=True)
+ self.hist_file = ':memory:'
+
+ if self.enabled and self.hist_file != ':memory:':
+ self.save_thread = HistorySavingThread(self)
+ self.save_thread.start()
+
+ def _get_hist_file_name(self, profile=None):
+ """Get default history file name based on the Shell's profile.
+
+ The profile parameter is ignored, but must exist for compatibility with
+ the parent class."""
+ profile_dir = self.shell.profile_dir.location
+ return os.path.join(profile_dir, 'history.sqlite')
+
+ @needs_sqlite
+ def new_session(self, conn=None):
+ """Get a new session number."""
+ if conn is None:
+ conn = self.db
+
+ with conn:
+ cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
+ NULL, "") """, (datetime.datetime.now(),))
+ self.session_number = cur.lastrowid
+
+ def end_session(self):
+ """Close the database session, filling in the end time and line count."""
+ self.writeout_cache()
+ with self.db:
+ self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
+ session==?""", (datetime.datetime.now(),
+ len(self.input_hist_parsed)-1, self.session_number))
+ self.session_number = 0
+
+ def name_session(self, name):
+ """Give the current session a name in the history database."""
+ with self.db:
+ self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
+ (name, self.session_number))
+
+ def reset(self, new_session=True):
+ """Clear the session history, releasing all object references, and
+ optionally open a new session."""
+ self.output_hist.clear()
+ # The directory history can't be completely empty
+ self.dir_hist[:] = [py3compat.getcwd()]
+
+ if new_session:
+ if self.session_number:
+ self.end_session()
+ self.input_hist_parsed[:] = [""]
+ self.input_hist_raw[:] = [""]
+ self.new_session()
+
+ # ------------------------------
+ # Methods for retrieving history
+ # ------------------------------
+ def get_session_info(self, session=0):
+ """Get info about a session.
+
+ Parameters
+ ----------
+
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is the previous session.
+
+ Returns
+ -------
+
+ session_id : int
+ Session ID number
+ start : datetime
+ Timestamp for the start of the session.
+ end : datetime
+ Timestamp for the end of the session, or None if IPython crashed.
+ num_cmds : int
+ Number of commands run, or None if IPython crashed.
+ remark : unicode
+ A manually set description.
+ """
+ if session <= 0:
+ session += self.session_number
+
+ return super(HistoryManager, self).get_session_info(session=session)
+
+ def _get_range_session(self, start=1, stop=None, raw=True, output=False):
+ """Get input and output history from the current session. Called by
+ get_range, and takes similar parameters."""
+ input_hist = self.input_hist_raw if raw else self.input_hist_parsed
+
+ n = len(input_hist)
+ if start < 0:
+ start += n
+ if not stop or (stop > n):
+ stop = n
+ elif stop < 0:
+ stop += n
+
+ for i in range(start, stop):
+ if output:
+ line = (input_hist[i], self.output_hist_reprs.get(i))
+ else:
+ line = input_hist[i]
+ yield (0, i, line)
+
+ def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
+ """Retrieve input by session.
+
+ Parameters
+ ----------
+ session : int
+ Session number to retrieve. The current session is 0, and negative
+ numbers count back from current session, so -1 is previous session.
+ start : int
+ First line to retrieve.
+ stop : int
+ End of line range (excluded from output itself). If None, retrieve
+ to the end of the session.
+ raw : bool
+ If True, return untranslated input
+ output : bool
+ If True, attempt to include output. This will be 'real' Python
+ objects for the current session, or text reprs from previous
+ sessions if db_log_output was enabled at the time. Where no output
+ is found, None is used.
+
+ Returns
+ -------
+ entries
+ An iterator over the desired lines. Each line is a 3-tuple, either
+ (session, line, input) if output is False, or
+ (session, line, (input, output)) if output is True.
+ """
+ if session <= 0:
+ session += self.session_number
+ if session==self.session_number: # Current session
+ return self._get_range_session(start, stop, raw, output)
+ return super(HistoryManager, self).get_range(session, start, stop, raw,
+ output)
+
+ ## ----------------------------
+ ## Methods for storing history:
+ ## ----------------------------
+ def store_inputs(self, line_num, source, source_raw=None):
+ """Store source and raw input in history and create input cache
+ variables ``_i*``.
+
+ Parameters
+ ----------
+ line_num : int
+ The prompt number of this input.
+
+ source : str
+ Python input.
+
+ source_raw : str, optional
+ If given, this is the raw input without any IPython transformations
+ applied to it. If not given, ``source`` is used.
+ """
+ if source_raw is None:
+ source_raw = source
+ source = source.rstrip('\n')
+ source_raw = source_raw.rstrip('\n')
+
+ # do not store exit/quit commands
+ if self._exit_re.match(source_raw.strip()):
+ return
+
+ self.input_hist_parsed.append(source)
+ self.input_hist_raw.append(source_raw)
+
+ with self.db_input_cache_lock:
+ self.db_input_cache.append((line_num, source, source_raw))
+ # Trigger to flush cache and write to DB.
+ if len(self.db_input_cache) >= self.db_cache_size:
+ self.save_flag.set()
+
+ # update the auto _i variables
+ self._iii = self._ii
+ self._ii = self._i
+ self._i = self._i00
+ self._i00 = source_raw
+
+ # hackish access to user namespace to create _i1,_i2... dynamically
+ new_i = '_i%s' % line_num
+ to_main = {'_i': self._i,
+ '_ii': self._ii,
+ '_iii': self._iii,
+ new_i : self._i00 }
+
+ if self.shell is not None:
+ self.shell.push(to_main, interactive=False)
+
+ def store_output(self, line_num):
+ """If database output logging is enabled, this saves all the
+ outputs from the indicated prompt number to the database. It's
+ called by run_cell after code has been executed.
+
+ Parameters
+ ----------
+ line_num : int
+ The line number from which to save outputs
+ """
+ if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
+ return
+ output = self.output_hist_reprs[line_num]
+
+ with self.db_output_cache_lock:
+ self.db_output_cache.append((line_num, output))
+ if self.db_cache_size <= 1:
+ self.save_flag.set()
+
+ def _writeout_input_cache(self, conn):
+ with conn:
+ for line in self.db_input_cache:
+ conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
+ (self.session_number,)+line)
+
+ def _writeout_output_cache(self, conn):
+ with conn:
+ for line in self.db_output_cache:
+ conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
+ (self.session_number,)+line)
+
+ @needs_sqlite
+ def writeout_cache(self, conn=None):
+ """Write any entries in the cache to the database."""
+ if conn is None:
+ conn = self.db
+
+ with self.db_input_cache_lock:
+ try:
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ self.new_session(conn)
+ print("ERROR! Session/line number was not unique in",
+ "database. History logging moved to new session",
+ self.session_number)
+ try:
+ # Try writing to the new session. If this fails, don't
+ # recurse
+ self._writeout_input_cache(conn)
+ except sqlite3.IntegrityError:
+ pass
+ finally:
+ self.db_input_cache = []
+
+ with self.db_output_cache_lock:
+ try:
+ self._writeout_output_cache(conn)
+ except sqlite3.IntegrityError:
+ print("!! Session/line number for output was not unique",
+ "in database. Output will not be stored.")
+ finally:
+ self.db_output_cache = []
+
+
+class HistorySavingThread(threading.Thread):
+ """This thread takes care of writing history to the database, so that
+ the UI isn't held up while that happens.
+
+ It waits for the HistoryManager's save_flag to be set, then writes out
+ the history cache. The main thread is responsible for setting the flag when
+ the cache size reaches a defined threshold."""
+ daemon = True
+ stop_now = False
+ enabled = True
+ def __init__(self, history_manager):
+ super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
+ self.history_manager = history_manager
+ self.enabled = history_manager.enabled
+ atexit.register(self.stop)
+
+ @needs_sqlite
+ def run(self):
+ # We need a separate db connection per thread:
+ try:
+ self.db = sqlite3.connect(self.history_manager.hist_file,
+ **self.history_manager.connection_options
+ )
+ while True:
+ self.history_manager.save_flag.wait()
+ if self.stop_now:
+ self.db.close()
+ return
+ self.history_manager.save_flag.clear()
+ self.history_manager.writeout_cache(self.db)
+ except Exception as e:
+ print(("The history saving thread hit an unexpected error (%s)."
+ "History will not be written to the database.") % repr(e))
+
+ def stop(self):
+ """This can be called from the main thread to safely stop this thread.
+
+ Note that it does not attempt to write out remaining history before
+ exiting. That should be done by calling the HistoryManager's
+ end_session method."""
+ self.stop_now = True
+ self.history_manager.save_flag.set()
+ self.join()
+
+
+# To match, e.g. ~5/8-~2/3
+range_re = re.compile(r"""
+((?P<startsess>~?\d+)/)?
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ ((?P<endsess>~?\d+)/)?
+ (?P<end>\d+))?
+$""", re.VERBOSE)
+
+
+def extract_hist_ranges(ranges_str):
+ """Turn a string of history ranges into 3-tuples of (session, start, stop).
+
+ Examples
+ --------
+ >>> list(extract_hist_ranges("~8/5-~7/4 2"))
+ [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
+ """
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ start = rmatch.group("start")
+ if start:
+ start = int(start)
+ end = rmatch.group("end")
+ # If no end specified, get (a, a + 1)
+ end = int(end) if end else start + 1
+ else: # start not specified
+ if not rmatch.group('startsess'): # no startsess
+ continue
+ start = 1
+ end = None # provide the entire session hist
+
+ if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
+ end += 1
+ startsess = rmatch.group("startsess") or "0"
+ endsess = rmatch.group("endsess") or startsess
+ startsess = int(startsess.replace("~","-"))
+ endsess = int(endsess.replace("~","-"))
+        assert endsess >= startsess, "start session must be earlier than or equal to end session"
+
+ if endsess == startsess:
+ yield (startsess, start, end)
+ continue
+ # Multiple sessions in one range:
+ yield (startsess, start, None)
+ for sess in range(startsess+1, endsess):
+ yield (sess, 1, None)
+ yield (endsess, 1, end)
+
+
+def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
+ if session == 0:
+ return str(line)
+ return "%s#%s" % (session, line)
diff --git a/contrib/python/ipython/py2/IPython/core/historyapp.py b/contrib/python/ipython/py2/IPython/core/historyapp.py
index b693cbc0d8..d51426d2ca 100644
--- a/contrib/python/ipython/py2/IPython/core/historyapp.py
+++ b/contrib/python/ipython/py2/IPython/core/historyapp.py
@@ -1,162 +1,162 @@
-# encoding: utf-8
-"""
-An application for managing IPython history.
-
-To be invoked as the `ipython history` subcommand.
-"""
-from __future__ import print_function
-
-import os
-import sqlite3
-
-from traitlets.config.application import Application
-from IPython.core.application import BaseIPythonApplication
-from traitlets import Bool, Int, Dict
-from IPython.utils.io import ask_yes_no
-
-trim_hist_help = """Trim the IPython history database to the last 1000 entries.
-
-This actually copies the last 1000 entries to a new database, and then replaces
-the old file with the new. Use the `--keep=` argument to specify a number
-other than 1000.
-"""
-
-clear_hist_help = """Clear the IPython history database, deleting all entries.
-
-Because this is a destructive operation, IPython will prompt the user if they
-really want to do this. Passing a `-f` flag will force clearing without a
-prompt.
-
-This is a handy alias for `ipython history trim --keep=0`.
-"""
-
-
-class HistoryTrim(BaseIPythonApplication):
- description = trim_hist_help
-
+# encoding: utf-8
+"""
+An application for managing IPython history.
+
+To be invoked as the `ipython history` subcommand.
+"""
+from __future__ import print_function
+
+import os
+import sqlite3
+
+from traitlets.config.application import Application
+from IPython.core.application import BaseIPythonApplication
+from traitlets import Bool, Int, Dict
+from IPython.utils.io import ask_yes_no
+
+trim_hist_help = """Trim the IPython history database to the last 1000 entries.
+
+This actually copies the last 1000 entries to a new database, and then replaces
+the old file with the new. Use the `--keep=` argument to specify a number
+other than 1000.
+"""
+
+clear_hist_help = """Clear the IPython history database, deleting all entries.
+
+Because this is a destructive operation, IPython will prompt the user if they
+really want to do this. Passing a `-f` flag will force clearing without a
+prompt.
+
+This is a handy alias for `ipython history trim --keep=0`.
+"""
+
+
+class HistoryTrim(BaseIPythonApplication):
+ description = trim_hist_help
+
backup = Bool(False,
help="Keep the old history file as history.sqlite.<N>"
).tag(config=True)
-
+
keep = Int(1000,
help="Number of recent lines to keep in the database."
).tag(config=True)
-
- flags = Dict(dict(
- backup = ({'HistoryTrim' : {'backup' : True}},
+
+ flags = Dict(dict(
+ backup = ({'HistoryTrim' : {'backup' : True}},
backup.help
- )
- ))
-
- aliases=Dict(dict(
- keep = 'HistoryTrim.keep'
- ))
-
- def start(self):
- profile_dir = self.profile_dir.location
- hist_file = os.path.join(profile_dir, 'history.sqlite')
- con = sqlite3.connect(hist_file)
-
- # Grab the recent history from the current database.
- inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
- 'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
- if len(inputs) <= self.keep:
- print("There are already at most %d entries in the history database." % self.keep)
- print("Not doing anything. Use --keep= argument to keep fewer entries")
- return
-
- print("Trimming history to the most recent %d entries." % self.keep)
-
- inputs.pop() # Remove the extra element we got to check the length.
- inputs.reverse()
- if inputs:
- first_session = inputs[0][0]
- outputs = list(con.execute('SELECT session, line, output FROM '
- 'output_history WHERE session >= ?', (first_session,)))
- sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
- 'sessions WHERE session >= ?', (first_session,)))
- con.close()
-
- # Create the new history database.
- new_hist_file = os.path.join(profile_dir, 'history.sqlite.new')
- i = 0
- while os.path.exists(new_hist_file):
- # Make sure we don't interfere with an existing file.
- i += 1
- new_hist_file = os.path.join(profile_dir, 'history.sqlite.new'+str(i))
- new_db = sqlite3.connect(new_hist_file)
- new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
- primary key autoincrement, start timestamp,
- end timestamp, num_cmds integer, remark text)""")
- new_db.execute("""CREATE TABLE IF NOT EXISTS history
- (session integer, line integer, source text, source_raw text,
- PRIMARY KEY (session, line))""")
- new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
- (session integer, line integer, output text,
- PRIMARY KEY (session, line))""")
- new_db.commit()
-
-
- if inputs:
- with new_db:
- # Add the recent history into the new database.
- new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
- new_db.executemany('insert into history values (?,?,?,?)', inputs)
- new_db.executemany('insert into output_history values (?,?,?)', outputs)
- new_db.close()
-
- if self.backup:
- i = 1
- backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
- while os.path.exists(backup_hist_file):
- i += 1
- backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
- os.rename(hist_file, backup_hist_file)
- print("Backed up longer history file to", backup_hist_file)
- else:
- os.remove(hist_file)
-
- os.rename(new_hist_file, hist_file)
-
-class HistoryClear(HistoryTrim):
- description = clear_hist_help
+ )
+ ))
+
+ aliases=Dict(dict(
+ keep = 'HistoryTrim.keep'
+ ))
+
+ def start(self):
+ profile_dir = self.profile_dir.location
+ hist_file = os.path.join(profile_dir, 'history.sqlite')
+ con = sqlite3.connect(hist_file)
+
+ # Grab the recent history from the current database.
+ inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
+ 'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
+ if len(inputs) <= self.keep:
+ print("There are already at most %d entries in the history database." % self.keep)
+ print("Not doing anything. Use --keep= argument to keep fewer entries")
+ return
+
+ print("Trimming history to the most recent %d entries." % self.keep)
+
+ inputs.pop() # Remove the extra element we got to check the length.
+ inputs.reverse()
+ if inputs:
+ first_session = inputs[0][0]
+ outputs = list(con.execute('SELECT session, line, output FROM '
+ 'output_history WHERE session >= ?', (first_session,)))
+ sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
+ 'sessions WHERE session >= ?', (first_session,)))
+ con.close()
+
+ # Create the new history database.
+ new_hist_file = os.path.join(profile_dir, 'history.sqlite.new')
+ i = 0
+ while os.path.exists(new_hist_file):
+ # Make sure we don't interfere with an existing file.
+ i += 1
+ new_hist_file = os.path.join(profile_dir, 'history.sqlite.new'+str(i))
+ new_db = sqlite3.connect(new_hist_file)
+ new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
+ primary key autoincrement, start timestamp,
+ end timestamp, num_cmds integer, remark text)""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS history
+ (session integer, line integer, source text, source_raw text,
+ PRIMARY KEY (session, line))""")
+ new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
+ (session integer, line integer, output text,
+ PRIMARY KEY (session, line))""")
+ new_db.commit()
+
+
+ if inputs:
+ with new_db:
+ # Add the recent history into the new database.
+ new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
+ new_db.executemany('insert into history values (?,?,?,?)', inputs)
+ new_db.executemany('insert into output_history values (?,?,?)', outputs)
+ new_db.close()
+
+ if self.backup:
+ i = 1
+ backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
+ while os.path.exists(backup_hist_file):
+ i += 1
+ backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
+ os.rename(hist_file, backup_hist_file)
+ print("Backed up longer history file to", backup_hist_file)
+ else:
+ os.remove(hist_file)
+
+ os.rename(new_hist_file, hist_file)
+
+class HistoryClear(HistoryTrim):
+ description = clear_hist_help
keep = Int(0,
- help="Number of recent lines to keep in the database.")
-
+ help="Number of recent lines to keep in the database.")
+
force = Bool(False,
help="Don't prompt user for confirmation"
).tag(config=True)
-
- flags = Dict(dict(
- force = ({'HistoryClear' : {'force' : True}},
+
+ flags = Dict(dict(
+ force = ({'HistoryClear' : {'force' : True}},
force.help),
- f = ({'HistoryTrim' : {'force' : True}},
+ f = ({'HistoryTrim' : {'force' : True}},
force.help
- )
- ))
- aliases = Dict()
-
- def start(self):
- if self.force or ask_yes_no("Really delete all ipython history? ",
- default="no", interrupt="no"):
- HistoryTrim.start(self)
-
-class HistoryApp(Application):
- name = u'ipython-history'
- description = "Manage the IPython history database."
-
- subcommands = Dict(dict(
- trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
- clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
- ))
-
- def start(self):
- if self.subapp is None:
- print("No subcommand specified. Must specify one of: %s" % \
- (self.subcommands.keys()))
- print()
- self.print_description()
- self.print_subcommands()
- self.exit(1)
- else:
- return self.subapp.start()
+ )
+ ))
+ aliases = Dict()
+
+ def start(self):
+ if self.force or ask_yes_no("Really delete all ipython history? ",
+ default="no", interrupt="no"):
+ HistoryTrim.start(self)
+
+class HistoryApp(Application):
+ name = u'ipython-history'
+ description = "Manage the IPython history database."
+
+ subcommands = Dict(dict(
+ trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
+ clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s" % \
+ (self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
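
The trim operation above reduces to one ordered query plus a length check
before any copying happens. A standalone sketch of that first step, under
the assumption that an IPython history database already exists (the
hist_file path here is hypothetical; the application resolves it as
profile_dir/history.sqlite):

    import sqlite3

    hist_file = 'history.sqlite'  # hypothetical path for illustration
    keep = 1000

    con = sqlite3.connect(hist_file)
    # Fetch keep+1 rows; the extra row only signals whether a trim is needed.
    rows = list(con.execute(
        'SELECT session, line, source, source_raw FROM '
        'history ORDER BY session DESC, line DESC LIMIT ?', (keep + 1,)))
    con.close()

    if len(rows) <= keep:
        print("Nothing to trim.")
    else:
        rows.pop()      # drop the sentinel row
        rows.reverse()  # restore chronological order for re-insertion
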
diff --git a/contrib/python/ipython/py2/IPython/core/hooks.py b/contrib/python/ipython/py2/IPython/core/hooks.py
index 374ccb4b0f..e6fc84087f 100644
--- a/contrib/python/ipython/py2/IPython/core/hooks.py
+++ b/contrib/python/ipython/py2/IPython/core/hooks.py
@@ -1,98 +1,98 @@
-"""Hooks for IPython.
-
-In Python, it is possible to overwrite any method of any object if you really
-want to. But IPython exposes a few 'hooks', methods which are *designed* to
-be overwritten by users for customization purposes. This module defines the
-default versions of all such hooks, which get used by IPython if not
-overridden by the user.
-
-Hooks are simple functions, but they should be declared with ``self`` as their
-first argument, because when activated they are registered into IPython as
-instance methods. The self argument will be the IPython running instance
-itself, so hooks have full access to the entire IPython object.
-
-If you wish to define a new hook and activate it, you can make an :doc:`extension
-</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
-example, you could use a startup file like this::
-
- import os
-
- def calljed(self,filename, linenum):
- "My editor hook calls the jed editor directly."
- print "Calling my own editor, jed ..."
- if os.system('jed +%d %s' % (linenum,filename)) != 0:
- raise TryNext()
-
- def load_ipython_extension(ip):
- ip.set_hook('editor', calljed)
-
-"""
-
-#*****************************************************************************
-# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-import os
-import subprocess
+"""Hooks for IPython.
+
+In Python, it is possible to overwrite any method of any object if you really
+want to. But IPython exposes a few 'hooks', methods which are *designed* to
+be overwritten by users for customization purposes. This module defines the
+default versions of all such hooks, which get used by IPython if not
+overridden by the user.
+
+Hooks are simple functions, but they should be declared with ``self`` as their
+first argument, because when activated they are registered into IPython as
+instance methods. The self argument will be the IPython running instance
+itself, so hooks have full access to the entire IPython object.
+
+If you wish to define a new hook and activate it, you can make an :doc:`extension
+</config/extensions/index>` or a :ref:`startup script <startup_files>`. For
+example, you could use a startup file like this::
+
+ import os
+
+ def calljed(self,filename, linenum):
+ "My editor hook calls the jed editor directly."
+ print "Calling my own editor, jed ..."
+ if os.system('jed +%d %s' % (linenum,filename)) != 0:
+ raise TryNext()
+
+ def load_ipython_extension(ip):
+ ip.set_hook('editor', calljed)
+
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import os
+import subprocess
import warnings
-import sys
-
-from IPython.core.error import TryNext
-
-# List here all the default hooks. For now it's just the editor functions
-# but over time we'll move here all the public API for user-accessible things.
-
-__all__ = ['editor', 'fix_error_editor', 'synchronize_with_editor',
- 'shutdown_hook', 'late_startup_hook',
- 'show_in_pager','pre_prompt_hook',
- 'pre_run_code_hook', 'clipboard_get']
-
-deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
- 'late_startup_hook': "a callback for the 'shell_initialized' event",
- 'shutdown_hook': "the atexit module",
- }
-
-def editor(self, filename, linenum=None, wait=True):
- """Open the default editor at the given filename and linenumber.
-
- This is IPython's default editor hook, you can use it as an example to
- write your own modified one. To set your own editor function as the
- new editor hook, call ip.set_hook('editor',yourfunc)."""
-
- # IPython configures a default editor at startup by reading $EDITOR from
- # the environment, and falling back on vi (unix) or notepad (win32).
- editor = self.editor
-
- # marker for at which line to open the file (for existing objects)
- if linenum is None or editor=='notepad':
- linemark = ''
- else:
- linemark = '+%d' % int(linenum)
-
- # Enclose in quotes if necessary and legal
- if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
- editor = '"%s"' % editor
-
- # Call the actual editor
- proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
- shell=True)
- if wait and proc.wait() != 0:
- raise TryNext()
-
-import tempfile
-def fix_error_editor(self,filename,linenum,column,msg):
+import sys
+
+from IPython.core.error import TryNext
+
+# List here all the default hooks. For now it's just the editor functions
+# but over time we'll move here all the public API for user-accessible things.
+
+__all__ = ['editor', 'fix_error_editor', 'synchronize_with_editor',
+ 'shutdown_hook', 'late_startup_hook',
+ 'show_in_pager','pre_prompt_hook',
+ 'pre_run_code_hook', 'clipboard_get']
+
+deprecated = {'pre_run_code_hook': "a callback for the 'pre_execute' or 'pre_run_cell' event",
+ 'late_startup_hook': "a callback for the 'shell_initialized' event",
+ 'shutdown_hook': "the atexit module",
+ }
+
+def editor(self, filename, linenum=None, wait=True):
+ """Open the default editor at the given filename and linenumber.
+
+ This is IPython's default editor hook, you can use it as an example to
+ write your own modified one. To set your own editor function as the
+ new editor hook, call ip.set_hook('editor',yourfunc)."""
+
+ # IPython configures a default editor at startup by reading $EDITOR from
+ # the environment, and falling back on vi (unix) or notepad (win32).
+ editor = self.editor
+
+ # marker for at which line to open the file (for existing objects)
+ if linenum is None or editor=='notepad':
+ linemark = ''
+ else:
+ linemark = '+%d' % int(linenum)
+
+ # Enclose in quotes if necessary and legal
+ if ' ' in editor and os.path.isfile(editor) and editor[0] != '"':
+ editor = '"%s"' % editor
+
+ # Call the actual editor
+ proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename),
+ shell=True)
+ if wait and proc.wait() != 0:
+ raise TryNext()
+
+import tempfile
+def fix_error_editor(self,filename,linenum,column,msg):
"""DEPRECATED
Open the editor at the given filename, linenumber, column and
- show an error message. This is used for correcting syntax errors.
- The current implementation only has special support for the VIM editor,
- and falls back on the 'editor' hook if VIM is not used.
-
+ show an error message. This is used for correcting syntax errors.
+ The current implementation only has special support for the VIM editor,
+ and falls back on the 'editor' hook if VIM is not used.
+
Call ip.set_hook('fix_error_editor',yourfunc) to use your own function,
- """
+ """
warnings.warn("""
`fix_error_editor` is pending deprecation as of IPython 5.0 and will be removed
@@ -102,125 +102,125 @@ happend to use this function and still need it please make your voice heard on
the mailing list ipython-dev@python.org , or on the GitHub Issue tracker:
https://github.com/ipython/ipython/issues/9649 """, UserWarning)
- def vim_quickfix_file():
- t = tempfile.NamedTemporaryFile()
- t.write('%s:%d:%d:%s\n' % (filename,linenum,column,msg))
- t.flush()
- return t
- if os.path.basename(self.editor) != 'vim':
- self.hooks.editor(filename,linenum)
- return
- t = vim_quickfix_file()
- try:
- if os.system('vim --cmd "set errorformat=%f:%l:%c:%m" -q ' + t.name):
- raise TryNext()
- finally:
- t.close()
-
-
-def synchronize_with_editor(self, filename, linenum, column):
- pass
-
-
-class CommandChainDispatcher:
- """ Dispatch calls to a chain of commands until some func can handle it
-
- Usage: instantiate, execute "add" to add commands (with optional
- priority), execute normally via f() calling mechanism.
-
- """
- def __init__(self,commands=None):
- if commands is None:
- self.chain = []
- else:
- self.chain = commands
-
-
- def __call__(self,*args, **kw):
- """ Command chain is called just like normal func.
-
- This will call all funcs in chain with the same args as were given to
-        this function, and return the result of the first func that didn't raise
- TryNext"""
- last_exc = TryNext()
- for prio,cmd in self.chain:
- #print "prio",prio,"cmd",cmd #dbg
- try:
- return cmd(*args, **kw)
- except TryNext as exc:
- last_exc = exc
- # if no function will accept it, raise TryNext up to the caller
- raise last_exc
-
- def __str__(self):
- return str(self.chain)
-
- def add(self, func, priority=0):
- """ Add a func to the cmd chain with given priority """
- self.chain.append((priority, func))
- self.chain.sort(key=lambda x: x[0])
-
- def __iter__(self):
- """ Return all objects in chain.
-
- Handy if the objects are not callable.
- """
- return iter(self.chain)
-
-
-def shutdown_hook(self):
- """ default shutdown hook
-
-    Typically, shutdown hooks should raise TryNext so all shutdown ops are done
- """
-
- #print "default shutdown hook ok" # dbg
- return
-
-
-def late_startup_hook(self):
- """ Executed after ipython has been constructed and configured
-
- """
- #print "default startup hook ok" # dbg
-
-
-def show_in_pager(self, data, start, screen_lines):
- """ Run a string through pager """
- # raising TryNext here will use the default paging functionality
- raise TryNext
-
-
-def pre_prompt_hook(self):
- """ Run before displaying the next prompt
-
- Use this e.g. to display output from asynchronous operations (in order
- to not mess up text entry)
- """
-
- return None
-
-
-def pre_run_code_hook(self):
- """ Executed before running the (prefiltered) code in IPython """
- return None
-
-
-def clipboard_get(self):
- """ Get text from the clipboard.
- """
- from IPython.lib.clipboard import (
- osx_clipboard_get, tkinter_clipboard_get,
- win32_clipboard_get
- )
- if sys.platform == 'win32':
- chain = [win32_clipboard_get, tkinter_clipboard_get]
- elif sys.platform == 'darwin':
- chain = [osx_clipboard_get, tkinter_clipboard_get]
- else:
- chain = [tkinter_clipboard_get]
- dispatcher = CommandChainDispatcher()
- for func in chain:
- dispatcher.add(func)
- text = dispatcher()
- return text
+ def vim_quickfix_file():
+ t = tempfile.NamedTemporaryFile()
+ t.write('%s:%d:%d:%s\n' % (filename,linenum,column,msg))
+ t.flush()
+ return t
+ if os.path.basename(self.editor) != 'vim':
+ self.hooks.editor(filename,linenum)
+ return
+ t = vim_quickfix_file()
+ try:
+ if os.system('vim --cmd "set errorformat=%f:%l:%c:%m" -q ' + t.name):
+ raise TryNext()
+ finally:
+ t.close()
+
+
+def synchronize_with_editor(self, filename, linenum, column):
+ pass
+
+
+class CommandChainDispatcher:
+ """ Dispatch calls to a chain of commands until some func can handle it
+
+ Usage: instantiate, execute "add" to add commands (with optional
+ priority), execute normally via f() calling mechanism.
+
+ """
+ def __init__(self,commands=None):
+ if commands is None:
+ self.chain = []
+ else:
+ self.chain = commands
+
+
+ def __call__(self,*args, **kw):
+ """ Command chain is called just like normal func.
+
+ This will call all funcs in chain with the same args as were given to
+        this function, and return the result of the first func that didn't raise
+ TryNext"""
+ last_exc = TryNext()
+ for prio,cmd in self.chain:
+ #print "prio",prio,"cmd",cmd #dbg
+ try:
+ return cmd(*args, **kw)
+ except TryNext as exc:
+ last_exc = exc
+ # if no function will accept it, raise TryNext up to the caller
+ raise last_exc
+
+ def __str__(self):
+ return str(self.chain)
+
+ def add(self, func, priority=0):
+ """ Add a func to the cmd chain with given priority """
+ self.chain.append((priority, func))
+ self.chain.sort(key=lambda x: x[0])
+
+ def __iter__(self):
+ """ Return all objects in chain.
+
+ Handy if the objects are not callable.
+ """
+ return iter(self.chain)
+
+
+def shutdown_hook(self):
+ """ default shutdown hook
+
+    Typically, shutdown hooks should raise TryNext so all shutdown ops are done
+ """
+
+ #print "default shutdown hook ok" # dbg
+ return
+
+
+def late_startup_hook(self):
+ """ Executed after ipython has been constructed and configured
+
+ """
+ #print "default startup hook ok" # dbg
+
+
+def show_in_pager(self, data, start, screen_lines):
+ """ Run a string through pager """
+ # raising TryNext here will use the default paging functionality
+ raise TryNext
+
+
+def pre_prompt_hook(self):
+ """ Run before displaying the next prompt
+
+ Use this e.g. to display output from asynchronous operations (in order
+ to not mess up text entry)
+ """
+
+ return None
+
+
+def pre_run_code_hook(self):
+ """ Executed before running the (prefiltered) code in IPython """
+ return None
+
+
+def clipboard_get(self):
+ """ Get text from the clipboard.
+ """
+ from IPython.lib.clipboard import (
+ osx_clipboard_get, tkinter_clipboard_get,
+ win32_clipboard_get
+ )
+ if sys.platform == 'win32':
+ chain = [win32_clipboard_get, tkinter_clipboard_get]
+ elif sys.platform == 'darwin':
+ chain = [osx_clipboard_get, tkinter_clipboard_get]
+ else:
+ chain = [tkinter_clipboard_get]
+ dispatcher = CommandChainDispatcher()
+ for func in chain:
+ dispatcher.add(func)
+ text = dispatcher()
+ return text
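
Since CommandChainDispatcher is the machinery behind clipboard_get and the
other chained hooks above, a small isolated sketch of its fall-through
behaviour may help (assuming this py2 tree's IPython modules are
importable; the handler names are made up for the example):

    from IPython.core.error import TryNext
    from IPython.core.hooks import CommandChainDispatcher

    def primary(text):
        raise TryNext()  # decline, so the chain falls through

    def fallback(text):
        return text.upper()

    dispatch = CommandChainDispatcher()
    dispatch.add(primary, priority=0)    # lower priority value runs first
    dispatch.add(fallback, priority=50)
    print(dispatch("abc"))               # prints ABC, served by fallback
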
diff --git a/contrib/python/ipython/py2/IPython/core/inputsplitter.py b/contrib/python/ipython/py2/IPython/core/inputsplitter.py
index 2c7125f88d..ac14747d69 100644
--- a/contrib/python/ipython/py2/IPython/core/inputsplitter.py
+++ b/contrib/python/ipython/py2/IPython/core/inputsplitter.py
@@ -1,681 +1,681 @@
-"""Input handling and transformation machinery.
-
-The first class in this module, :class:`InputSplitter`, is designed to tell when
-input from a line-oriented frontend is complete and should be executed, and when
-the user should be prompted for another line of code instead. The name 'input
-splitter' is largely for historical reasons.
-
-A companion, :class:`IPythonInputSplitter`, provides the same functionality but
-with full support for the extended IPython syntax (magics, system calls, etc).
-The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
-:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
-and stores the results.
-
-For more details, see the class docstrings below.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-import ast
-import codeop
-import re
-import sys
-import warnings
-
-from IPython.utils.py3compat import cast_unicode
-from IPython.core.inputtransformer import (leading_indent,
- classic_prompt,
- ipy_prompt,
- strip_encoding_cookie,
- cellmagic,
- assemble_logical_lines,
- help_end,
- escaped_commands,
- assign_from_magic,
- assign_from_system,
- assemble_python_lines,
- )
-
-# These are available in this module for backwards compatibility.
-from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
- ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
- ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
-# FIXME: These are general-purpose utilities that later can be moved to the
-# general ward. Kept here for now because we're being very strict about test
-# coverage with this code, and this lets us ensure that we keep 100% coverage
-# while developing.
-
-# compiled regexps for autoindent management
-dedent_re = re.compile('|'.join([
- r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
- r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
- r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
- r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
- r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
- r'^\s+break\s*$', # break (optionally followed by trailing spaces)
- r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
-]))
-ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
-
-# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
-# before pure comments
-comment_line_re = re.compile('^\s*\#')
-
-
-def num_ini_spaces(s):
- """Return the number of initial spaces in a string.
-
- Note that tabs are counted as a single space. For now, we do *not* support
- mixing of tabs and spaces in the user's input.
-
- Parameters
- ----------
- s : string
-
- Returns
- -------
- n : int
- """
-
- ini_spaces = ini_spaces_re.match(s)
- if ini_spaces:
- return ini_spaces.end()
- else:
- return 0
-
-def last_blank(src):
- """Determine if the input source ends in a blank.
-
- A blank is either a newline or a line consisting of whitespace.
-
- Parameters
- ----------
- src : string
- A single or multiline string.
- """
- if not src: return False
- ll = src.splitlines()[-1]
- return (ll == '') or ll.isspace()
-
-
-last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
-last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
-
-def last_two_blanks(src):
- """Determine if the input source ends in two blanks.
-
- A blank is either a newline or a line consisting of whitespace.
-
- Parameters
- ----------
- src : string
- A single or multiline string.
- """
- if not src: return False
- # The logic here is tricky: I couldn't get a regexp to work and pass all
- # the tests, so I took a different approach: split the source by lines,
- # grab the last two and prepend '###\n' as a stand-in for whatever was in
- # the body before the last two lines. Then, with that structure, it's
- # possible to analyze with two regexps. Not the most elegant solution, but
- # it works. If anyone tries to change this logic, make sure to validate
- # the whole test suite first!
- new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
- return (bool(last_two_blanks_re.match(new_src)) or
- bool(last_two_blanks_re2.match(new_src)) )
-
-
-def remove_comments(src):
- """Remove all comments from input source.
-
- Note: comments are NOT recognized inside of strings!
-
- Parameters
- ----------
- src : string
- A single or multiline input string.
-
- Returns
- -------
- String with all Python comments removed.
- """
-
- return re.sub('#.*', '', src)
-
-
-def get_input_encoding():
- """Return the default standard input encoding.
-
- If sys.stdin has no encoding, 'ascii' is returned."""
- # There are strange environments for which sys.stdin.encoding is None. We
- # ensure that a valid encoding is returned.
- encoding = getattr(sys.stdin, 'encoding', None)
- if encoding is None:
- encoding = 'ascii'
- return encoding
-
-#-----------------------------------------------------------------------------
-# Classes and functions for normal Python syntax handling
-#-----------------------------------------------------------------------------
-
-class InputSplitter(object):
- r"""An object that can accumulate lines of Python source before execution.
-
- This object is designed to be fed python source line-by-line, using
- :meth:`push`. It will return on each push whether the currently pushed
- code could be executed already. In addition, it provides a method called
- :meth:`push_accepts_more` that can be used to query whether more input
- can be pushed into a single interactive block.
-
- This is a simple example of how an interactive terminal-based client can use
- this tool::
-
- isp = InputSplitter()
- while isp.push_accepts_more():
- indent = ' '*isp.indent_spaces
- prompt = '>>> ' + indent
- line = indent + raw_input(prompt)
- isp.push(line)
- print 'Input source was:\n', isp.source_reset(),
- """
- # Number of spaces of indentation computed from input that has been pushed
-    # so far. This is the attribute callers should query to get the current
- # indentation level, in order to provide auto-indent facilities.
- indent_spaces = 0
- # String, indicating the default input encoding. It is computed by default
- # at initialization time via get_input_encoding(), but it can be reset by a
- # client with specific knowledge of the encoding.
- encoding = ''
- # String where the current full source input is stored, properly encoded.
- # Reading this attribute is the normal way of querying the currently pushed
- # source code, that has been properly encoded.
- source = ''
- # Code object corresponding to the current source. It is automatically
- # synced to the source, so it can be queried at any time to obtain the code
- # object; it will be None if the source doesn't compile to valid Python.
- code = None
-
- # Private attributes
-
- # List with lines of input accumulated so far
- _buffer = None
- # Command compiler
- _compile = None
- # Mark when input has changed indentation all the way back to flush-left
- _full_dedent = False
- # Boolean indicating whether the current block is complete
- _is_complete = None
- # Boolean indicating whether the current block has an unrecoverable syntax error
- _is_invalid = False
-
- def __init__(self):
- """Create a new InputSplitter instance.
- """
- self._buffer = []
- self._compile = codeop.CommandCompiler()
- self.encoding = get_input_encoding()
-
- def reset(self):
- """Reset the input buffer and associated state."""
- self.indent_spaces = 0
- self._buffer[:] = []
- self.source = ''
- self.code = None
- self._is_complete = False
- self._is_invalid = False
- self._full_dedent = False
-
- def source_reset(self):
- """Return the input source and perform a full reset.
- """
- out = self.source
- self.reset()
- return out
-
- def check_complete(self, source):
- """Return whether a block of code is ready to execute, or should be continued
-
- This is a non-stateful API, and will reset the state of this InputSplitter.
-
- Parameters
- ----------
- source : string
- Python input code, which can be multiline.
-
- Returns
- -------
- status : str
- One of 'complete', 'incomplete', or 'invalid' if source is not a
- prefix of valid code.
- indent_spaces : int or None
- The number of spaces by which to indent the next line of code. If
- status is not 'incomplete', this is None.
- """
- self.reset()
- try:
- self.push(source)
- except SyntaxError:
- # Transformers in IPythonInputSplitter can raise SyntaxError,
- # which push() will not catch.
- return 'invalid', None
- else:
- if self._is_invalid:
- return 'invalid', None
- elif self.push_accepts_more():
- return 'incomplete', self.indent_spaces
- else:
- return 'complete', None
- finally:
- self.reset()
-
- def push(self, lines):
- """Push one or more lines of input.
-
- This stores the given lines and returns a status code indicating
- whether the code forms a complete Python block or not.
-
- Any exceptions generated in compilation are swallowed, but if an
- exception was produced, the method returns True.
-
- Parameters
- ----------
- lines : string
- One or more lines of Python input.
-
- Returns
- -------
- is_complete : boolean
- True if the current input source (the result of the current input
- plus prior inputs) forms a complete Python execution block. Note that
- this value is also stored as a private attribute (``_is_complete``), so it
- can be queried at any time.
- """
- self._store(lines)
- source = self.source
-
- # Before calling _compile(), reset the code object to None so that if an
- # exception is raised in compilation, we don't mislead by having
- # inconsistent code/source attributes.
- self.code, self._is_complete = None, None
- self._is_invalid = False
-
- # Honor termination lines properly
- if source.endswith('\\\n'):
- return False
-
- self._update_indent(lines)
- try:
- with warnings.catch_warnings():
- warnings.simplefilter('error', SyntaxWarning)
- self.code = self._compile(source, symbol="exec")
- # Invalid syntax can produce any of a number of different errors from
- # inside the compiler, so we have to catch them all. Syntax errors
- # immediately produce a 'ready' block, so the invalid Python can be
- # sent to the kernel for evaluation with possible ipython
- # special-syntax conversion.
- except (SyntaxError, OverflowError, ValueError, TypeError,
- MemoryError, SyntaxWarning):
- self._is_complete = True
- self._is_invalid = True
- else:
- # Compilation didn't produce any exceptions (though it may not have
- # given a complete code object)
- self._is_complete = self.code is not None
-
- return self._is_complete
-
- def push_accepts_more(self):
- """Return whether a block of interactive input can accept more input.
-
- This method is meant to be used by line-oriented frontends, who need to
- guess whether a block is complete or not based solely on prior and
- current input lines. The InputSplitter considers it has a complete
- interactive block and will not accept more input when either:
-
- * A SyntaxError is raised
-
- * The code is complete and consists of a single line or a single
- non-compound statement
-
- * The code is complete and has a blank line at the end
-
- If the current input produces a syntax error, this method immediately
- returns False but does *not* raise the syntax error exception, as
- typically clients will want to send invalid syntax to an execution
- backend which might convert the invalid syntax into valid Python via
- one of the dynamic IPython mechanisms.
- """
-
- # With incomplete input, unconditionally accept more
- # A syntax error also sets _is_complete to True - see push()
- if not self._is_complete:
- #print("Not complete") # debug
- return True
-
- # The user can make any (complete) input execute by leaving a blank line
- last_line = self.source.splitlines()[-1]
- if (not last_line) or last_line.isspace():
- #print("Blank line") # debug
- return False
-
- # If there's just a single line or AST node, and we're flush left, as is
- # the case after a simple statement such as 'a=1', we want to execute it
- # straight away.
- if self.indent_spaces==0:
- if len(self.source.splitlines()) <= 1:
- return False
-
- try:
- code_ast = ast.parse(u''.join(self._buffer))
- except Exception:
- #print("Can't parse AST") # debug
- return False
- else:
- if len(code_ast.body) == 1 and \
- not hasattr(code_ast.body[0], 'body'):
- #print("Simple statement") # debug
- return False
-
- # General fallback - accept more code
- return True
-
- #------------------------------------------------------------------------
- # Private interface
- #------------------------------------------------------------------------
-
- def _find_indent(self, line):
- """Compute the new indentation level for a single line.
-
- Parameters
- ----------
- line : str
- A single new line of non-whitespace, non-comment Python input.
-
- Returns
- -------
- indent_spaces : int
- New value for the indent level (it may be equal to self.indent_spaces
-          if indentation doesn't change).
-
- full_dedent : boolean
- Whether the new line causes a full flush-left dedent.
- """
- indent_spaces = self.indent_spaces
- full_dedent = self._full_dedent
-
- inisp = num_ini_spaces(line)
- if inisp < indent_spaces:
- indent_spaces = inisp
- if indent_spaces <= 0:
- #print 'Full dedent in text',self.source # dbg
- full_dedent = True
-
- if line.rstrip()[-1] == ':':
- indent_spaces += 4
- elif dedent_re.match(line):
- indent_spaces -= 4
- if indent_spaces <= 0:
- full_dedent = True
-
- # Safety
- if indent_spaces < 0:
- indent_spaces = 0
- #print 'safety' # dbg
-
- return indent_spaces, full_dedent
-
- def _update_indent(self, lines):
- for line in remove_comments(lines).splitlines():
- if line and not line.isspace():
- self.indent_spaces, self._full_dedent = self._find_indent(line)
-
- def _store(self, lines, buffer=None, store='source'):
- """Store one or more lines of input.
-
- If input lines are not newline-terminated, a newline is automatically
- appended."""
-
- if buffer is None:
- buffer = self._buffer
-
- if lines.endswith('\n'):
- buffer.append(lines)
- else:
- buffer.append(lines+'\n')
- setattr(self, store, self._set_source(buffer))
-
- def _set_source(self, buffer):
- return u''.join(buffer)
-
-
-class IPythonInputSplitter(InputSplitter):
- """An input splitter that recognizes all of IPython's special syntax."""
-
- # String with raw, untransformed input.
- source_raw = ''
-
- # Flag to track when a transformer has stored input that it hasn't given
- # back yet.
- transformer_accumulating = False
-
- # Flag to track when assemble_python_lines has stored input that it hasn't
- # given back yet.
- within_python_line = False
-
- # Private attributes
-
- # List with lines of raw input accumulated so far.
- _buffer_raw = None
-
- def __init__(self, line_input_checker=True, physical_line_transforms=None,
- logical_line_transforms=None, python_line_transforms=None):
- super(IPythonInputSplitter, self).__init__()
- self._buffer_raw = []
- self._validate = True
-
- if physical_line_transforms is not None:
- self.physical_line_transforms = physical_line_transforms
- else:
- self.physical_line_transforms = [
- leading_indent(),
- classic_prompt(),
- ipy_prompt(),
- cellmagic(end_on_blank_line=line_input_checker),
- strip_encoding_cookie(),
- ]
-
- self.assemble_logical_lines = assemble_logical_lines()
- if logical_line_transforms is not None:
- self.logical_line_transforms = logical_line_transforms
- else:
- self.logical_line_transforms = [
- help_end(),
- escaped_commands(),
- assign_from_magic(),
- assign_from_system(),
- ]
-
- self.assemble_python_lines = assemble_python_lines()
- if python_line_transforms is not None:
- self.python_line_transforms = python_line_transforms
- else:
- # We don't use any of these at present
- self.python_line_transforms = []
-
- @property
- def transforms(self):
- "Quick access to all transformers."
- return self.physical_line_transforms + \
- [self.assemble_logical_lines] + self.logical_line_transforms + \
- [self.assemble_python_lines] + self.python_line_transforms
-
- @property
- def transforms_in_use(self):
- """Transformers, excluding logical line transformers if we're in a
- Python line."""
- t = self.physical_line_transforms[:]
- if not self.within_python_line:
- t += [self.assemble_logical_lines] + self.logical_line_transforms
- return t + [self.assemble_python_lines] + self.python_line_transforms
-
- def reset(self):
- """Reset the input buffer and associated state."""
- super(IPythonInputSplitter, self).reset()
- self._buffer_raw[:] = []
- self.source_raw = ''
- self.transformer_accumulating = False
- self.within_python_line = False
-
- for t in self.transforms:
- try:
- t.reset()
- except SyntaxError:
- # Nothing that calls reset() expects to handle transformer
- # errors
- pass
-
- def flush_transformers(self):
- def _flush(transform, outs):
- """yield transformed lines
-
- always strings, never None
-
- transform: the current transform
- outs: an iterable of previously transformed inputs.
- Each may be multiline, which will be passed
- one line at a time to transform.
- """
- for out in outs:
- for line in out.splitlines():
- # push one line at a time
- tmp = transform.push(line)
- if tmp is not None:
- yield tmp
-
- # reset the transform
- tmp = transform.reset()
- if tmp is not None:
- yield tmp
-
- out = []
- for t in self.transforms_in_use:
- out = _flush(t, out)
-
- out = list(out)
- if out:
- self._store('\n'.join(out))
-
- def raw_reset(self):
- """Return raw input only and perform a full reset.
- """
- out = self.source_raw
- self.reset()
- return out
-
- def source_reset(self):
- try:
- self.flush_transformers()
- return self.source
- finally:
- self.reset()
-
- def push_accepts_more(self):
- if self.transformer_accumulating:
- return True
- else:
- return super(IPythonInputSplitter, self).push_accepts_more()
-
- def transform_cell(self, cell):
- """Process and translate a cell of input.
- """
- self.reset()
- try:
- self.push(cell)
- self.flush_transformers()
- return self.source
- finally:
- self.reset()
-
- def push(self, lines):
- """Push one or more lines of IPython input.
-
- This stores the given lines and returns a status code indicating
- whether the code forms a complete Python block or not, after processing
- all input lines for special IPython syntax.
-
- Any exceptions generated in compilation are swallowed, but if an
- exception was produced, the method returns True.
-
- Parameters
- ----------
- lines : string
- One or more lines of Python input.
-
- Returns
- -------
- is_complete : boolean
- True if the current input source (the result of the current input
- plus prior inputs) forms a complete Python execution block. Note that
- this value is also stored as a private attribute (_is_complete), so it
- can be queried at any time.
- """
-
- # We must ensure all input is pure unicode
- lines = cast_unicode(lines, self.encoding)
- # ''.splitlines() --> [], but we need to push the empty line to transformers
- lines_list = lines.splitlines()
- if not lines_list:
- lines_list = ['']
-
- # Store raw source before applying any transformations to it. Note
- # that this must be done *after* the reset() call that would otherwise
- # flush the buffer.
- self._store(lines, self._buffer_raw, 'source_raw')
-
- for line in lines_list:
- out = self.push_line(line)
-
- return out
-
- def push_line(self, line):
- buf = self._buffer
-
- def _accumulating(dbg):
- #print(dbg)
- self.transformer_accumulating = True
- return False
-
- for transformer in self.physical_line_transforms:
- line = transformer.push(line)
- if line is None:
- return _accumulating(transformer)
-
- if not self.within_python_line:
- line = self.assemble_logical_lines.push(line)
- if line is None:
- return _accumulating('acc logical line')
-
- for transformer in self.logical_line_transforms:
- line = transformer.push(line)
- if line is None:
- return _accumulating(transformer)
-
- line = self.assemble_python_lines.push(line)
- if line is None:
- self.within_python_line = True
- return _accumulating('acc python line')
- else:
- self.within_python_line = False
-
- for transformer in self.python_line_transforms:
- line = transformer.push(line)
- if line is None:
- return _accumulating(transformer)
-
- #print("transformers clear") #debug
- self.transformer_accumulating = False
- return super(IPythonInputSplitter, self).push(line)
+"""Input handling and transformation machinery.
+
+The first class in this module, :class:`InputSplitter`, is designed to tell when
+input from a line-oriented frontend is complete and should be executed, and when
+the user should be prompted for another line of code instead. The name 'input
+splitter' is largely for historical reasons.
+
+A companion, :class:`IPythonInputSplitter`, provides the same functionality but
+with full support for the extended IPython syntax (magics, system calls, etc).
+The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
+:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
+and stores the results.
+
+For more details, see the class docstrings below.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import ast
+import codeop
+import re
+import sys
+import warnings
+
+from IPython.utils.py3compat import cast_unicode
+from IPython.core.inputtransformer import (leading_indent,
+ classic_prompt,
+ ipy_prompt,
+ strip_encoding_cookie,
+ cellmagic,
+ assemble_logical_lines,
+ help_end,
+ escaped_commands,
+ assign_from_magic,
+ assign_from_system,
+ assemble_python_lines,
+ )
+
+# These are available in this module for backwards compatibility.
+from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+# FIXME: These are general-purpose utilities that later can be moved to the
+# general ward. Kept here for now because we're being very strict about test
+# coverage with this code, and this lets us ensure that we keep 100% coverage
+# while developing.
+
+# compiled regexps for autoindent management
+dedent_re = re.compile('|'.join([
+ r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
+ r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
+ r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
+ r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
+ r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
+ r'^\s+break\s*$', # break (optionally followed by trailing spaces)
+ r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
+]))
+ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
+
+# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
+# before pure comments
+comment_line_re = re.compile(r'^\s*#')
+
+
+def num_ini_spaces(s):
+ """Return the number of initial spaces in a string.
+
+ Note that tabs are counted as a single space. For now, we do *not* support
+ mixing of tabs and spaces in the user's input.
+
+ Parameters
+ ----------
+ s : string
+
+ Returns
+ -------
+ n : int
+ """
+
+ ini_spaces = ini_spaces_re.match(s)
+ if ini_spaces:
+ return ini_spaces.end()
+ else:
+ return 0
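+
+# Quick illustration (hedged, not from the source): tabs count as one column
+# each, e.g. num_ini_spaces('    x') -> 4 and num_ini_spaces('\tx') -> 1.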
+
+def last_blank(src):
+ """Determine if the input source ends in a blank.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ ll = src.splitlines()[-1]
+ return (ll == '') or ll.isspace()
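+
+# For example (illustrative): last_blank('if x:\n    pass\n\n') is True,
+# while last_blank('if x:') is False.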
+
+
+last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
+last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
+
+def last_two_blanks(src):
+ """Determine if the input source ends in two blanks.
+
+ A blank is either a newline or a line consisting of whitespace.
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline string.
+ """
+ if not src: return False
+ # The logic here is tricky: I couldn't get a regexp to work and pass all
+ # the tests, so I took a different approach: split the source by lines,
+ # grab the last two and prepend '###\n' as a stand-in for whatever was in
+ # the body before the last two lines. Then, with that structure, it's
+ # possible to analyze with two regexps. Not the most elegant solution, but
+ # it works. If anyone tries to change this logic, make sure to validate
+ # the whole test suite first!
+ new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
+ return (bool(last_two_blanks_re.match(new_src)) or
+ bool(last_two_blanks_re2.match(new_src)) )
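+
+# Illustrative: last_two_blanks('x\n') -> False, last_two_blanks('x\n\n\n') -> True.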
+
+
+def remove_comments(src):
+ """Remove all comments from input source.
+
+ Note: comments are NOT recognized inside of strings!
+
+ Parameters
+ ----------
+ src : string
+ A single or multiline input string.
+
+ Returns
+ -------
+ String with all Python comments removed.
+ """
+
+ return re.sub('#.*', '', src)
+
+
+def get_input_encoding():
+ """Return the default standard input encoding.
+
+ If sys.stdin has no encoding, 'ascii' is returned."""
+ # There are strange environments for which sys.stdin.encoding is None. We
+ # ensure that a valid encoding is returned.
+ encoding = getattr(sys.stdin, 'encoding', None)
+ if encoding is None:
+ encoding = 'ascii'
+ return encoding
+
+#-----------------------------------------------------------------------------
+# Classes and functions for normal Python syntax handling
+#-----------------------------------------------------------------------------
+
+class InputSplitter(object):
+ r"""An object that can accumulate lines of Python source before execution.
+
+ This object is designed to be fed python source line-by-line, using
+ :meth:`push`. It will return on each push whether the currently pushed
+ code could be executed already. In addition, it provides a method called
+ :meth:`push_accepts_more` that can be used to query whether more input
+ can be pushed into a single interactive block.
+
+ This is a simple example of how an interactive terminal-based client can use
+ this tool::
+
+ isp = InputSplitter()
+ while isp.push_accepts_more():
+ indent = ' '*isp.indent_spaces
+ prompt = '>>> ' + indent
+ line = indent + raw_input(prompt)
+ isp.push(line)
+ print 'Input source was:\n', isp.source_reset(),
+ """
+ # Number of spaces of indentation computed from input that has been pushed
+ # so far. This is the attribute callers should query to get the current
+ # indentation level, in order to provide auto-indent facilities.
+ indent_spaces = 0
+ # String, indicating the default input encoding. It is computed by default
+ # at initialization time via get_input_encoding(), but it can be reset by a
+ # client with specific knowledge of the encoding.
+ encoding = ''
+ # String where the current full source input is stored, properly encoded.
+ # Reading this attribute is the normal way of querying the currently pushed
+ # source code, already properly encoded.
+ source = ''
+ # Code object corresponding to the current source. It is automatically
+ # synced to the source, so it can be queried at any time to obtain the code
+ # object; it will be None if the source doesn't compile to valid Python.
+ code = None
+
+ # Private attributes
+
+ # List with lines of input accumulated so far
+ _buffer = None
+ # Command compiler
+ _compile = None
+ # Mark when input has changed indentation all the way back to flush-left
+ _full_dedent = False
+ # Boolean indicating whether the current block is complete
+ _is_complete = None
+ # Boolean indicating whether the current block has an unrecoverable syntax error
+ _is_invalid = False
+
+ def __init__(self):
+ """Create a new InputSplitter instance.
+ """
+ self._buffer = []
+ self._compile = codeop.CommandCompiler()
+ self.encoding = get_input_encoding()
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ self.indent_spaces = 0
+ self._buffer[:] = []
+ self.source = ''
+ self.code = None
+ self._is_complete = False
+ self._is_invalid = False
+ self._full_dedent = False
+
+ def source_reset(self):
+ """Return the input source and perform a full reset.
+ """
+ out = self.source
+ self.reset()
+ return out
+
+ def check_complete(self, source):
+ """Return whether a block of code is ready to execute, or should be continued
+
+ This is a non-stateful API, and will reset the state of this InputSplitter.
+
+ Parameters
+ ----------
+ source : string
+ Python input code, which can be multiline.
+
+ Returns
+ -------
+ status : str
+ One of 'complete', 'incomplete', or 'invalid' ('invalid' meaning the
+ source is not a prefix of valid code).
+ indent_spaces : int or None
+ The number of spaces by which to indent the next line of code. If
+ status is not 'incomplete', this is None.
+ """
+ self.reset()
+ try:
+ self.push(source)
+ except SyntaxError:
+ # Transformers in IPythonInputSplitter can raise SyntaxError,
+ # which push() will not catch.
+ return 'invalid', None
+ else:
+ if self._is_invalid:
+ return 'invalid', None
+ elif self.push_accepts_more():
+ return 'incomplete', self.indent_spaces
+ else:
+ return 'complete', None
+ finally:
+ self.reset()
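+
+ # Illustrative use of check_complete() (a hedged sketch, not from the source):
+ #   isp = InputSplitter()
+ #   isp.check_complete('a = 1')        # -> ('complete', None)
+ #   isp.check_complete('for i in x:')  # -> ('incomplete', 4)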
+
+ def push(self, lines):
+ """Push one or more lines of input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (``_is_complete``), so it
+ can be queried at any time.
+ """
+ self._store(lines)
+ source = self.source
+
+ # Before calling _compile(), reset the code object to None so that if an
+ # exception is raised in compilation, we don't mislead by having
+ # inconsistent code/source attributes.
+ self.code, self._is_complete = None, None
+ self._is_invalid = False
+
+ # Honor termination lines properly
+ if source.endswith('\\\n'):
+ return False
+
+ self._update_indent(lines)
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', SyntaxWarning)
+ self.code = self._compile(source, symbol="exec")
+ # Invalid syntax can produce any of a number of different errors from
+ # inside the compiler, so we have to catch them all. Syntax errors
+ # immediately produce a 'ready' block, so the invalid Python can be
+ # sent to the kernel for evaluation with possible ipython
+ # special-syntax conversion.
+ except (SyntaxError, OverflowError, ValueError, TypeError,
+ MemoryError, SyntaxWarning):
+ self._is_complete = True
+ self._is_invalid = True
+ else:
+ # Compilation didn't produce any exceptions (though it may not have
+ # given a complete code object)
+ self._is_complete = self.code is not None
+
+ return self._is_complete
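+
+ # A minimal sketch of the push()/push_accepts_more() interplay (hedged):
+ #   isp = InputSplitter()
+ #   isp.push('if True:\n    pass')  # -> True: the block compiles
+ #   isp.push_accepts_more()         # -> True: the indented body may continue
+ #   isp.push('')                    # a blank line ends the block
+ #   isp.push_accepts_more()         # -> False: ready to execute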
+
+ def push_accepts_more(self):
+ """Return whether a block of interactive input can accept more input.
+
+ This method is meant to be used by line-oriented frontends, who need to
+ guess whether a block is complete or not based solely on prior and
+ current input lines. The InputSplitter considers it has a complete
+ interactive block and will not accept more input when either:
+
+ * A SyntaxError is raised
+
+ * The code is complete and consists of a single line or a single
+ non-compound statement
+
+ * The code is complete and has a blank line at the end
+
+ If the current input produces a syntax error, this method immediately
+ returns False but does *not* raise the syntax error exception, as
+ typically clients will want to send invalid syntax to an execution
+ backend which might convert the invalid syntax into valid Python via
+ one of the dynamic IPython mechanisms.
+ """
+
+ # With incomplete input, unconditionally accept more
+ # A syntax error also sets _is_complete to True - see push()
+ if not self._is_complete:
+ #print("Not complete") # debug
+ return True
+
+ # The user can make any (complete) input execute by leaving a blank line
+ last_line = self.source.splitlines()[-1]
+ if (not last_line) or last_line.isspace():
+ #print("Blank line") # debug
+ return False
+
+ # If there's just a single line or AST node, and we're flush left, as is
+ # the case after a simple statement such as 'a=1', we want to execute it
+ # straight away.
+ if self.indent_spaces == 0:
+ if len(self.source.splitlines()) <= 1:
+ return False
+
+ try:
+ code_ast = ast.parse(u''.join(self._buffer))
+ except Exception:
+ #print("Can't parse AST") # debug
+ return False
+ else:
+ if len(code_ast.body) == 1 and \
+ not hasattr(code_ast.body[0], 'body'):
+ #print("Simple statement") # debug
+ return False
+
+ # General fallback - accept more code
+ return True
+
+ #------------------------------------------------------------------------
+ # Private interface
+ #------------------------------------------------------------------------
+
+ def _find_indent(self, line):
+ """Compute the new indentation level for a single line.
+
+ Parameters
+ ----------
+ line : str
+ A single new line of non-whitespace, non-comment Python input.
+
+ Returns
+ -------
+ indent_spaces : int
+ New value for the indent level (it may be equal to self.indent_spaces
+ if indentation doesn't change).
+
+ full_dedent : boolean
+ Whether the new line causes a full flush-left dedent.
+ """
+ indent_spaces = self.indent_spaces
+ full_dedent = self._full_dedent
+
+ inisp = num_ini_spaces(line)
+ if inisp < indent_spaces:
+ indent_spaces = inisp
+ if indent_spaces <= 0:
+ #print 'Full dedent in text',self.source # dbg
+ full_dedent = True
+
+ if line.rstrip()[-1] == ':':
+ indent_spaces += 4
+ elif dedent_re.match(line):
+ indent_spaces -= 4
+ if indent_spaces <= 0:
+ full_dedent = True
+
+ # Safety
+ if indent_spaces < 0:
+ indent_spaces = 0
+ #print 'safety' # dbg
+
+ return indent_spaces, full_dedent
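+
+ # Illustrative: with self.indent_spaces at 0, _find_indent('if x:') returns
+ # (4, False); with it at 4, _find_indent('    return x') returns (0, True),
+ # i.e. a full flush-left dedent.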
+
+ def _update_indent(self, lines):
+ for line in remove_comments(lines).splitlines():
+ if line and not line.isspace():
+ self.indent_spaces, self._full_dedent = self._find_indent(line)
+
+ def _store(self, lines, buffer=None, store='source'):
+ """Store one or more lines of input.
+
+ If input lines are not newline-terminated, a newline is automatically
+ appended."""
+
+ if buffer is None:
+ buffer = self._buffer
+
+ if lines.endswith('\n'):
+ buffer.append(lines)
+ else:
+ buffer.append(lines+'\n')
+ setattr(self, store, self._set_source(buffer))
+
+ def _set_source(self, buffer):
+ return u''.join(buffer)
+
+
+class IPythonInputSplitter(InputSplitter):
+ """An input splitter that recognizes all of IPython's special syntax."""
+
+ # String with raw, untransformed input.
+ source_raw = ''
+
+ # Flag to track when a transformer has stored input that it hasn't given
+ # back yet.
+ transformer_accumulating = False
+
+ # Flag to track when assemble_python_lines has stored input that it hasn't
+ # given back yet.
+ within_python_line = False
+
+ # Private attributes
+
+ # List with lines of raw input accumulated so far.
+ _buffer_raw = None
+
+ def __init__(self, line_input_checker=True, physical_line_transforms=None,
+ logical_line_transforms=None, python_line_transforms=None):
+ super(IPythonInputSplitter, self).__init__()
+ self._buffer_raw = []
+ self._validate = True
+
+ if physical_line_transforms is not None:
+ self.physical_line_transforms = physical_line_transforms
+ else:
+ self.physical_line_transforms = [
+ leading_indent(),
+ classic_prompt(),
+ ipy_prompt(),
+ cellmagic(end_on_blank_line=line_input_checker),
+ strip_encoding_cookie(),
+ ]
+
+ self.assemble_logical_lines = assemble_logical_lines()
+ if logical_line_transforms is not None:
+ self.logical_line_transforms = logical_line_transforms
+ else:
+ self.logical_line_transforms = [
+ help_end(),
+ escaped_commands(),
+ assign_from_magic(),
+ assign_from_system(),
+ ]
+
+ self.assemble_python_lines = assemble_python_lines()
+ if python_line_transforms is not None:
+ self.python_line_transforms = python_line_transforms
+ else:
+ # We don't use any of these at present
+ self.python_line_transforms = []
+
+ @property
+ def transforms(self):
+ "Quick access to all transformers."
+ return self.physical_line_transforms + \
+ [self.assemble_logical_lines] + self.logical_line_transforms + \
+ [self.assemble_python_lines] + self.python_line_transforms
+
+ @property
+ def transforms_in_use(self):
+ """Transformers, excluding logical line transformers if we're in a
+ Python line."""
+ t = self.physical_line_transforms[:]
+ if not self.within_python_line:
+ t += [self.assemble_logical_lines] + self.logical_line_transforms
+ return t + [self.assemble_python_lines] + self.python_line_transforms
+
+ def reset(self):
+ """Reset the input buffer and associated state."""
+ super(IPythonInputSplitter, self).reset()
+ self._buffer_raw[:] = []
+ self.source_raw = ''
+ self.transformer_accumulating = False
+ self.within_python_line = False
+
+ for t in self.transforms:
+ try:
+ t.reset()
+ except SyntaxError:
+ # Nothing that calls reset() expects to handle transformer
+ # errors
+ pass
+
+ def flush_transformers(self):
+ def _flush(transform, outs):
+ """yield transformed lines
+
+ always strings, never None
+
+ transform: the current transform
+ outs: an iterable of previously transformed inputs.
+ Each may be multiline, which will be passed
+ one line at a time to transform.
+ """
+ for out in outs:
+ for line in out.splitlines():
+ # push one line at a time
+ tmp = transform.push(line)
+ if tmp is not None:
+ yield tmp
+
+ # reset the transform
+ tmp = transform.reset()
+ if tmp is not None:
+ yield tmp
+
+ out = []
+ for t in self.transforms_in_use:
+ out = _flush(t, out)
+
+ out = list(out)
+ if out:
+ self._store('\n'.join(out))
+
+ def raw_reset(self):
+ """Return raw input only and perform a full reset.
+ """
+ out = self.source_raw
+ self.reset()
+ return out
+
+ def source_reset(self):
+ try:
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
+
+ def push_accepts_more(self):
+ if self.transformer_accumulating:
+ return True
+ else:
+ return super(IPythonInputSplitter, self).push_accepts_more()
+
+ def transform_cell(self, cell):
+ """Process and translate a cell of input.
+ """
+ self.reset()
+ try:
+ self.push(cell)
+ self.flush_transformers()
+ return self.source
+ finally:
+ self.reset()
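+
+ # Illustrative one-shot use (the output shape is an assumption based on the
+ # escape handlers in inputtransformer.py, with py2 unicode reprs):
+ #   isp = IPythonInputSplitter()
+ #   isp.transform_cell(u'%time x = 1')
+ #   # -> u"get_ipython().magic(u'time x = 1')\n"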
+
+ def push(self, lines):
+ """Push one or more lines of IPython input.
+
+ This stores the given lines and returns a status code indicating
+ whether the code forms a complete Python block or not, after processing
+ all input lines for special IPython syntax.
+
+ Any exceptions generated in compilation are swallowed, but if an
+ exception was produced, the method returns True.
+
+ Parameters
+ ----------
+ lines : string
+ One or more lines of Python input.
+
+ Returns
+ -------
+ is_complete : boolean
+ True if the current input source (the result of the current input
+ plus prior inputs) forms a complete Python execution block. Note that
+ this value is also stored as a private attribute (_is_complete), so it
+ can be queried at any time.
+ """
+
+ # We must ensure all input is pure unicode
+ lines = cast_unicode(lines, self.encoding)
+ # ''.splitlines() --> [], but we need to push the empty line to transformers
+ lines_list = lines.splitlines()
+ if not lines_list:
+ lines_list = ['']
+
+ # Store raw source before applying any transformations to it. Note
+ # that this must be done *after* the reset() call that would otherwise
+ # flush the buffer.
+ self._store(lines, self._buffer_raw, 'source_raw')
+
+ for line in lines_list:
+ out = self.push_line(line)
+
+ return out
+
+ def push_line(self, line):
+ buf = self._buffer
+
+ def _accumulating(dbg):
+ #print(dbg)
+ self.transformer_accumulating = True
+ return False
+
+ for transformer in self.physical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ if not self.within_python_line:
+ line = self.assemble_logical_lines.push(line)
+ if line is None:
+ return _accumulating('acc logical line')
+
+ for transformer in self.logical_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ line = self.assemble_python_lines.push(line)
+ if line is None:
+ self.within_python_line = True
+ return _accumulating('acc python line')
+ else:
+ self.within_python_line = False
+
+ for transformer in self.python_line_transforms:
+ line = transformer.push(line)
+ if line is None:
+ return _accumulating(transformer)
+
+ #print("transformers clear") #debug
+ self.transformer_accumulating = False
+ return super(IPythonInputSplitter, self).push(line)
diff --git a/contrib/python/ipython/py2/IPython/core/inputtransformer.py b/contrib/python/ipython/py2/IPython/core/inputtransformer.py
index a67d93e1a4..3ba49b951d 100644
--- a/contrib/python/ipython/py2/IPython/core/inputtransformer.py
+++ b/contrib/python/ipython/py2/IPython/core/inputtransformer.py
@@ -1,555 +1,555 @@
-"""Input transformer classes to support IPython special syntax.
-
-This includes the machinery to recognise and transform ``%magic`` commands,
-``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
-"""
-import abc
-import functools
-import re
-
-from IPython.core.splitinput import LineInfo
-from IPython.utils import tokenize2
-from IPython.utils.openpy import cookie_comment_re
-from IPython.utils.py3compat import with_metaclass, PY3
-from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
-
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-
-# The escape sequences that define the syntax transformations IPython will
-# apply to user input. These can NOT be just changed here: many regular
-# expressions and other parts of the code may use their hardcoded values, and
-# for all intents and purposes they constitute the 'IPython syntax', so they
-# should be considered fixed.
-
-ESC_SHELL = '!' # Send line to underlying system shell
-ESC_SH_CAP = '!!' # Send line to system shell and capture output
-ESC_HELP = '?' # Find information about object
-ESC_HELP2 = '??' # Find extra-detailed information about object
-ESC_MAGIC = '%' # Call magic function
-ESC_MAGIC2 = '%%' # Call cell-magic function
-ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
-ESC_QUOTE2 = ';' # Quote all args as a single string, call
-ESC_PAREN = '/' # Call first argument with rest of line as arguments
-
-ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
- ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
- ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
-
-
-class InputTransformer(with_metaclass(abc.ABCMeta, object)):
- """Abstract base class for line-based input transformers."""
-
- @abc.abstractmethod
- def push(self, line):
- """Send a line of input to the transformer, returning the transformed
- input or None if the transformer is waiting for more input.
-
- Must be overridden by subclasses.
-
- Implementations may raise ``SyntaxError`` if the input is invalid. No
- other exceptions may be raised.
- """
- pass
-
- @abc.abstractmethod
- def reset(self):
- """Return, transformed any lines that the transformer has accumulated,
- and reset its internal state.
-
- Must be overridden by subclasses.
- """
- pass
-
- @classmethod
- def wrap(cls, func):
- """Can be used by subclasses as a decorator, to return a factory that
- will allow instantiation with the decorated object.
- """
- @functools.wraps(func)
- def transformer_factory(**kwargs):
- return cls(func, **kwargs)
-
- return transformer_factory
-
-class StatelessInputTransformer(InputTransformer):
- """Wrapper for a stateless input transformer implemented as a function."""
- def __init__(self, func):
- self.func = func
-
- def __repr__(self):
- return "StatelessInputTransformer(func={0!r})".format(self.func)
-
- def push(self, line):
- """Send a line of input to the transformer, returning the
- transformed input."""
- return self.func(line)
-
- def reset(self):
- """No-op - exists for compatibility."""
- pass
-
-class CoroutineInputTransformer(InputTransformer):
- """Wrapper for an input transformer implemented as a coroutine."""
- def __init__(self, coro, **kwargs):
- # Prime it
- self.coro = coro(**kwargs)
- next(self.coro)
-
- def __repr__(self):
- return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
-
- def push(self, line):
- """Send a line of input to the transformer, returning the
- transformed input or None if the transformer is waiting for more
- input.
- """
- return self.coro.send(line)
-
- def reset(self):
- """Return, transformed any lines that the transformer has
- accumulated, and reset its internal state.
- """
- return self.coro.send(None)
-
-class TokenInputTransformer(InputTransformer):
- """Wrapper for a token-based input transformer.
-
- func should accept a list of tokens (5-tuples, see tokenize docs), and
- return an iterable which can be passed to tokenize.untokenize().
- """
- def __init__(self, func):
- self.func = func
+"""Input transformer classes to support IPython special syntax.
+
+This includes the machinery to recognise and transform ``%magic`` commands,
+``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
+"""
+import abc
+import functools
+import re
+
+from IPython.core.splitinput import LineInfo
+from IPython.utils import tokenize2
+from IPython.utils.openpy import cookie_comment_re
+from IPython.utils.py3compat import with_metaclass, PY3
+from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
+
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# The escape sequences that define the syntax transformations IPython will
+# apply to user input. These can NOT be just changed here: many regular
+# expressions and other parts of the code may use their hardcoded values, and
+# for all intents and purposes they constitute the 'IPython syntax', so they
+# should be considered fixed.
+
+ESC_SHELL = '!' # Send line to underlying system shell
+ESC_SH_CAP = '!!' # Send line to system shell and capture output
+ESC_HELP = '?' # Find information about object
+ESC_HELP2 = '??' # Find extra-detailed information about object
+ESC_MAGIC = '%' # Call magic function
+ESC_MAGIC2 = '%%' # Call cell-magic function
+ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
+ESC_QUOTE2 = ';' # Quote all args as a single string, call
+ESC_PAREN = '/' # Call first argument with rest of line as arguments
+
+ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
+ ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
+ ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
+
+
+class InputTransformer(with_metaclass(abc.ABCMeta, object)):
+ """Abstract base class for line-based input transformers."""
+
+ @abc.abstractmethod
+ def push(self, line):
+ """Send a line of input to the transformer, returning the transformed
+ input or None if the transformer is waiting for more input.
+
+ Must be overridden by subclasses.
+
+ Implementations may raise ``SyntaxError`` if the input is invalid. No
+ other exceptions may be raised.
+ """
+ pass
+
+ @abc.abstractmethod
+ def reset(self):
+ """Return, transformed any lines that the transformer has accumulated,
+ and reset its internal state.
+
+ Must be overridden by subclasses.
+ """
+ pass
+
+ @classmethod
+ def wrap(cls, func):
+ """Can be used by subclasses as a decorator, to return a factory that
+ will allow instantiation with the decorated object.
+ """
+ @functools.wraps(func)
+ def transformer_factory(**kwargs):
+ return cls(func, **kwargs)
+
+ return transformer_factory
+
+class StatelessInputTransformer(InputTransformer):
+ """Wrapper for a stateless input transformer implemented as a function."""
+ def __init__(self, func):
+ self.func = func
+
+ def __repr__(self):
+ return "StatelessInputTransformer(func={0!r})".format(self.func)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input."""
+ return self.func(line)
+
+ def reset(self):
+ """No-op - exists for compatibility."""
+ pass
+
+class CoroutineInputTransformer(InputTransformer):
+ """Wrapper for an input transformer implemented as a coroutine."""
+ def __init__(self, coro, **kwargs):
+ # Prime it
+ self.coro = coro(**kwargs)
+ next(self.coro)
+
+ def __repr__(self):
+ return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
+
+ def push(self, line):
+ """Send a line of input to the transformer, returning the
+ transformed input or None if the transformer is waiting for more
+ input.
+ """
+ return self.coro.send(line)
+
+ def reset(self):
+ """Return, transformed any lines that the transformer has
+ accumulated, and reset its internal state.
+ """
+ return self.coro.send(None)
+
+class TokenInputTransformer(InputTransformer):
+ """Wrapper for a token-based input transformer.
+
+ func should accept a list of tokens (5-tuples, see tokenize docs), and
+ return an iterable which can be passed to tokenize.untokenize().
+ """
+ def __init__(self, func):
+ self.func = func
self.buf = []
- self.reset_tokenizer()
+ self.reset_tokenizer()
- def reset_tokenizer(self):
+ def reset_tokenizer(self):
it = iter(self.buf)
nxt = it.__next__ if PY3 else it.next
self.tokenizer = generate_tokens(nxt)
- def push(self, line):
+ def push(self, line):
self.buf.append(line + '\n')
if all(l.isspace() for l in self.buf):
- return self.reset()
-
- tokens = []
- stop_at_NL = False
- try:
- for intok in self.tokenizer:
- tokens.append(intok)
- t = intok[0]
- if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
- # Stop before we try to pull a line we don't have yet
- break
- elif t == tokenize2.ERRORTOKEN:
- stop_at_NL = True
- except TokenError:
- # Multi-line statement - stop and try again with the next line
- self.reset_tokenizer()
- return None
-
- return self.output(tokens)
-
- def output(self, tokens):
+ return self.reset()
+
+ tokens = []
+ stop_at_NL = False
+ try:
+ for intok in self.tokenizer:
+ tokens.append(intok)
+ t = intok[0]
+ if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
+ # Stop before we try to pull a line we don't have yet
+ break
+ elif t == tokenize2.ERRORTOKEN:
+ stop_at_NL = True
+ except TokenError:
+ # Multi-line statement - stop and try again with the next line
+ self.reset_tokenizer()
+ return None
+
+ return self.output(tokens)
+
+ def output(self, tokens):
self.buf[:] = []
- self.reset_tokenizer()
- return untokenize(self.func(tokens)).rstrip('\n')
-
- def reset(self):
+ self.reset_tokenizer()
+ return untokenize(self.func(tokens)).rstrip('\n')
+
+ def reset(self):
l = ''.join(self.buf)
self.buf[:] = []
- self.reset_tokenizer()
- if l:
- return l.rstrip('\n')
-
-class assemble_python_lines(TokenInputTransformer):
- def __init__(self):
- super(assemble_python_lines, self).__init__(None)
-
- def output(self, tokens):
- return self.reset()
-
-@CoroutineInputTransformer.wrap
-def assemble_logical_lines():
- """Join lines following explicit line continuations (\)"""
- line = ''
- while True:
- line = (yield line)
- if not line or line.isspace():
- continue
-
- parts = []
- while line is not None:
- if line.endswith('\\') and (not has_comment(line)):
- parts.append(line[:-1])
- line = (yield None) # Get another line
- else:
- parts.append(line)
- break
-
- # Output
- line = ''.join(parts)
-
-# Utilities
-def _make_help_call(target, esc, lspace, next_input=None):
- """Prepares a pinfo(2)/psearch call from a target name and the escape
- (i.e. ? or ??)"""
- method = 'pinfo2' if esc == '??' \
- else 'psearch' if '*' in target \
- else 'pinfo'
- arg = " ".join([method, target])
- if next_input is None:
- return '%sget_ipython().magic(%r)' % (lspace, arg)
- else:
- return '%sget_ipython().set_next_input(%r);get_ipython().magic(%r)' % \
- (lspace, next_input, arg)
-
-# These define the transformations for the different escape characters.
-def _tr_system(line_info):
- "Translate lines escaped with: !"
- cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
- return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
-
-def _tr_system2(line_info):
- "Translate lines escaped with: !!"
- cmd = line_info.line.lstrip()[2:]
- return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
-
-def _tr_help(line_info):
- "Translate lines escaped with: ?/??"
- # A naked help line should just fire the intro help screen
- if not line_info.line[1:]:
- return 'get_ipython().show_usage()'
-
- return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
-
-def _tr_magic(line_info):
- "Translate lines escaped with: %"
- tpl = '%sget_ipython().magic(%r)'
- if line_info.line.startswith(ESC_MAGIC2):
- return line_info.line
- cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
- return tpl % (line_info.pre, cmd)
-
-def _tr_quote(line_info):
- "Translate lines escaped with: ,"
- return '%s%s("%s")' % (line_info.pre, line_info.ifun,
- '", "'.join(line_info.the_rest.split()) )
-
-def _tr_quote2(line_info):
- "Translate lines escaped with: ;"
- return '%s%s("%s")' % (line_info.pre, line_info.ifun,
- line_info.the_rest)
-
-def _tr_paren(line_info):
- "Translate lines escaped with: /"
- return '%s%s(%s)' % (line_info.pre, line_info.ifun,
- ", ".join(line_info.the_rest.split()))
-
-tr = { ESC_SHELL : _tr_system,
- ESC_SH_CAP : _tr_system2,
- ESC_HELP : _tr_help,
- ESC_HELP2 : _tr_help,
- ESC_MAGIC : _tr_magic,
- ESC_QUOTE : _tr_quote,
- ESC_QUOTE2 : _tr_quote2,
- ESC_PAREN : _tr_paren }
-
-@StatelessInputTransformer.wrap
-def escaped_commands(line):
- """Transform escaped commands - %magic, !system, ?help + various autocalls.
- """
- if not line or line.isspace():
- return line
- lineinf = LineInfo(line)
- if lineinf.esc not in tr:
- return line
-
- return tr[lineinf.esc](lineinf)
-
-_initial_space_re = re.compile(r'\s*')
-
-_help_end_re = re.compile(r"""(%{0,2}
- [a-zA-Z_*][\w*]* # Variable name
- (\.[a-zA-Z_*][\w*]*)* # .etc.etc
- )
- (\?\??)$ # ? or ??
- """,
- re.VERBOSE)
-
-# Extra pseudotokens for multiline strings and data structures
-_MULTILINE_STRING = object()
-_MULTILINE_STRUCTURE = object()
-
-def _line_tokens(line):
- """Helper for has_comment and ends_in_comment_or_string."""
- readline = StringIO(line).readline
- toktypes = set()
- try:
- for t in generate_tokens(readline):
- toktypes.add(t[0])
- except TokenError as e:
- # There are only two cases where a TokenError is raised.
- if 'multi-line string' in e.args[0]:
- toktypes.add(_MULTILINE_STRING)
- else:
- toktypes.add(_MULTILINE_STRUCTURE)
- return toktypes
-
-def has_comment(src):
- """Indicate whether an input line has (i.e. ends in, or is) a comment.
-
- This uses tokenize, so it can distinguish comments from # inside strings.
-
- Parameters
- ----------
- src : string
- A single line input string.
-
- Returns
- -------
- comment : bool
- True if source has a comment.
- """
- return (tokenize2.COMMENT in _line_tokens(src))
-
-def ends_in_comment_or_string(src):
- """Indicates whether or not an input line ends in a comment or within
- a multiline string.
-
- Parameters
- ----------
- src : string
- A single line input string.
-
- Returns
- -------
- comment : bool
- True if source ends in a comment or multiline string.
- """
- toktypes = _line_tokens(src)
- return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
-
-
-@StatelessInputTransformer.wrap
-def help_end(line):
- """Translate lines with ?/?? at the end"""
- m = _help_end_re.search(line)
- if m is None or ends_in_comment_or_string(line):
- return line
- target = m.group(1)
- esc = m.group(3)
- lspace = _initial_space_re.match(line).group(0)
-
- # If we're mid-command, put it back on the next prompt for the user.
- next_input = line.rstrip('?') if line.strip() != m.group(0) else None
-
- return _make_help_call(target, esc, lspace, next_input)
-
-
-@CoroutineInputTransformer.wrap
-def cellmagic(end_on_blank_line=False):
- """Captures & transforms cell magics.
-
- After a cell magic is started, this stores up any lines it gets until it is
- reset (sent None).
- """
- tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
- cellmagic_help_re = re.compile('%%\w+\?')
- line = ''
- while True:
- line = (yield line)
- # consume leading empty lines
- while not line:
- line = (yield line)
-
- if not line.startswith(ESC_MAGIC2):
- # This isn't a cell magic, idle waiting for reset then start over
- while line is not None:
- line = (yield line)
- continue
-
- if cellmagic_help_re.match(line):
- # This case will be handled by help_end
- continue
-
- first = line
- body = []
- line = (yield None)
- while (line is not None) and \
- ((line.strip() != '') or not end_on_blank_line):
- body.append(line)
- line = (yield None)
-
- # Output
- magic_name, _, first = first.partition(' ')
- magic_name = magic_name.lstrip(ESC_MAGIC2)
- line = tpl % (magic_name, first, u'\n'.join(body))
-
-
-def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
- """Remove matching input prompts from a block of input.
-
- Parameters
- ----------
- prompt_re : regular expression
- A regular expression matching any input prompt (including continuation)
- initial_re : regular expression, optional
- A regular expression matching only the initial prompt, but not continuation.
- If no initial expression is given, prompt_re will be used everywhere.
- Used mainly for plain Python prompts, where the continuation prompt
- ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
-
- If initial_re and prompt_re differ,
- only initial_re will be tested against the first line.
- If any prompt is found on the first two lines,
- prompts will be stripped from the rest of the block.
- """
- if initial_re is None:
- initial_re = prompt_re
- line = ''
- while True:
- line = (yield line)
-
- # First line of cell
- if line is None:
- continue
- out, n1 = initial_re.subn('', line, count=1)
- if turnoff_re and not n1:
- if turnoff_re.match(line):
- # We're in e.g. a cell magic; disable this transformer for
- # the rest of the cell.
- while line is not None:
- line = (yield line)
- continue
-
- line = (yield out)
-
- if line is None:
- continue
- # check for any prompt on the second line of the cell,
- # because people often copy from just after the first prompt,
- # so we might not see it in the first line.
- out, n2 = prompt_re.subn('', line, count=1)
- line = (yield out)
-
- if n1 or n2:
- # Found a prompt in the first two lines - check for it in
- # the rest of the cell as well.
- while line is not None:
- line = (yield prompt_re.sub('', line, count=1))
-
- else:
- # Prompts not in input - wait for reset
- while line is not None:
- line = (yield line)
-
-@CoroutineInputTransformer.wrap
-def classic_prompt():
- """Strip the >>>/... prompts of the Python interactive shell."""
- # FIXME: non-capturing version (?:...) usable?
- prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
- initial_re = re.compile(r'^>>>( |$)')
- # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
- turnoff_re = re.compile(r'^[%!]')
- return _strip_prompts(prompt_re, initial_re, turnoff_re)
-
-@CoroutineInputTransformer.wrap
-def ipy_prompt():
- """Strip IPython's In [1]:/...: prompts."""
- # FIXME: non-capturing version (?:...) usable?
- prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
- # Disable prompt stripping inside cell magics
- turnoff_re = re.compile(r'^%%')
- return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
-
-
-@CoroutineInputTransformer.wrap
-def leading_indent():
- """Remove leading indentation.
-
- If the first line starts with a spaces or tabs, the same whitespace will be
- removed from each following line until it is reset.
- """
- space_re = re.compile(r'^[ \t]+')
- line = ''
- while True:
- line = (yield line)
-
- if line is None:
- continue
-
- m = space_re.match(line)
- if m:
- space = m.group(0)
- while line is not None:
- if line.startswith(space):
- line = line[len(space):]
- line = (yield line)
- else:
- # No leading spaces - wait for reset
- while line is not None:
- line = (yield line)
-
-
-@CoroutineInputTransformer.wrap
-def strip_encoding_cookie():
- """Remove encoding comment if found in first two lines
-
- If the first or second line has the `# coding: utf-8` comment,
- it will be removed.
- """
- line = ''
- while True:
- line = (yield line)
- # check comment on first two lines
- for i in range(2):
- if line is None:
- break
- if cookie_comment_re.match(line):
- line = (yield "")
- else:
- line = (yield line)
-
- # no-op on the rest of the cell
- while line is not None:
- line = (yield line)
-
-_assign_pat = \
-r'''(?P<lhs>(\s*)
- ([\w\.]+) # Initial identifier
- (\s*,\s*
- \*?[\w\.]+)* # Further identifiers for unpacking
- \s*?,? # Trailing comma
- )
- \s*=\s*
-'''
-
-assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
-assign_system_template = '%s = get_ipython().getoutput(%r)'
-@StatelessInputTransformer.wrap
-def assign_from_system(line):
- """Transform assignment from system commands (e.g. files = !ls)"""
- m = assign_system_re.match(line)
- if m is None:
- return line
-
- return assign_system_template % m.group('lhs', 'cmd')
-
-assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
-assign_magic_template = '%s = get_ipython().magic(%r)'
-@StatelessInputTransformer.wrap
-def assign_from_magic(line):
- """Transform assignment from magic commands (e.g. a = %who_ls)"""
- m = assign_magic_re.match(line)
- if m is None:
- return line
-
- return assign_magic_template % m.group('lhs', 'cmd')
+ self.reset_tokenizer()
+ if l:
+ return l.rstrip('\n')
+
+class assemble_python_lines(TokenInputTransformer):
+ def __init__(self):
+ super(assemble_python_lines, self).__init__(None)
+
+ def output(self, tokens):
+ return self.reset()
+
+@CoroutineInputTransformer.wrap
+def assemble_logical_lines():
+ """Join lines following explicit line continuations (\)"""
+ line = ''
+ while True:
+ line = (yield line)
+ if not line or line.isspace():
+ continue
+
+ parts = []
+ while line is not None:
+ if line.endswith('\\') and (not has_comment(line)):
+ parts.append(line[:-1])
+ line = (yield None) # Get another line
+ else:
+ parts.append(line)
+ break
+
+ # Output
+ line = ''.join(parts)
+
+# Utilities
+def _make_help_call(target, esc, lspace, next_input=None):
+ """Prepares a pinfo(2)/psearch call from a target name and the escape
+ (i.e. ? or ??)"""
+ method = 'pinfo2' if esc == '??' \
+ else 'psearch' if '*' in target \
+ else 'pinfo'
+ arg = " ".join([method, target])
+ if next_input is None:
+ return '%sget_ipython().magic(%r)' % (lspace, arg)
+ else:
+ return '%sget_ipython().set_next_input(%r);get_ipython().magic(%r)' % \
+ (lspace, next_input, arg)
+
+# These define the transformations for the different escape characters.
+def _tr_system(line_info):
+ "Translate lines escaped with: !"
+ cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
+ return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
+
+def _tr_system2(line_info):
+ "Translate lines escaped with: !!"
+ cmd = line_info.line.lstrip()[2:]
+ return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
+
+def _tr_help(line_info):
+ "Translate lines escaped with: ?/??"
+ # A naked help line should just fire the intro help screen
+ if not line_info.line[1:]:
+ return 'get_ipython().show_usage()'
+
+ return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
+
+def _tr_magic(line_info):
+ "Translate lines escaped with: %"
+ tpl = '%sget_ipython().magic(%r)'
+ if line_info.line.startswith(ESC_MAGIC2):
+ return line_info.line
+ cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
+ return tpl % (line_info.pre, cmd)
+
+def _tr_quote(line_info):
+ "Translate lines escaped with: ,"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ '", "'.join(line_info.the_rest.split()) )
+
+def _tr_quote2(line_info):
+ "Translate lines escaped with: ;"
+ return '%s%s("%s")' % (line_info.pre, line_info.ifun,
+ line_info.the_rest)
+
+def _tr_paren(line_info):
+ "Translate lines escaped with: /"
+ return '%s%s(%s)' % (line_info.pre, line_info.ifun,
+ ", ".join(line_info.the_rest.split()))
+
+tr = { ESC_SHELL : _tr_system,
+ ESC_SH_CAP : _tr_system2,
+ ESC_HELP : _tr_help,
+ ESC_HELP2 : _tr_help,
+ ESC_MAGIC : _tr_magic,
+ ESC_QUOTE : _tr_quote,
+ ESC_QUOTE2 : _tr_quote2,
+ ESC_PAREN : _tr_paren }
+
+@StatelessInputTransformer.wrap
+def escaped_commands(line):
+ """Transform escaped commands - %magic, !system, ?help + various autocalls.
+ """
+ if not line or line.isspace():
+ return line
+ lineinf = LineInfo(line)
+ if lineinf.esc not in tr:
+ return line
+
+ return tr[lineinf.esc](lineinf)
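+
+# For instance (illustrative; py2 unicode reprs assumed), each escape maps a
+# raw line onto plain Python:
+#   u'!ls -l' -> "get_ipython().system(u'ls -l')"
+#   u'%pwd'   -> "get_ipython().magic(u'pwd')"
+#   u',f a b' -> 'f("a", "b")'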
+
+_initial_space_re = re.compile(r'\s*')
+
+_help_end_re = re.compile(r"""(%{0,2}
+ [a-zA-Z_*][\w*]* # Variable name
+ (\.[a-zA-Z_*][\w*]*)* # .etc.etc
+ )
+ (\?\??)$ # ? or ??
+ """,
+ re.VERBOSE)
+
+# Extra pseudotokens for multiline strings and data structures
+_MULTILINE_STRING = object()
+_MULTILINE_STRUCTURE = object()
+
+def _line_tokens(line):
+ """Helper for has_comment and ends_in_comment_or_string."""
+ readline = StringIO(line).readline
+ toktypes = set()
+ try:
+ for t in generate_tokens(readline):
+ toktypes.add(t[0])
+ except TokenError as e:
+ # There are only two cases where a TokenError is raised.
+ if 'multi-line string' in e.args[0]:
+ toktypes.add(_MULTILINE_STRING)
+ else:
+ toktypes.add(_MULTILINE_STRUCTURE)
+ return toktypes
+
+def has_comment(src):
+ """Indicate whether an input line has (i.e. ends in, or is) a comment.
+
+ This uses tokenize, so it can distinguish comments from # inside strings.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source has a comment.
+ """
+ return (tokenize2.COMMENT in _line_tokens(src))
+
+def ends_in_comment_or_string(src):
+ """Indicates whether or not an input line ends in a comment or within
+ a multiline string.
+
+ Parameters
+ ----------
+ src : string
+ A single line input string.
+
+ Returns
+ -------
+ comment : bool
+ True if source ends in a comment or multiline string.
+ """
+ toktypes = _line_tokens(src)
+ return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
+
+
+@StatelessInputTransformer.wrap
+def help_end(line):
+ """Translate lines with ?/?? at the end"""
+ m = _help_end_re.search(line)
+ if m is None or ends_in_comment_or_string(line):
+ return line
+ target = m.group(1)
+ esc = m.group(3)
+ lspace = _initial_space_re.match(line).group(0)
+
+ # If we're mid-command, put it back on the next prompt for the user.
+ next_input = line.rstrip('?') if line.strip() != m.group(0) else None
+
+ return _make_help_call(target, esc, lspace, next_input)
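+
+# Illustrative: u'os.path?' -> "get_ipython().magic(u'pinfo os.path')".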
+
+
+@CoroutineInputTransformer.wrap
+def cellmagic(end_on_blank_line=False):
+ """Captures & transforms cell magics.
+
+ After a cell magic is started, this stores up any lines it gets until it is
+ reset (sent None).
+ """
+ tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
+ cellmagic_help_re = re.compile(r'%%\w+\?')
+ line = ''
+ while True:
+ line = (yield line)
+ # consume leading empty lines
+ while not line:
+ line = (yield line)
+
+ if not line.startswith(ESC_MAGIC2):
+ # This isn't a cell magic; idle until reset (None), then start over
+ while line is not None:
+ line = (yield line)
+ continue
+
+ if cellmagic_help_re.match(line):
+ # This case will be handled by help_end
+ continue
+
+ first = line
+ body = []
+ line = (yield None)
+ while (line is not None) and \
+ ((line.strip() != '') or not end_on_blank_line):
+ body.append(line)
+ line = (yield None)
+
+ # Output
+ magic_name, _, first = first.partition(' ')
+ magic_name = magic_name.lstrip(ESC_MAGIC2)
+ line = tpl % (magic_name, first, u'\n'.join(body))
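+
+# Illustrative: pushing u'%%bash' then u'echo hi' and resetting yields
+# "get_ipython().run_cell_magic(u'bash', u'', u'echo hi')" (py2 repr assumed).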
+
+
+def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
+ """Remove matching input prompts from a block of input.
+
+ Parameters
+ ----------
+ prompt_re : regular expression
+ A regular expression matching any input prompt (including continuation)
+ initial_re : regular expression, optional
+ A regular expression matching only the initial prompt, but not continuation.
+ If no initial expression is given, prompt_re will be used everywhere.
+ Used mainly for plain Python prompts, where the continuation prompt
+ ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
+
+ If initial_re and prompt_re differ,
+ only initial_re will be tested against the first line.
+ If any prompt is found on the first two lines,
+ prompts will be stripped from the rest of the block.
+ """
+ if initial_re is None:
+ initial_re = prompt_re
+ line = ''
+ while True:
+ line = (yield line)
+
+ # First line of cell
+ if line is None:
+ continue
+ out, n1 = initial_re.subn('', line, count=1)
+ if turnoff_re and not n1:
+ if turnoff_re.match(line):
+ # We're in e.g. a cell magic; disable this transformer for
+ # the rest of the cell.
+ while line is not None:
+ line = (yield line)
+ continue
+
+ line = (yield out)
+
+ if line is None:
+ continue
+ # check for any prompt on the second line of the cell,
+ # because people often copy from just after the first prompt,
+ # so we might not see it in the first line.
+ out, n2 = prompt_re.subn('', line, count=1)
+ line = (yield out)
+
+ if n1 or n2:
+ # Found a prompt in the first two lines - check for it in
+ # the rest of the cell as well.
+ while line is not None:
+ line = (yield prompt_re.sub('', line, count=1))
+
+ else:
+ # Prompts not in input - wait for reset
+ while line is not None:
+ line = (yield line)
+
+@CoroutineInputTransformer.wrap
+def classic_prompt():
+ """Strip the >>>/... prompts of the Python interactive shell."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
+ initial_re = re.compile(r'^>>>( |$)')
+ # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
+ turnoff_re = re.compile(r'^[%!]')
+ return _strip_prompts(prompt_re, initial_re, turnoff_re)
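+
+# Illustrative: '>>> a = 1' becomes 'a = 1'; once a prompt is seen on the
+# first two lines, '... ' continuations are stripped from the rest of the cell.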
+
+@CoroutineInputTransformer.wrap
+def ipy_prompt():
+ """Strip IPython's In [1]:/...: prompts."""
+ # FIXME: non-capturing version (?:...) usable?
+ prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
+ # Disable prompt stripping inside cell magics
+ turnoff_re = re.compile(r'^%%')
+ return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
+
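# --- Illustrative sketch (editor's note, not part of the diff): stripping
# classic prompts line by line. CoroutineInputTransformer.wrap turns the
# function above into a factory whose instances expose push()/reset().
t = classic_prompt()
print(t.push('>>> x = 1'))    # 'x = 1'   (initial prompt stripped)
print(t.push('... x += 1'))   # 'x += 1'  (continuation prompt stripped too)
t.reset()                     # end of cell; the next block starts fresh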
+
+@CoroutineInputTransformer.wrap
+def leading_indent():
+ """Remove leading indentation.
+
+    If the first line starts with spaces or tabs, the same whitespace will be
+    removed from each following line until the transformer is reset.
+ """
+ space_re = re.compile(r'^[ \t]+')
+ line = ''
+ while True:
+ line = (yield line)
+
+ if line is None:
+ continue
+
+ m = space_re.match(line)
+ if m:
+ space = m.group(0)
+ while line is not None:
+ if line.startswith(space):
+ line = line[len(space):]
+ line = (yield line)
+ else:
+ # No leading spaces - wait for reset
+ while line is not None:
+ line = (yield line)
+
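# --- Illustrative sketch (editor's note, not part of the diff): the whitespace
# captured on the first line is removed from every later line of the cell.
t = leading_indent()
print(t.push('    if x:'))       # 'if x:'      (4 leading spaces captured)
print(t.push('        y = 1'))   # '    y = 1'  (the same 4 spaces removed)
t.reset()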
+
+@CoroutineInputTransformer.wrap
+def strip_encoding_cookie():
+    """Remove the encoding comment if found in the first two lines.
+
+    If the first or second line has the `# coding: utf-8` comment,
+    it will be removed.
+ """
+ line = ''
+ while True:
+ line = (yield line)
+ # check comment on first two lines
+ for i in range(2):
+ if line is None:
+ break
+ if cookie_comment_re.match(line):
+ line = (yield "")
+ else:
+ line = (yield line)
+
+ # no-op on the rest of the cell
+ while line is not None:
+ line = (yield line)
+
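# --- Illustrative sketch (editor's note, not part of the diff): the cookie is
# replaced with an empty line so later line numbers stay aligned.
# cookie_comment_re is defined earlier in this module (outside this hunk).
t = strip_encoding_cookie()
print(repr(t.push('# -*- coding: utf-8 -*-')))   # ''  (cookie removed)
print(t.push('x = 1'))                           # 'x = 1' passed through
t.reset()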
+_assign_pat = \
+r'''(?P<lhs>(\s*)
+ ([\w\.]+) # Initial identifier
+ (\s*,\s*
+ \*?[\w\.]+)* # Further identifiers for unpacking
+ \s*?,? # Trailing comma
+ )
+ \s*=\s*
+'''
+
+assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_system_template = '%s = get_ipython().getoutput(%r)'
+@StatelessInputTransformer.wrap
+def assign_from_system(line):
+ """Transform assignment from system commands (e.g. files = !ls)"""
+ m = assign_system_re.match(line)
+ if m is None:
+ return line
+
+ return assign_system_template % m.group('lhs', 'cmd')
+
+assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
+assign_magic_template = '%s = get_ipython().magic(%r)'
+@StatelessInputTransformer.wrap
+def assign_from_magic(line):
+ """Transform assignment from magic commands (e.g. a = %who_ls)"""
+ m = assign_magic_re.match(line)
+ if m is None:
+ return line
+
+ return assign_magic_template % m.group('lhs', 'cmd')
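# --- Illustrative sketch (editor's note, not part of the diff): what the two
# stateless transformers above emit for typical assignments.
t = assign_from_system()
print(t.push('files = !ls -l'))
# -> "files = get_ipython().getoutput('ls -l')"
t = assign_from_magic()
print(t.push('a = %who_ls'))
# -> "a = get_ipython().magic('who_ls')"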
diff --git a/contrib/python/ipython/py2/IPython/core/interactiveshell.py b/contrib/python/ipython/py2/IPython/core/interactiveshell.py
index ba96cb0676..ad8824b606 100644
--- a/contrib/python/ipython/py2/IPython/core/interactiveshell.py
+++ b/contrib/python/ipython/py2/IPython/core/interactiveshell.py
@@ -1,78 +1,78 @@
-# -*- coding: utf-8 -*-
-"""Main IPython class."""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
-# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-from __future__ import absolute_import, print_function
-
-import __future__
-import abc
-import ast
-import atexit
-import functools
-import os
-import re
-import runpy
-import sys
-import tempfile
-import traceback
-import types
-import subprocess
-import warnings
-from io import open as io_open
-
-from pickleshare import PickleShareDB
-
-from traitlets.config.configurable import SingletonConfigurable
+# -*- coding: utf-8 -*-
+"""Main IPython class."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+from __future__ import absolute_import, print_function
+
+import __future__
+import abc
+import ast
+import atexit
+import functools
+import os
+import re
+import runpy
+import sys
+import tempfile
+import traceback
+import types
+import subprocess
+import warnings
+from io import open as io_open
+
+from pickleshare import PickleShareDB
+
+from traitlets.config.configurable import SingletonConfigurable
from IPython.core import oinspect
-from IPython.core import magic
-from IPython.core import page
-from IPython.core import prefilter
-from IPython.core import shadowns
-from IPython.core import ultratb
-from IPython.core.alias import Alias, AliasManager
-from IPython.core.autocall import ExitAutocall
-from IPython.core.builtin_trap import BuiltinTrap
-from IPython.core.events import EventManager, available_events
-from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
+from IPython.core import magic
+from IPython.core import page
+from IPython.core import prefilter
+from IPython.core import shadowns
+from IPython.core import ultratb
+from IPython.core.alias import Alias, AliasManager
+from IPython.core.autocall import ExitAutocall
+from IPython.core.builtin_trap import BuiltinTrap
+from IPython.core.events import EventManager, available_events
+from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
-from IPython.core.display_trap import DisplayTrap
-from IPython.core.displayhook import DisplayHook
-from IPython.core.displaypub import DisplayPublisher
-from IPython.core.error import InputRejected, UsageError
-from IPython.core.extensions import ExtensionManager
-from IPython.core.formatters import DisplayFormatter
-from IPython.core.history import HistoryManager
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.displayhook import DisplayHook
+from IPython.core.displaypub import DisplayPublisher
+from IPython.core.error import InputRejected, UsageError
+from IPython.core.extensions import ExtensionManager
+from IPython.core.formatters import DisplayFormatter
+from IPython.core.history import HistoryManager
from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
-from IPython.core.logger import Logger
-from IPython.core.macro import Macro
-from IPython.core.payload import PayloadManager
-from IPython.core.prefilter import PrefilterManager
-from IPython.core.profiledir import ProfileDir
-from IPython.core.usage import default_banner
+from IPython.core.logger import Logger
+from IPython.core.macro import Macro
+from IPython.core.payload import PayloadManager
+from IPython.core.prefilter import PrefilterManager
+from IPython.core.profiledir import ProfileDir
+from IPython.core.usage import default_banner
from IPython.testing.skipdoctest import skip_doctest_py2, skip_doctest
from IPython.display import display
-from IPython.utils import PyColorize
-from IPython.utils import io
-from IPython.utils import py3compat
-from IPython.utils import openpy
-from IPython.utils.decorators import undoc
-from IPython.utils.io import ask_yes_no
-from IPython.utils.ipstruct import Struct
-from IPython.paths import get_ipython_dir
+from IPython.utils import PyColorize
+from IPython.utils import io
+from IPython.utils import py3compat
+from IPython.utils import openpy
+from IPython.utils.decorators import undoc
+from IPython.utils.io import ask_yes_no
+from IPython.utils.ipstruct import Struct
+from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
-from IPython.utils.process import system, getoutput
-from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types,
- with_metaclass, iteritems)
-from IPython.utils.strdispatch import StrDispatch
-from IPython.utils.syspathcontext import prepended_to_syspath
+from IPython.utils.process import system, getoutput
+from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types,
+ with_metaclass, iteritems)
+from IPython.utils.strdispatch import StrDispatch
+from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
@@ -81,8 +81,8 @@ from traitlets import (
)
from warnings import warn
from logging import error
-import IPython.core.hooks
-
+import IPython.core.hooks
+
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
from IPython.utils.contexts import NoOpContext
@@ -106,87 +106,87 @@ class ProvisionalWarning(DeprecationWarning):
"""
pass
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-
-# compiled regexps for autoindent management
-dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
-@undoc
-def softspace(file, newvalue):
- """Copied from code.py, to remove the dependency"""
-
- oldvalue = 0
- try:
- oldvalue = file.softspace
- except AttributeError:
- pass
- try:
- file.softspace = newvalue
- except (AttributeError, TypeError):
- # "attribute-less object" or "read-only attributes"
- pass
- return oldvalue
-
-@undoc
-def no_op(*a, **kw): pass
-
-
-class SpaceInInput(Exception): pass
-
-
-def get_default_colors():
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# compiled regexps for autoindent management
+dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+@undoc
+def softspace(file, newvalue):
+ """Copied from code.py, to remove the dependency"""
+
+ oldvalue = 0
+ try:
+ oldvalue = file.softspace
+ except AttributeError:
+ pass
+ try:
+ file.softspace = newvalue
+ except (AttributeError, TypeError):
+ # "attribute-less object" or "read-only attributes"
+ pass
+ return oldvalue
+
+@undoc
+def no_op(*a, **kw): pass
+
+
+class SpaceInInput(Exception): pass
+
+
+def get_default_colors():
"DEPRECATED"
     warn('get_default_colors is deprecated, and is `Neutral` on all platforms.',
DeprecationWarning, stacklevel=2)
return 'Neutral'
-
-
-class SeparateUnicode(Unicode):
- r"""A Unicode subclass to validate separate_in, separate_out, etc.
-
- This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
- """
-
- def validate(self, obj, value):
- if value == '0': value = ''
- value = value.replace('\\n','\n')
- return super(SeparateUnicode, self).validate(obj, value)
-
-
-@undoc
-class DummyMod(object):
- """A dummy module used for IPython's interactive module when
- a namespace must be assigned to the module's __dict__."""
- pass
-
-
-class ExecutionResult(object):
- """The result of a call to :meth:`InteractiveShell.run_cell`
-
- Stores information about what took place.
- """
- execution_count = None
- error_before_exec = None
- error_in_exec = None
- result = None
-
- @property
- def success(self):
- return (self.error_before_exec is None) and (self.error_in_exec is None)
-
- def raise_error(self):
- """Reraises error if `success` is `False`, otherwise does nothing"""
- if self.error_before_exec is not None:
- raise self.error_before_exec
- if self.error_in_exec is not None:
- raise self.error_in_exec
-
+
+
+class SeparateUnicode(Unicode):
+ r"""A Unicode subclass to validate separate_in, separate_out, etc.
+
+ This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
+ """
+
+ def validate(self, obj, value):
+ if value == '0': value = ''
+ value = value.replace('\\n','\n')
+ return super(SeparateUnicode, self).validate(obj, value)
+
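# --- Illustrative sketch (editor's note, not part of the diff): how the trait
# normalizes values on assignment. Demo is a hypothetical Configurable.
from traitlets.config.configurable import Configurable

class Demo(Configurable):
    sep = SeparateUnicode('\n').tag(config=True)

d = Demo()
d.sep = '0'         # validated to the empty string
d.sep = 'a\\nb'     # the 2-character sequence \n becomes a real newline
print(repr(d.sep))  # 'a\nb'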
+
+@undoc
+class DummyMod(object):
+ """A dummy module used for IPython's interactive module when
+ a namespace must be assigned to the module's __dict__."""
+ pass
+
+
+class ExecutionResult(object):
+ """The result of a call to :meth:`InteractiveShell.run_cell`
+
+ Stores information about what took place.
+ """
+ execution_count = None
+ error_before_exec = None
+ error_in_exec = None
+ result = None
+
+ @property
+ def success(self):
+ return (self.error_before_exec is None) and (self.error_in_exec is None)
+
+ def raise_error(self):
+ """Reraises error if `success` is `False`, otherwise does nothing"""
+ if self.error_before_exec is not None:
+ raise self.error_before_exec
+ if self.error_in_exec is not None:
+ raise self.error_in_exec
+
def __repr__(self):
if sys.version_info > (3,):
name = self.__class__.__qualname__
@@ -194,67 +194,67 @@ class ExecutionResult(object):
name = self.__class__.__name__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.result))
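# --- Illustrative sketch (editor's note, not part of the diff): typical use of
# ExecutionResult as returned by InteractiveShell.run_cell() in a live shell.
result = get_ipython().run_cell('1/0')
print(result.success)        # False: error_in_exec holds the ZeroDivisionError
try:
    result.raise_error()     # re-raises the stored exception
except ZeroDivisionError:
    pass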
-
-class InteractiveShell(SingletonConfigurable):
- """An enhanced, interactive shell for Python."""
-
- _instance = None
+
+class InteractiveShell(SingletonConfigurable):
+ """An enhanced, interactive shell for Python."""
+
+ _instance = None
ast_transformers = List([], help=
- """
- A list of ast.NodeTransformer subclass instances, which will be applied
- to user input before code is run.
- """
+ """
+ A list of ast.NodeTransformer subclass instances, which will be applied
+ to user input before code is run.
+ """
).tag(config=True)
-
+
autocall = Enum((0,1,2), default_value=0, help=
- """
- Make IPython automatically call any callable object even if you didn't
- type explicit parentheses. For example, 'str 43' becomes 'str(43)'
- automatically. The value can be '0' to disable the feature, '1' for
- 'smart' autocall, where it is not applied if there are no more
- arguments on the line, and '2' for 'full' autocall, where all callable
- objects are automatically called (even if no arguments are present).
- """
+ """
+ Make IPython automatically call any callable object even if you didn't
+ type explicit parentheses. For example, 'str 43' becomes 'str(43)'
+ automatically. The value can be '0' to disable the feature, '1' for
+ 'smart' autocall, where it is not applied if there are no more
+ arguments on the line, and '2' for 'full' autocall, where all callable
+ objects are automatically called (even if no arguments are present).
+ """
).tag(config=True)
- # TODO: remove all autoindent logic and put into frontends.
- # We can't do this yet because even runlines uses the autoindent.
+ # TODO: remove all autoindent logic and put into frontends.
+ # We can't do this yet because even runlines uses the autoindent.
autoindent = Bool(True, help=
- """
- Autoindent IPython code entered interactively.
- """
+ """
+ Autoindent IPython code entered interactively.
+ """
).tag(config=True)
automagic = Bool(True, help=
- """
- Enable magic commands to be called without the leading %.
- """
+ """
+ Enable magic commands to be called without the leading %.
+ """
).tag(config=True)
banner1 = Unicode(default_banner,
- help="""The part of the banner to be printed before the profile"""
+ help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
- help="""The part of the banner to be printed after the profile"""
+ help="""The part of the banner to be printed after the profile"""
).tag(config=True)
-
+
cache_size = Integer(1000, help=
- """
- Set the size of the output cache. The default is 1000, you can
- change it permanently in your config file. Setting it to 0 completely
- disables the caching system, and the minimum value accepted is 20 (if
- you provide a value less than 20, it is reset to 0 and a warning is
- issued). This limit is defined because otherwise you'll spend more
- time re-flushing a too small cache than working
- """
+ """
+ Set the size of the output cache. The default is 1000, you can
+ change it permanently in your config file. Setting it to 0 completely
+ disables the caching system, and the minimum value accepted is 20 (if
+ you provide a value less than 20, it is reset to 0 and a warning is
+ issued). This limit is defined because otherwise you'll spend more
+ time re-flushing a too small cache than working
+ """
).tag(config=True)
color_info = Bool(True, help=
- """
- Use colors for displaying information about objects. Because this
- information is passed through a pager (like 'less'), and some pagers
- get confused with color codes, this capability can be turned off.
- """
+ """
+ Use colors for displaying information about objects. Because this
+ information is passed through a pager (like 'less'), and some pagers
+ get confused with color codes, this capability can be turned off.
+ """
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
@@ -262,26 +262,26 @@ class InteractiveShell(SingletonConfigurable):
).tag(config=True)
debug = Bool(False).tag(config=True)
deep_reload = Bool(False, help=
- """
- **Deprecated**
-
- Will be removed in IPython 6.0
-
- Enable deep (recursive) reloading by default. IPython can use the
- deep_reload module which reloads changes in modules recursively (it
- replaces the reload() function, so you don't need to change anything to
- use it). `deep_reload` forces a full reload of modules whose code may
- have changed, which the default reload() function does not. When
- deep_reload is off, IPython will use the normal reload(), but
- deep_reload will still be available as dreload().
- """
+ """
+ **Deprecated**
+
+ Will be removed in IPython 6.0
+
+ Enable deep (recursive) reloading by default. IPython can use the
+ deep_reload module which reloads changes in modules recursively (it
+ replaces the reload() function, so you don't need to change anything to
+ use it). `deep_reload` forces a full reload of modules whose code may
+ have changed, which the default reload() function does not. When
+ deep_reload is off, IPython will use the normal reload(), but
+ deep_reload will still be available as dreload().
+ """
).tag(config=True)
disable_failing_post_execute = Bool(False,
- help="Don't call post-execute functions that have failed in the past."
+ help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
- display_formatter = Instance(DisplayFormatter, allow_none=True)
- displayhook_class = Type(DisplayHook)
- display_pub_class = Type(DisplayPublisher)
+ display_formatter = Instance(DisplayFormatter, allow_none=True)
+ displayhook_class = Type(DisplayHook)
+ display_pub_class = Type(DisplayPublisher)
sphinxify_docstring = Bool(False, help=
"""
@@ -305,58 +305,58 @@ class InteractiveShell(SingletonConfigurable):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
- data_pub_class = None
-
+ data_pub_class = None
+
exit_now = Bool(False)
- exiter = Instance(ExitAutocall)
+ exiter = Instance(ExitAutocall)
@default('exiter')
- def _exiter_default(self):
- return ExitAutocall(self)
- # Monotonically increasing execution counter
- execution_count = Integer(1)
- filename = Unicode("<ipython console>")
+ def _exiter_default(self):
+ return ExitAutocall(self)
+ # Monotonically increasing execution counter
+ execution_count = Integer(1)
+ filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
-
- # Input splitter, to transform input line by line and detect when a block
- # is ready to be executed.
- input_splitter = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
- (), {'line_input_checker': True})
-
- # This InputSplitter instance is used to transform completed cells before
- # running them. It allows cell magics to contain blank lines.
- input_transformer_manager = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
- (), {'line_input_checker': False})
-
+
+ # Input splitter, to transform input line by line and detect when a block
+ # is ready to be executed.
+ input_splitter = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
+ (), {'line_input_checker': True})
+
+ # This InputSplitter instance is used to transform completed cells before
+ # running them. It allows cell magics to contain blank lines.
+ input_transformer_manager = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
+ (), {'line_input_checker': False})
+
logstart = Bool(False, help=
- """
- Start logging to the default log file in overwrite mode.
- Use `logappend` to specify a log file to **append** logs to.
- """
+ """
+ Start logging to the default log file in overwrite mode.
+ Use `logappend` to specify a log file to **append** logs to.
+ """
).tag(config=True)
logfile = Unicode('', help=
- """
- The name of the logfile to use.
- """
+ """
+ The name of the logfile to use.
+ """
).tag(config=True)
logappend = Unicode('', help=
- """
- Start logging to the given file in append mode.
- Use `logfile` to specify a log file to **overwrite** logs to.
- """
+ """
+ Start logging to the given file in append mode.
+ Use `logfile` to specify a log file to **overwrite** logs to.
+ """
).tag(config=True)
- object_info_string_level = Enum((0,1,2), default_value=0,
+ object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
- """
- Automatically call the pdb debugger after every exception.
- """
+ """
+ Automatically call the pdb debugger after every exception.
+ """
).tag(config=True)
display_page = Bool(False,
- help="""If True, anything that would be passed to the pager
- will be displayed as regular output instead."""
+ help="""If True, anything that would be passed to the pager
+ will be displayed as regular output instead."""
).tag(config=True)
-
- # deprecated prompt traits:
+
+ # deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
@@ -376,1183 +376,1183 @@ class InteractiveShell(SingletonConfigurable):
name = change['name']
warn("InteractiveShell.{name} is deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly.".format(
name=name)
- )
- # protect against weird cases where self.config may not exist:
-
+ )
+ # protect against weird cases where self.config may not exist:
+
show_rewritten_input = Bool(True,
- help="Show rewritten input, e.g. for autocall."
+ help="Show rewritten input, e.g. for autocall."
).tag(config=True)
-
+
quiet = Bool(False).tag(config=True)
-
+
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
-
+
history_load_length = Integer(1000, help=
- """
- The number of saved history entries to be loaded
+ """
+ The number of saved history entries to be loaded
into the history buffer at startup.
- """
+ """
).tag(config=True)
-
- ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none'],
+
+ ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none'],
default_value='last_expr',
- help="""
- 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
+ help="""
+ 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions)."""
).tag(config=True)
-
- # TODO: this part of prompt management should be moved to the frontends.
- # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
+
+ # TODO: this part of prompt management should be moved to the frontends.
+ # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
- xmode = CaselessStrEnum(('Context','Plain', 'Verbose'),
+ xmode = CaselessStrEnum(('Context','Plain', 'Verbose'),
default_value='Context').tag(config=True)
-
- # Subcomponents of InteractiveShell
- alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
- prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
- builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
- display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
- extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
- payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
- history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
- magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
-
- profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
- @property
- def profile(self):
- if self.profile_dir is not None:
- name = os.path.basename(self.profile_dir.location)
- return name.replace('profile_','')
-
-
- # Private interface
- _post_execute = Dict()
-
- # Tracks any GUI loop loaded for pylab
- pylab_gui_select = None
-
+
+ # Subcomponents of InteractiveShell
+ alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
+ display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
+ extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
+ payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
+ history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
+ magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
+
+ profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
+ @property
+ def profile(self):
+ if self.profile_dir is not None:
+ name = os.path.basename(self.profile_dir.location)
+ return name.replace('profile_','')
+
+
+ # Private interface
+ _post_execute = Dict()
+
+ # Tracks any GUI loop loaded for pylab
+ pylab_gui_select = None
+
     last_execution_succeeded = Bool(True, help='Did the last executed command succeed')
- def __init__(self, ipython_dir=None, profile_dir=None,
- user_module=None, user_ns=None,
- custom_exceptions=((), None), **kwargs):
-
- # This is where traits with a config_key argument are updated
- # from the values on config.
- super(InteractiveShell, self).__init__(**kwargs)
+ def __init__(self, ipython_dir=None, profile_dir=None,
+ user_module=None, user_ns=None,
+ custom_exceptions=((), None), **kwargs):
+
+ # This is where traits with a config_key argument are updated
+ # from the values on config.
+ super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
- self.configurables = [self]
-
- # These are relatively independent and stateless
- self.init_ipython_dir(ipython_dir)
- self.init_profile_dir(profile_dir)
- self.init_instance_attrs()
- self.init_environment()
+ self.configurables = [self]
+
+ # These are relatively independent and stateless
+ self.init_ipython_dir(ipython_dir)
+ self.init_profile_dir(profile_dir)
+ self.init_instance_attrs()
+ self.init_environment()
- # Check if we're in a virtualenv, and set up sys.path.
- self.init_virtualenv()
-
- # Create namespaces (user_ns, user_global_ns, etc.)
- self.init_create_namespaces(user_module, user_ns)
- # This has to be done after init_create_namespaces because it uses
- # something in self.user_ns, but before init_sys_modules, which
- # is the first thing to modify sys.
- # TODO: When we override sys.stdout and sys.stderr before this class
- # is created, we are saving the overridden ones here. Not sure if this
- # is what we want to do.
- self.save_sys_module_state()
- self.init_sys_modules()
-
- # While we're trying to have each part of the code directly access what
- # it needs without keeping redundant references to objects, we have too
- # much legacy code that expects ip.db to exist.
- self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
-
- self.init_history()
- self.init_encoding()
- self.init_prefilter()
-
- self.init_syntax_highlighting()
- self.init_hooks()
- self.init_events()
- self.init_pushd_popd_magic()
- self.init_user_ns()
- self.init_logger()
- self.init_builtins()
-
- # The following was in post_config_initialization
- self.init_inspector()
- if py3compat.PY3:
- self.raw_input_original = input
- else:
- self.raw_input_original = raw_input
- self.init_completer()
- # TODO: init_io() needs to happen before init_traceback handlers
- # because the traceback handlers hardcode the stdout/stderr streams.
-        # This logic is in debugger.Pdb and should eventually be changed.
- self.init_io()
- self.init_traceback_handlers(custom_exceptions)
- self.init_prompts()
- self.init_display_formatter()
- self.init_display_pub()
- self.init_data_pub()
- self.init_displayhook()
- self.init_magics()
- self.init_alias()
- self.init_logstart()
- self.init_pdb()
- self.init_extension_manager()
- self.init_payload()
- self.init_deprecation_warnings()
- self.hooks.late_startup_hook()
- self.events.trigger('shell_initialized', self)
- atexit.register(self.atexit_operations)
-
- def get_ipython(self):
- """Return the currently running IPython instance."""
- return self
-
- #-------------------------------------------------------------------------
- # Trait changed handlers
- #-------------------------------------------------------------------------
+ # Check if we're in a virtualenv, and set up sys.path.
+ self.init_virtualenv()
+
+ # Create namespaces (user_ns, user_global_ns, etc.)
+ self.init_create_namespaces(user_module, user_ns)
+ # This has to be done after init_create_namespaces because it uses
+ # something in self.user_ns, but before init_sys_modules, which
+ # is the first thing to modify sys.
+ # TODO: When we override sys.stdout and sys.stderr before this class
+ # is created, we are saving the overridden ones here. Not sure if this
+ # is what we want to do.
+ self.save_sys_module_state()
+ self.init_sys_modules()
+
+ # While we're trying to have each part of the code directly access what
+ # it needs without keeping redundant references to objects, we have too
+ # much legacy code that expects ip.db to exist.
+ self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
+
+ self.init_history()
+ self.init_encoding()
+ self.init_prefilter()
+
+ self.init_syntax_highlighting()
+ self.init_hooks()
+ self.init_events()
+ self.init_pushd_popd_magic()
+ self.init_user_ns()
+ self.init_logger()
+ self.init_builtins()
+
+ # The following was in post_config_initialization
+ self.init_inspector()
+ if py3compat.PY3:
+ self.raw_input_original = input
+ else:
+ self.raw_input_original = raw_input
+ self.init_completer()
+ # TODO: init_io() needs to happen before init_traceback handlers
+ # because the traceback handlers hardcode the stdout/stderr streams.
+        # This logic is in debugger.Pdb and should eventually be changed.
+ self.init_io()
+ self.init_traceback_handlers(custom_exceptions)
+ self.init_prompts()
+ self.init_display_formatter()
+ self.init_display_pub()
+ self.init_data_pub()
+ self.init_displayhook()
+ self.init_magics()
+ self.init_alias()
+ self.init_logstart()
+ self.init_pdb()
+ self.init_extension_manager()
+ self.init_payload()
+ self.init_deprecation_warnings()
+ self.hooks.late_startup_hook()
+ self.events.trigger('shell_initialized', self)
+ atexit.register(self.atexit_operations)
+
+ def get_ipython(self):
+ """Return the currently running IPython instance."""
+ return self
+
+ #-------------------------------------------------------------------------
+ # Trait changed handlers
+ #-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
-
- def set_autoindent(self,value=None):
+
+ def set_autoindent(self,value=None):
"""Set the autoindent flag.
-
- If called with no arguments, it acts as a toggle."""
- if value is None:
- self.autoindent = not self.autoindent
- else:
- self.autoindent = value
-
- #-------------------------------------------------------------------------
- # init_* methods called by __init__
- #-------------------------------------------------------------------------
-
- def init_ipython_dir(self, ipython_dir):
- if ipython_dir is not None:
- self.ipython_dir = ipython_dir
- return
-
- self.ipython_dir = get_ipython_dir()
-
- def init_profile_dir(self, profile_dir):
- if profile_dir is not None:
- self.profile_dir = profile_dir
- return
- self.profile_dir =\
- ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
-
- def init_instance_attrs(self):
- self.more = False
-
- # command compiler
- self.compile = CachingCompiler()
-
- # Make an empty namespace, which extension writers can rely on both
- # existing and NEVER being used by ipython itself. This gives them a
- # convenient location for storing additional information and state
- # their extensions may require, without fear of collisions with other
- # ipython names that may develop later.
- self.meta = Struct()
-
- # Temporary files used for various purposes. Deleted at exit.
- self.tempfiles = []
- self.tempdirs = []
-
- # keep track of where we started running (mainly for crash post-mortem)
- # This is not being used anywhere currently.
- self.starting_dir = py3compat.getcwd()
-
- # Indentation management
- self.indent_current_nsp = 0
-
- # Dict to track post-execution functions that have been registered
- self._post_execute = {}
-
- def init_environment(self):
- """Any changes we need to make to the user's environment."""
- pass
-
- def init_encoding(self):
- # Get system encoding at startup time. Certain terminals (like Emacs
-        # under Win32) have it set to None, and we need to have a known valid
- # encoding to use in the raw_input() method
- try:
- self.stdin_encoding = sys.stdin.encoding or 'ascii'
- except AttributeError:
- self.stdin_encoding = 'ascii'
-
- def init_syntax_highlighting(self):
- # Python source parser/formatter for syntax highlighting
- pyformat = PyColorize.Parser().format
- self.pycolorize = lambda src: pyformat(src,'str',self.colors)
-
+
+ If called with no arguments, it acts as a toggle."""
+ if value is None:
+ self.autoindent = not self.autoindent
+ else:
+ self.autoindent = value
+
+ #-------------------------------------------------------------------------
+ # init_* methods called by __init__
+ #-------------------------------------------------------------------------
+
+ def init_ipython_dir(self, ipython_dir):
+ if ipython_dir is not None:
+ self.ipython_dir = ipython_dir
+ return
+
+ self.ipython_dir = get_ipython_dir()
+
+ def init_profile_dir(self, profile_dir):
+ if profile_dir is not None:
+ self.profile_dir = profile_dir
+ return
+ self.profile_dir =\
+ ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
+
+ def init_instance_attrs(self):
+ self.more = False
+
+ # command compiler
+ self.compile = CachingCompiler()
+
+ # Make an empty namespace, which extension writers can rely on both
+ # existing and NEVER being used by ipython itself. This gives them a
+ # convenient location for storing additional information and state
+ # their extensions may require, without fear of collisions with other
+ # ipython names that may develop later.
+ self.meta = Struct()
+
+ # Temporary files used for various purposes. Deleted at exit.
+ self.tempfiles = []
+ self.tempdirs = []
+
+ # keep track of where we started running (mainly for crash post-mortem)
+ # This is not being used anywhere currently.
+ self.starting_dir = py3compat.getcwd()
+
+ # Indentation management
+ self.indent_current_nsp = 0
+
+ # Dict to track post-execution functions that have been registered
+ self._post_execute = {}
+
+ def init_environment(self):
+ """Any changes we need to make to the user's environment."""
+ pass
+
+ def init_encoding(self):
+ # Get system encoding at startup time. Certain terminals (like Emacs
+        # under Win32) have it set to None, and we need to have a known valid
+ # encoding to use in the raw_input() method
+ try:
+ self.stdin_encoding = sys.stdin.encoding or 'ascii'
+ except AttributeError:
+ self.stdin_encoding = 'ascii'
+
+ def init_syntax_highlighting(self):
+ # Python source parser/formatter for syntax highlighting
+ pyformat = PyColorize.Parser().format
+ self.pycolorize = lambda src: pyformat(src,'str',self.colors)
+
def refresh_style(self):
# No-op here, used in subclass
pass
- def init_pushd_popd_magic(self):
- # for pushd/popd management
- self.home_dir = get_home_dir()
-
- self.dir_stack = []
-
- def init_logger(self):
- self.logger = Logger(self.home_dir, logfname='ipython_log.py',
- logmode='rotate')
-
- def init_logstart(self):
- """Initialize logging in case it was requested at the command line.
- """
- if self.logappend:
- self.magic('logstart %s append' % self.logappend)
- elif self.logfile:
- self.magic('logstart %s' % self.logfile)
- elif self.logstart:
- self.magic('logstart')
-
- def init_deprecation_warnings(self):
- """
-        Register the default filter for deprecation warnings.
-
-        This allows deprecation warnings from functions used interactively to be
-        shown to users, while still hiding deprecation warnings raised by library imports.
- """
- warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
-
- def init_builtins(self):
- # A single, static flag that we set to True. Its presence indicates
- # that an IPython shell has been created, and we make no attempts at
- # removing on exit or representing the existence of more than one
- # IPython at a time.
- builtin_mod.__dict__['__IPYTHON__'] = True
+ def init_pushd_popd_magic(self):
+ # for pushd/popd management
+ self.home_dir = get_home_dir()
+
+ self.dir_stack = []
+
+ def init_logger(self):
+ self.logger = Logger(self.home_dir, logfname='ipython_log.py',
+ logmode='rotate')
+
+ def init_logstart(self):
+ """Initialize logging in case it was requested at the command line.
+ """
+ if self.logappend:
+ self.magic('logstart %s append' % self.logappend)
+ elif self.logfile:
+ self.magic('logstart %s' % self.logfile)
+ elif self.logstart:
+ self.magic('logstart')
+
+ def init_deprecation_warnings(self):
+ """
+        Register the default filter for deprecation warnings.
+
+        This allows deprecation warnings from functions used interactively to be
+        shown to users, while still hiding deprecation warnings raised by library imports.
+ """
+ warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
+
+ def init_builtins(self):
+ # A single, static flag that we set to True. Its presence indicates
+ # that an IPython shell has been created, and we make no attempts at
+ # removing on exit or representing the existence of more than one
+ # IPython at a time.
+ builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
-
- self.builtin_trap = BuiltinTrap(shell=self)
-
- def init_inspector(self):
- # Object inspector
- self.inspector = oinspect.Inspector(oinspect.InspectColors,
- PyColorize.ANSICodeColors,
- 'NoColor',
- self.object_info_string_level)
-
- def init_io(self):
- # This will just use sys.stdout and sys.stderr. If you want to
- # override sys.stdout and sys.stderr themselves, you need to do that
- # *before* instantiating this class, because io holds onto
- # references to the underlying streams.
+
+ self.builtin_trap = BuiltinTrap(shell=self)
+
+ def init_inspector(self):
+ # Object inspector
+ self.inspector = oinspect.Inspector(oinspect.InspectColors,
+ PyColorize.ANSICodeColors,
+ 'NoColor',
+ self.object_info_string_level)
+
+ def init_io(self):
+ # This will just use sys.stdout and sys.stderr. If you want to
+ # override sys.stdout and sys.stderr themselves, you need to do that
+ # *before* instantiating this class, because io holds onto
+ # references to the underlying streams.
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
- io.stdout = io.IOStream(sys.stdout)
- io.stderr = io.IOStream(sys.stderr)
-
- def init_prompts(self):
- # Set system prompts, so that scripts can decide if they are running
- # interactively.
- sys.ps1 = 'In : '
- sys.ps2 = '...: '
- sys.ps3 = 'Out: '
-
- def init_display_formatter(self):
- self.display_formatter = DisplayFormatter(parent=self)
- self.configurables.append(self.display_formatter)
-
- def init_display_pub(self):
- self.display_pub = self.display_pub_class(parent=self)
- self.configurables.append(self.display_pub)
-
- def init_data_pub(self):
- if not self.data_pub_class:
- self.data_pub = None
- return
- self.data_pub = self.data_pub_class(parent=self)
- self.configurables.append(self.data_pub)
-
- def init_displayhook(self):
- # Initialize displayhook, set in/out prompts and printing system
- self.displayhook = self.displayhook_class(
- parent=self,
- shell=self,
- cache_size=self.cache_size,
- )
- self.configurables.append(self.displayhook)
-        # This is a context manager that installs/removes the displayhook at
- # the appropriate time.
- self.display_trap = DisplayTrap(hook=self.displayhook)
-
- def init_virtualenv(self):
- """Add a virtualenv to sys.path so the user can import modules from it.
- This isn't perfect: it doesn't use the Python interpreter with which the
- virtualenv was built, and it ignores the --no-site-packages option. A
- warning will appear suggesting the user installs IPython in the
- virtualenv, but for many cases, it probably works well enough.
+ io.stdout = io.IOStream(sys.stdout)
+ io.stderr = io.IOStream(sys.stderr)
+
+ def init_prompts(self):
+ # Set system prompts, so that scripts can decide if they are running
+ # interactively.
+ sys.ps1 = 'In : '
+ sys.ps2 = '...: '
+ sys.ps3 = 'Out: '
+
+ def init_display_formatter(self):
+ self.display_formatter = DisplayFormatter(parent=self)
+ self.configurables.append(self.display_formatter)
+
+ def init_display_pub(self):
+ self.display_pub = self.display_pub_class(parent=self)
+ self.configurables.append(self.display_pub)
+
+ def init_data_pub(self):
+ if not self.data_pub_class:
+ self.data_pub = None
+ return
+ self.data_pub = self.data_pub_class(parent=self)
+ self.configurables.append(self.data_pub)
+
+ def init_displayhook(self):
+ # Initialize displayhook, set in/out prompts and printing system
+ self.displayhook = self.displayhook_class(
+ parent=self,
+ shell=self,
+ cache_size=self.cache_size,
+ )
+ self.configurables.append(self.displayhook)
+        # This is a context manager that installs/removes the displayhook at
+ # the appropriate time.
+ self.display_trap = DisplayTrap(hook=self.displayhook)
+
+ def init_virtualenv(self):
+ """Add a virtualenv to sys.path so the user can import modules from it.
+ This isn't perfect: it doesn't use the Python interpreter with which the
+ virtualenv was built, and it ignores the --no-site-packages option. A
+ warning will appear suggesting the user installs IPython in the
+ virtualenv, but for many cases, it probably works well enough.
- Adapted from code snippets online.
+ Adapted from code snippets online.
- http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
- """
- if 'VIRTUAL_ENV' not in os.environ:
- # Not in a virtualenv
- return
+ http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
+ """
+ if 'VIRTUAL_ENV' not in os.environ:
+ # Not in a virtualenv
+ return
- # venv detection:
- # stdlib venv may symlink sys.executable, so we can't use realpath.
- # but others can symlink *to* the venv Python, so we can't just use sys.executable.
- # So we just check every item in the symlink tree (generally <= 3)
- p = os.path.normcase(sys.executable)
- paths = [p]
- while os.path.islink(p):
- p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
- paths.append(p)
- p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
- if any(p.startswith(p_venv) for p in paths):
- # Running properly in the virtualenv, don't need to do anything
- return
+ # venv detection:
+ # stdlib venv may symlink sys.executable, so we can't use realpath.
+ # but others can symlink *to* the venv Python, so we can't just use sys.executable.
+ # So we just check every item in the symlink tree (generally <= 3)
+ p = os.path.normcase(sys.executable)
+ paths = [p]
+ while os.path.islink(p):
+ p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
+ paths.append(p)
+ p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
+ if any(p.startswith(p_venv) for p in paths):
+ # Running properly in the virtualenv, don't need to do anything
+ return
- warn("Attempting to work in a virtualenv. If you encounter problems, please "
- "install IPython inside the virtualenv.")
- if sys.platform == "win32":
+ warn("Attempting to work in a virtualenv. If you encounter problems, please "
+ "install IPython inside the virtualenv.")
+ if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
- else:
- virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
- 'python%d.%d' % sys.version_info[:2], 'site-packages')
+ else:
+ virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
+ 'python%d.%d' % sys.version_info[:2], 'site-packages')
- import site
- sys.path.insert(0, virtual_env)
- site.addsitedir(virtual_env)
-
- #-------------------------------------------------------------------------
- # Things related to injections into the sys module
- #-------------------------------------------------------------------------
-
- def save_sys_module_state(self):
- """Save the state of hooks in the sys module.
-
- This has to be called after self.user_module is created.
- """
- self._orig_sys_module_state = {'stdin': sys.stdin,
- 'stdout': sys.stdout,
- 'stderr': sys.stderr,
- 'excepthook': sys.excepthook}
- self._orig_sys_modules_main_name = self.user_module.__name__
- self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
-
- def restore_sys_module_state(self):
- """Restore the state of the sys module."""
- try:
- for k, v in iteritems(self._orig_sys_module_state):
- setattr(sys, k, v)
- except AttributeError:
- pass
-        # Reset what was done in self.init_sys_modules
- if self._orig_sys_modules_main_mod is not None:
- sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
-
- #-------------------------------------------------------------------------
- # Things related to the banner
- #-------------------------------------------------------------------------
+ import site
+ sys.path.insert(0, virtual_env)
+ site.addsitedir(virtual_env)
+
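# --- Illustrative sketch (editor's note, not part of the diff): the symlink-walk
# venv test used above, restated as a standalone predicate. Assumes VIRTUAL_ENV
# is set in the environment.
import os
import sys

def running_inside_active_venv():
    p = os.path.normcase(sys.executable)
    paths = [p]
    while os.path.islink(p):    # walk the whole symlink chain (usually <= 3 hops)
        p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
        paths.append(p)
    p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
    return any(q.startswith(p_venv) for q in paths)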
+ #-------------------------------------------------------------------------
+ # Things related to injections into the sys module
+ #-------------------------------------------------------------------------
+
+ def save_sys_module_state(self):
+ """Save the state of hooks in the sys module.
+
+ This has to be called after self.user_module is created.
+ """
+ self._orig_sys_module_state = {'stdin': sys.stdin,
+ 'stdout': sys.stdout,
+ 'stderr': sys.stderr,
+ 'excepthook': sys.excepthook}
+ self._orig_sys_modules_main_name = self.user_module.__name__
+ self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
+
+ def restore_sys_module_state(self):
+ """Restore the state of the sys module."""
+ try:
+ for k, v in iteritems(self._orig_sys_module_state):
+ setattr(sys, k, v)
+ except AttributeError:
+ pass
+        # Reset what was done in self.init_sys_modules
+ if self._orig_sys_modules_main_mod is not None:
+ sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
+
+ #-------------------------------------------------------------------------
+ # Things related to the banner
+ #-------------------------------------------------------------------------
- @property
- def banner(self):
- banner = self.banner1
- if self.profile and self.profile != 'default':
- banner += '\nIPython profile: %s\n' % self.profile
- if self.banner2:
- banner += '\n' + self.banner2
- return banner
-
- def show_banner(self, banner=None):
- if banner is None:
- banner = self.banner
+ @property
+ def banner(self):
+ banner = self.banner1
+ if self.profile and self.profile != 'default':
+ banner += '\nIPython profile: %s\n' % self.profile
+ if self.banner2:
+ banner += '\n' + self.banner2
+ return banner
+
+ def show_banner(self, banner=None):
+ if banner is None:
+ banner = self.banner
sys.stdout.write(banner)
- #-------------------------------------------------------------------------
- # Things related to hooks
- #-------------------------------------------------------------------------
-
- def init_hooks(self):
- # hooks holds pointers used for user-side customizations
- self.hooks = Struct()
-
- self.strdispatchers = {}
-
- # Set all default hooks, defined in the IPython.hooks module.
- hooks = IPython.core.hooks
- for hook_name in hooks.__all__:
- # default hooks have priority 100, i.e. low; user hooks should have
- # 0-100 priority
- self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
+ #-------------------------------------------------------------------------
+ # Things related to hooks
+ #-------------------------------------------------------------------------
+
+ def init_hooks(self):
+ # hooks holds pointers used for user-side customizations
+ self.hooks = Struct()
+
+ self.strdispatchers = {}
+
+ # Set all default hooks, defined in the IPython.hooks module.
+ hooks = IPython.core.hooks
+ for hook_name in hooks.__all__:
+ # default hooks have priority 100, i.e. low; user hooks should have
+ # 0-100 priority
+ self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
- if self.display_page:
- self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
+ if self.display_page:
+ self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
- def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
- _warn_deprecated=True):
- """set_hook(name,hook) -> sets an internal IPython hook.
-
- IPython exposes some of its internal API as user-modifiable hooks. By
- adding your function to one of these hooks, you can modify IPython's
- behavior to call at runtime your own routines."""
-
- # At some point in the future, this should validate the hook before it
- # accepts it. Probably at least check that the hook takes the number
- # of args it's supposed to.
-
- f = types.MethodType(hook,self)
-
- # check if the hook is for strdispatcher first
- if str_key is not None:
- sdp = self.strdispatchers.get(name, StrDispatch())
- sdp.add_s(str_key, f, priority )
- self.strdispatchers[name] = sdp
- return
- if re_key is not None:
- sdp = self.strdispatchers.get(name, StrDispatch())
- sdp.add_re(re.compile(re_key), f, priority )
- self.strdispatchers[name] = sdp
- return
-
- dp = getattr(self.hooks, name, None)
- if name not in IPython.core.hooks.__all__:
- print("Warning! Hook '%s' is not one of %s" % \
- (name, IPython.core.hooks.__all__ ))
-
- if _warn_deprecated and (name in IPython.core.hooks.deprecated):
- alternative = IPython.core.hooks.deprecated[name]
- warn("Hook {} is deprecated. Use {} instead.".format(name, alternative))
-
- if not dp:
- dp = IPython.core.hooks.CommandChainDispatcher()
-
- try:
- dp.add(f,priority)
- except AttributeError:
- # it was not commandchain, plain old func - replace
- dp = f
-
- setattr(self.hooks,name, dp)
-
- #-------------------------------------------------------------------------
- # Things related to events
- #-------------------------------------------------------------------------
-
- def init_events(self):
- self.events = EventManager(self, available_events)
-
- self.events.register("pre_execute", self._clear_warning_registry)
-
- def register_post_execute(self, func):
- """DEPRECATED: Use ip.events.register('post_run_cell', func)
+ def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
+ _warn_deprecated=True):
+ """set_hook(name,hook) -> sets an internal IPython hook.
+
+ IPython exposes some of its internal API as user-modifiable hooks. By
+ adding your function to one of these hooks, you can modify IPython's
+ behavior to call at runtime your own routines."""
+
+ # At some point in the future, this should validate the hook before it
+ # accepts it. Probably at least check that the hook takes the number
+ # of args it's supposed to.
+
+ f = types.MethodType(hook,self)
+
+ # check if the hook is for strdispatcher first
+ if str_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_s(str_key, f, priority )
+ self.strdispatchers[name] = sdp
+ return
+ if re_key is not None:
+ sdp = self.strdispatchers.get(name, StrDispatch())
+ sdp.add_re(re.compile(re_key), f, priority )
+ self.strdispatchers[name] = sdp
+ return
+
+ dp = getattr(self.hooks, name, None)
+ if name not in IPython.core.hooks.__all__:
+ print("Warning! Hook '%s' is not one of %s" % \
+ (name, IPython.core.hooks.__all__ ))
+
+ if _warn_deprecated and (name in IPython.core.hooks.deprecated):
+ alternative = IPython.core.hooks.deprecated[name]
+ warn("Hook {} is deprecated. Use {} instead.".format(name, alternative))
+
+ if not dp:
+ dp = IPython.core.hooks.CommandChainDispatcher()
+
+ try:
+ dp.add(f,priority)
+ except AttributeError:
+ # it was not commandchain, plain old func - replace
+ dp = f
+
+ setattr(self.hooks,name, dp)
+
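# --- Illustrative sketch (editor's note, not part of the diff): registering a
# custom 'editor' hook (one of the names in IPython.core.hooks.__all__), using
# the signature the default editor hook expects.
def my_editor(self, filename, linenum=None, wait=True):
    import subprocess
    args = [filename] if linenum is None else ['+%d' % linenum, filename]
    subprocess.call(['nano'] + args)   # 'nano' is an arbitrary example editor

get_ipython().set_hook('editor', my_editor)   # default priority of 50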
+ #-------------------------------------------------------------------------
+ # Things related to events
+ #-------------------------------------------------------------------------
+
+ def init_events(self):
+ self.events = EventManager(self, available_events)
+
+ self.events.register("pre_execute", self._clear_warning_registry)
+
+ def register_post_execute(self, func):
+ """DEPRECATED: Use ip.events.register('post_run_cell', func)
- Register a function for calling after code execution.
- """
- warn("ip.register_post_execute is deprecated, use "
- "ip.events.register('post_run_cell', func) instead.")
- self.events.register('post_run_cell', func)
+ Register a function for calling after code execution.
+ """
+ warn("ip.register_post_execute is deprecated, use "
+ "ip.events.register('post_run_cell', func) instead.")
+ self.events.register('post_run_cell', func)
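# --- Illustrative sketch (editor's note, not part of the diff): the replacement
# API named in the deprecation warning above.
def report(*args, **kwargs):    # post_run_cell callbacks take no arguments here
    print('cell finished')

get_ipython().events.register('post_run_cell', report)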
- def _clear_warning_registry(self):
- # clear the warning registry, so that different code blocks with
- # overlapping line number ranges don't cause spurious suppression of
- # warnings (see gh-6611 for details)
- if "__warningregistry__" in self.user_global_ns:
- del self.user_global_ns["__warningregistry__"]
-
- #-------------------------------------------------------------------------
- # Things related to the "main" module
- #-------------------------------------------------------------------------
-
- def new_main_mod(self, filename, modname):
- """Return a new 'main' module object for user code execution.
+ def _clear_warning_registry(self):
+ # clear the warning registry, so that different code blocks with
+ # overlapping line number ranges don't cause spurious suppression of
+ # warnings (see gh-6611 for details)
+ if "__warningregistry__" in self.user_global_ns:
+ del self.user_global_ns["__warningregistry__"]
+
+ #-------------------------------------------------------------------------
+ # Things related to the "main" module
+ #-------------------------------------------------------------------------
+
+ def new_main_mod(self, filename, modname):
+ """Return a new 'main' module object for user code execution.
- ``filename`` should be the path of the script which will be run in the
- module. Requests with the same filename will get the same module, with
- its namespace cleared.
+ ``filename`` should be the path of the script which will be run in the
+ module. Requests with the same filename will get the same module, with
+ its namespace cleared.
- ``modname`` should be the module name - normally either '__main__' or
- the basename of the file without the extension.
+ ``modname`` should be the module name - normally either '__main__' or
+ the basename of the file without the extension.
- When scripts are executed via %run, we must keep a reference to their
- __main__ module around so that Python doesn't
- clear it, rendering references to module globals useless.
-
- This method keeps said reference in a private dict, keyed by the
- absolute path of the script. This way, for multiple executions of the
- same script we only keep one copy of the namespace (the last one),
- thus preventing memory leaks from old references while allowing the
- objects from the last execution to be accessible.
- """
- filename = os.path.abspath(filename)
- try:
- main_mod = self._main_mod_cache[filename]
- except KeyError:
- main_mod = self._main_mod_cache[filename] = types.ModuleType(
- py3compat.cast_bytes_py2(modname),
- doc="Module created for script run in IPython")
- else:
- main_mod.__dict__.clear()
- main_mod.__name__ = modname
+ When scripts are executed via %run, we must keep a reference to their
+ __main__ module around so that Python doesn't
+ clear it, rendering references to module globals useless.
+
+ This method keeps said reference in a private dict, keyed by the
+ absolute path of the script. This way, for multiple executions of the
+ same script we only keep one copy of the namespace (the last one),
+ thus preventing memory leaks from old references while allowing the
+ objects from the last execution to be accessible.
+ """
+ filename = os.path.abspath(filename)
+ try:
+ main_mod = self._main_mod_cache[filename]
+ except KeyError:
+ main_mod = self._main_mod_cache[filename] = types.ModuleType(
+ py3compat.cast_bytes_py2(modname),
+ doc="Module created for script run in IPython")
+ else:
+ main_mod.__dict__.clear()
+ main_mod.__name__ = modname
- main_mod.__file__ = filename
- # It seems pydoc (and perhaps others) needs any module instance to
- # implement a __nonzero__ method
- main_mod.__nonzero__ = lambda : True
+ main_mod.__file__ = filename
+ # It seems pydoc (and perhaps others) needs any module instance to
+ # implement a __nonzero__ method
+ main_mod.__nonzero__ = lambda : True
- return main_mod
-
- def clear_main_mod_cache(self):
- """Clear the cache of main modules.
-
- Mainly for use by utilities like %reset.
-
- Examples
- --------
-
- In [15]: import IPython
-
- In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
-
- In [17]: len(_ip._main_mod_cache) > 0
- Out[17]: True
-
- In [18]: _ip.clear_main_mod_cache()
-
- In [19]: len(_ip._main_mod_cache) == 0
- Out[19]: True
- """
- self._main_mod_cache.clear()
-
- #-------------------------------------------------------------------------
- # Things related to debugging
- #-------------------------------------------------------------------------
-
- def init_pdb(self):
- # Set calling of pdb on exceptions
- # self.call_pdb is a property
- self.call_pdb = self.pdb
-
- def _get_call_pdb(self):
- return self._call_pdb
-
- def _set_call_pdb(self,val):
-
- if val not in (0,1,False,True):
- raise ValueError('new call_pdb value must be boolean')
-
- # store value in instance
- self._call_pdb = val
-
- # notify the actual exception handlers
- self.InteractiveTB.call_pdb = val
-
- call_pdb = property(_get_call_pdb,_set_call_pdb,None,
- 'Control auto-activation of pdb at exceptions')
-
- def debugger(self,force=False):
+ return main_mod
+
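# Sketch of the caching contract documented above (assumes a live
# IPython session; the script path is hypothetical): the same filename
# yields the same module object, with its namespace cleared.
import os
ip = get_ipython()
path = os.path.abspath('script.py')
m1 = ip.new_main_mod(path, '__main__')
m1.x = 1
m2 = ip.new_main_mod(path, '__main__')
assert m1 is m2                # one cached module per filename
assert not hasattr(m2, 'x')    # but its namespace was cleared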
+ def clear_main_mod_cache(self):
+ """Clear the cache of main modules.
+
+ Mainly for use by utilities like %reset.
+
+ Examples
+ --------
+
+ In [15]: import IPython
+
+ In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
+
+ In [17]: len(_ip._main_mod_cache) > 0
+ Out[17]: True
+
+ In [18]: _ip.clear_main_mod_cache()
+
+ In [19]: len(_ip._main_mod_cache) == 0
+ Out[19]: True
+ """
+ self._main_mod_cache.clear()
+
+ #-------------------------------------------------------------------------
+ # Things related to debugging
+ #-------------------------------------------------------------------------
+
+ def init_pdb(self):
+ # Set calling of pdb on exceptions
+ # self.call_pdb is a property
+ self.call_pdb = self.pdb
+
+ def _get_call_pdb(self):
+ return self._call_pdb
+
+ def _set_call_pdb(self,val):
+
+ if val not in (0,1,False,True):
+ raise ValueError('new call_pdb value must be boolean')
+
+ # store value in instance
+ self._call_pdb = val
+
+ # notify the actual exception handlers
+ self.InteractiveTB.call_pdb = val
+
+ call_pdb = property(_get_call_pdb,_set_call_pdb,None,
+ 'Control auto-activation of pdb at exceptions')
+
+ def debugger(self,force=False):
"""Call the pdb debugger.
-
- Keywords:
-
- - force(False): by default, this routine checks the instance call_pdb
- flag and does not actually invoke the debugger if the flag is false.
- The 'force' option forces the debugger to activate even if the flag
- is false.
- """
-
- if not (force or self.call_pdb):
- return
-
- if not hasattr(sys,'last_traceback'):
- error('No traceback has been produced, nothing to debug.')
- return
-
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+ """
+
+ if not (force or self.call_pdb):
+ return
+
+ if not hasattr(sys,'last_traceback'):
+ error('No traceback has been produced, nothing to debug.')
+ return
+
self.InteractiveTB.debugger(force=True)
-
- #-------------------------------------------------------------------------
- # Things related to IPython's various namespaces
- #-------------------------------------------------------------------------
- default_user_namespaces = True
-
- def init_create_namespaces(self, user_module=None, user_ns=None):
- # Create the namespace where the user will operate. user_ns is
- # normally the only one used, and it is passed to the exec calls as
- # the locals argument. But we do carry a user_global_ns namespace
- # given as the exec 'globals' argument. This is useful in embedding
- # situations where the ipython shell opens in a context where the
- # distinction between locals and globals is meaningful. For
- # non-embedded contexts, it is just the same object as the user_ns dict.
-
- # FIXME. For some strange reason, __builtins__ is showing up at user
- # level as a dict instead of a module. This is a manual fix, but I
- # should really track down where the problem is coming from. Alex
- # Schmolck reported this problem first.
-
- # A useful post by Alex Martelli on this topic:
- # Re: inconsistent value from __builtins__
- # From: Alex Martelli <aleaxit@yahoo.com>
- # Date: Friday 01 October 2004 04:45:34 PM
- # Groups: comp.lang.python
-
- # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
- # > >>> print type(builtin_check.get_global_binding('__builtins__'))
- # > <type 'dict'>
- # > >>> print type(__builtins__)
- # > <type 'module'>
- # > Is this difference in return value intentional?
-
- # Well, it's documented that '__builtins__' can be either a dictionary
- # or a module, and it's been that way for a long time. Whether it's
- # intentional (or sensible), I don't know. In any case, the idea is
- # that if you need to access the built-in namespace directly, you
- # should start with "import __builtin__" (note, no 's') which will
- # definitely give you a module. Yeah, it's somewhat confusing:-(.
-
- # These routines return a properly built module and dict as needed by
- # the rest of the code, and can also be used by extension writers to
- # generate properly initialized namespaces.
- if (user_ns is not None) or (user_module is not None):
- self.default_user_namespaces = False
- self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
-
- # A record of hidden variables we have added to the user namespace, so
- # we can list later only variables defined in actual interactive use.
- self.user_ns_hidden = {}
-
- # Now that FakeModule produces a real module, we've run into a nasty
- # problem: after script execution (via %run), the module where the user
- # code ran is deleted. Now that this object is a true module (needed
- # so doctest and other tools work correctly), the Python module
- # teardown mechanism runs over it, and sets to None every variable
- # present in that module. Top-level references to objects from the
- # script survive, because the user_ns is updated with them. However,
- # calling functions defined in the script that use other things from
- # the script will fail, because the function's closure had references
- # to the original objects, which are now all None. So we must protect
- # these modules from deletion by keeping a cache.
- #
- # To avoid keeping stale modules around (we only need the one from the
- # last run), we use a dict keyed with the full path to the script, so
- # only the last version of the module is held in the cache. Note,
- # however, that we must cache the module *namespace contents* (their
- # __dict__). Because if we try to cache the actual modules, old ones
- # (uncached) could be destroyed while still holding references (such as
- # those held by GUI objects that tend to be long-lived).
- #
- # The %reset command will flush this cache. See the cache_main_mod()
- # and clear_main_mod_cache() methods for details on use.
-
- # This is the cache used for 'main' namespaces
- self._main_mod_cache = {}
-
- # A table holding all the namespaces IPython deals with, so that
- # introspection facilities can search easily.
- self.ns_table = {'user_global':self.user_module.__dict__,
- 'user_local':self.user_ns,
- 'builtin':builtin_mod.__dict__
- }
+
+ #-------------------------------------------------------------------------
+ # Things related to IPython's various namespaces
+ #-------------------------------------------------------------------------
+ default_user_namespaces = True
+
+ def init_create_namespaces(self, user_module=None, user_ns=None):
+ # Create the namespace where the user will operate. user_ns is
+ # normally the only one used, and it is passed to the exec calls as
+ # the locals argument. But we do carry a user_global_ns namespace
+ # given as the exec 'globals' argument. This is useful in embedding
+ # situations where the ipython shell opens in a context where the
+ # distinction between locals and globals is meaningful. For
+ # non-embedded contexts, it is just the same object as the user_ns dict.
+
+ # FIXME. For some strange reason, __builtins__ is showing up at user
+ # level as a dict instead of a module. This is a manual fix, but I
+ # should really track down where the problem is coming from. Alex
+ # Schmolck reported this problem first.
+
+ # A useful post by Alex Martelli on this topic:
+ # Re: inconsistent value from __builtins__
+ # From: Alex Martelli <aleaxit@yahoo.com>
+ # Date: Friday 01 October 2004 04:45:34 PM
+ # Groups: comp.lang.python
+
+ # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
+ # > >>> print type(builtin_check.get_global_binding('__builtins__'))
+ # > <type 'dict'>
+ # > >>> print type(__builtins__)
+ # > <type 'module'>
+ # > Is this difference in return value intentional?
+
+ # Well, it's documented that '__builtins__' can be either a dictionary
+ # or a module, and it's been that way for a long time. Whether it's
+ # intentional (or sensible), I don't know. In any case, the idea is
+ # that if you need to access the built-in namespace directly, you
+ # should start with "import __builtin__" (note, no 's') which will
+ # definitely give you a module. Yeah, it's somewhat confusing:-(.
+
+ # These routines return a properly built module and dict as needed by
+ # the rest of the code, and can also be used by extension writers to
+ # generate properly initialized namespaces.
+ if (user_ns is not None) or (user_module is not None):
+ self.default_user_namespaces = False
+ self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
+
+ # A record of hidden variables we have added to the user namespace, so
+ # we can list later only variables defined in actual interactive use.
+ self.user_ns_hidden = {}
+
+ # Now that FakeModule produces a real module, we've run into a nasty
+ # problem: after script execution (via %run), the module where the user
+ # code ran is deleted. Now that this object is a true module (needed
+ # so doctest and other tools work correctly), the Python module
+ # teardown mechanism runs over it, and sets to None every variable
+ # present in that module. Top-level references to objects from the
+ # script survive, because the user_ns is updated with them. However,
+ # calling functions defined in the script that use other things from
+ # the script will fail, because the function's closure had references
+ # to the original objects, which are now all None. So we must protect
+ # these modules from deletion by keeping a cache.
+ #
+ # To avoid keeping stale modules around (we only need the one from the
+ # last run), we use a dict keyed with the full path to the script, so
+ # only the last version of the module is held in the cache. Note,
+ # however, that we must cache the module *namespace contents* (their
+ # __dict__). Because if we try to cache the actual modules, old ones
+ # (uncached) could be destroyed while still holding references (such as
+ # those held by GUI objects that tend to be long-lived).
+ #
+ # The %reset command will flush this cache. See the cache_main_mod()
+ # and clear_main_mod_cache() methods for details on use.
+
+ # This is the cache used for 'main' namespaces
+ self._main_mod_cache = {}
+
+ # A table holding all the namespaces IPython deals with, so that
+ # introspection facilities can search easily.
+ self.ns_table = {'user_global':self.user_module.__dict__,
+ 'user_local':self.user_ns,
+ 'builtin':builtin_mod.__dict__
+ }
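# Minimal demonstration of the __builtin__ vs __builtins__ point made in
# the quoted thread above (Python 2 spelling, matching this file; run it
# both in __main__ and from an imported module to see the difference).
import __builtin__                 # no trailing 's' - always a module
print(type(__builtin__))           # <type 'module'>
print(type(__builtins__))          # module in __main__, dict in imported modules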
- @property
- def user_global_ns(self):
- return self.user_module.__dict__
-
- def prepare_user_module(self, user_module=None, user_ns=None):
- """Prepare the module and namespace in which user code will be run.
+ @property
+ def user_global_ns(self):
+ return self.user_module.__dict__
+
+ def prepare_user_module(self, user_module=None, user_ns=None):
+ """Prepare the module and namespace in which user code will be run.
- When IPython is started normally, both parameters are None: a new module
- is created automatically, and its __dict__ used as the namespace.
+ When IPython is started normally, both parameters are None: a new module
+ is created automatically, and its __dict__ used as the namespace.
- If only user_module is provided, its __dict__ is used as the namespace.
- If only user_ns is provided, a dummy module is created, and user_ns
- becomes the global namespace. If both are provided (as they may be
- when embedding), user_ns is the local namespace, and user_module
- provides the global namespace.
-
- Parameters
- ----------
- user_module : module, optional
- The current user module in which IPython is being run. If None,
- a clean module will be created.
- user_ns : dict, optional
- A namespace in which to run interactive commands.
-
- Returns
- -------
- A tuple of user_module and user_ns, each properly initialised.
- """
- if user_module is None and user_ns is not None:
- user_ns.setdefault("__name__", "__main__")
- user_module = DummyMod()
- user_module.__dict__ = user_ns
+ If only user_module is provided, its __dict__ is used as the namespace.
+ If only user_ns is provided, a dummy module is created, and user_ns
+ becomes the global namespace. If both are provided (as they may be
+ when embedding), user_ns is the local namespace, and user_module
+ provides the global namespace.
+
+ Parameters
+ ----------
+ user_module : module, optional
+ The current user module in which IPython is being run. If None,
+ a clean module will be created.
+ user_ns : dict, optional
+ A namespace in which to run interactive commands.
+
+ Returns
+ -------
+ A tuple of user_module and user_ns, each properly initialised.
+ """
+ if user_module is None and user_ns is not None:
+ user_ns.setdefault("__name__", "__main__")
+ user_module = DummyMod()
+ user_module.__dict__ = user_ns
- if user_module is None:
- user_module = types.ModuleType("__main__",
- doc="Automatically created module for IPython interactive environment")
+ if user_module is None:
+ user_module = types.ModuleType("__main__",
+ doc="Automatically created module for IPython interactive environment")
- # We must ensure that __builtin__ (without the final 's') is always
- # available and pointing to the __builtin__ *module*. For more details:
- # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
- user_module.__dict__.setdefault('__builtin__', builtin_mod)
- user_module.__dict__.setdefault('__builtins__', builtin_mod)
+ # We must ensure that __builtin__ (without the final 's') is always
+ # available and pointing to the __builtin__ *module*. For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ user_module.__dict__.setdefault('__builtin__', builtin_mod)
+ user_module.__dict__.setdefault('__builtins__', builtin_mod)
- if user_ns is None:
- user_ns = user_module.__dict__
-
- return user_module, user_ns
-
- def init_sys_modules(self):
- # We need to insert into sys.modules something that looks like a
- # module but which accesses the IPython namespace, for shelve and
- # pickle to work interactively. Normally they rely on getting
- # everything out of __main__, but for embedding purposes each IPython
- # instance has its own private namespace, so we can't go shoving
- # everything into __main__.
-
- # note, however, that we should only do this for non-embedded
- # ipythons, which really mimic the __main__.__dict__ with their own
- # namespace. Embedded instances, on the other hand, should not do
- # this because they need to manage the user local/global namespaces
- # only, but they live within a 'normal' __main__ (meaning, they
- # shouldn't overtake the execution environment of the script they're
- # embedded in).
-
- # This is overridden in the InteractiveShellEmbed subclass to a no-op.
- main_name = self.user_module.__name__
- sys.modules[main_name] = self.user_module
-
- def init_user_ns(self):
- """Initialize all user-visible namespaces to their minimum defaults.
-
- Certain history lists are also initialized here, as they effectively
- act as user namespaces.
-
- Notes
- -----
- All data structures here are only filled in, they are NOT reset by this
- method. If they were not empty before, data will simply be added to
- them.
- """
- # This function works in two parts: first we put a few things in
- # user_ns, and we sync that contents into user_ns_hidden so that these
- # initial variables aren't shown by %who. After the sync, we add the
- # rest of what we *do* want the user to see with %who even on a new
- # session (probably nothing, so they really only see their own stuff)
-
- # The user dict must *always* have a __builtin__ reference to the
- # Python standard __builtin__ namespace, which must be imported.
- # This is so that certain operations in prompt evaluation can be
- # reliably executed with builtins. Note that we can NOT use
- # __builtins__ (note the 's'), because that can either be a dict or a
- # module, and can even mutate at runtime, depending on the context
- # (Python makes no guarantees on it). In contrast, __builtin__ is
- # always a module object, though it must be explicitly imported.
-
- # For more details:
- # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
- ns = dict()
+ if user_ns is None:
+ user_ns = user_module.__dict__
+
+ return user_module, user_ns
+
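# Sketch of the embedding case described in the docstring (assumes a
# live IPython session): passing only user_ns wraps it in a dummy
# module and injects the builtin references.
ip = get_ipython()
my_ns = {'x': 1}
mod, ns = ip.prepare_user_module(user_ns=my_ns)
assert ns is my_ns and mod.__dict__ is my_ns
assert ns['__name__'] == '__main__'            # filled in by setdefault
assert '__builtin__' in ns and '__builtins__' in ns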
+ def init_sys_modules(self):
+ # We need to insert into sys.modules something that looks like a
+ # module but which accesses the IPython namespace, for shelve and
+ # pickle to work interactively. Normally they rely on getting
+ # everything out of __main__, but for embedding purposes each IPython
+ # instance has its own private namespace, so we can't go shoving
+ # everything into __main__.
+
+ # note, however, that we should only do this for non-embedded
+ # ipythons, which really mimic the __main__.__dict__ with their own
+ # namespace. Embedded instances, on the other hand, should not do
+ # this because they need to manage the user local/global namespaces
+ # only, but they live within a 'normal' __main__ (meaning, they
+ # shouldn't overtake the execution environment of the script they're
+ # embedded in).
+
+ # This is overridden in the InteractiveShellEmbed subclass to a no-op.
+ main_name = self.user_module.__name__
+ sys.modules[main_name] = self.user_module
+
+ def init_user_ns(self):
+ """Initialize all user-visible namespaces to their minimum defaults.
+
+ Certain history lists are also initialized here, as they effectively
+ act as user namespaces.
+
+ Notes
+ -----
+ All data structures here are only filled in, they are NOT reset by this
+ method. If they were not empty before, data will simply be added to
+ them.
+ """
+ # This function works in two parts: first we put a few things in
+ # user_ns, and we sync that contents into user_ns_hidden so that these
+ # initial variables aren't shown by %who. After the sync, we add the
+ # rest of what we *do* want the user to see with %who even on a new
+ # session (probably nothing, so they really only see their own stuff)
+
+ # The user dict must *always* have a __builtin__ reference to the
+ # Python standard __builtin__ namespace, which must be imported.
+ # This is so that certain operations in prompt evaluation can be
+ # reliably executed with builtins. Note that we can NOT use
+ # __builtins__ (note the 's'), because that can either be a dict or a
+ # module, and can even mutate at runtime, depending on the context
+ # (Python makes no guarantees on it). In contrast, __builtin__ is
+ # always a module object, though it must be explicitly imported.
+
+ # For more details:
+ # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
+ ns = dict()
- # make global variables for user access to the histories
- ns['_ih'] = self.history_manager.input_hist_parsed
- ns['_oh'] = self.history_manager.output_hist
- ns['_dh'] = self.history_manager.dir_hist
-
- ns['_sh'] = shadowns
-
- # user aliases to input and output histories. These shouldn't show up
- # in %who, as they can have very large reprs.
- ns['In'] = self.history_manager.input_hist_parsed
- ns['Out'] = self.history_manager.output_hist
-
- # Store myself as the public api!!!
- ns['get_ipython'] = self.get_ipython
+ # make global variables for user access to the histories
+ ns['_ih'] = self.history_manager.input_hist_parsed
+ ns['_oh'] = self.history_manager.output_hist
+ ns['_dh'] = self.history_manager.dir_hist
+
+ ns['_sh'] = shadowns
+
+ # user aliases to input and output histories. These shouldn't show up
+ # in %who, as they can have very large reprs.
+ ns['In'] = self.history_manager.input_hist_parsed
+ ns['Out'] = self.history_manager.output_hist
+
+ # Store myself as the public api!!!
+ ns['get_ipython'] = self.get_ipython
- ns['exit'] = self.exiter
- ns['quit'] = self.exiter
-
- # Sync what we've added so far to user_ns_hidden so these aren't seen
- # by %who
- self.user_ns_hidden.update(ns)
-
- # Anything put into ns now would show up in %who. Think twice before
- # putting anything here, as we really want %who to show the user their
- # stuff, not our variables.
-
- # Finally, update the real user's namespace
- self.user_ns.update(ns)
+ ns['exit'] = self.exiter
+ ns['quit'] = self.exiter
+
+ # Sync what we've added so far to user_ns_hidden so these aren't seen
+ # by %who
+ self.user_ns_hidden.update(ns)
+
+ # Anything put into ns now would show up in %who. Think twice before
+ # putting anything here, as we really want %who to show the user their
+ # stuff, not our variables.
+
+ # Finally, update the real user's namespace
+ self.user_ns.update(ns)
- @property
- def all_ns_refs(self):
- """Get a list of references to all the namespace dictionaries in which
- IPython might store a user-created object.
+ @property
+ def all_ns_refs(self):
+ """Get a list of references to all the namespace dictionaries in which
+ IPython might store a user-created object.
- Note that this does not include the displayhook, which also caches
- objects from the output."""
- return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
- [m.__dict__ for m in self._main_mod_cache.values()]
-
- def reset(self, new_session=True):
- """Clear all internal namespaces, and attempt to release references to
- user objects.
-
- If new_session is True, a new history session will be opened.
- """
- # Clear histories
- self.history_manager.reset(new_session)
- # Reset counter used to index all histories
- if new_session:
- self.execution_count = 1
-
- # Flush cached output items
- if self.displayhook.do_full_cache:
- self.displayhook.flush()
-
- # The main execution namespaces must be cleared very carefully,
- # skipping the deletion of the builtin-related keys, because doing so
- # would cause errors in many objects' __del__ methods.
- if self.user_ns is not self.user_global_ns:
- self.user_ns.clear()
- ns = self.user_global_ns
- drop_keys = set(ns.keys())
- drop_keys.discard('__builtin__')
- drop_keys.discard('__builtins__')
- drop_keys.discard('__name__')
- for k in drop_keys:
- del ns[k]
+ Note that this does not include the displayhook, which also caches
+ objects from the output."""
+ return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
+ [m.__dict__ for m in self._main_mod_cache.values()]
+
+ def reset(self, new_session=True):
+ """Clear all internal namespaces, and attempt to release references to
+ user objects.
+
+ If new_session is True, a new history session will be opened.
+ """
+ # Clear histories
+ self.history_manager.reset(new_session)
+ # Reset counter used to index all histories
+ if new_session:
+ self.execution_count = 1
+
+ # Flush cached output items
+ if self.displayhook.do_full_cache:
+ self.displayhook.flush()
+
+ # The main execution namespaces must be cleared very carefully,
+ # skipping the deletion of the builtin-related keys, because doing so
+ # would cause errors in many objects' __del__ methods.
+ if self.user_ns is not self.user_global_ns:
+ self.user_ns.clear()
+ ns = self.user_global_ns
+ drop_keys = set(ns.keys())
+ drop_keys.discard('__builtin__')
+ drop_keys.discard('__builtins__')
+ drop_keys.discard('__name__')
+ for k in drop_keys:
+ del ns[k]
- self.user_ns_hidden.clear()
+ self.user_ns_hidden.clear()
- # Restore the user namespaces to minimal usability
- self.init_user_ns()
-
- # Restore the default and user aliases
- self.alias_manager.clear_aliases()
- self.alias_manager.init_aliases()
-
- # Flush the private list of module references kept for script
- # execution protection
- self.clear_main_mod_cache()
-
- def del_var(self, varname, by_name=False):
- """Delete a variable from the various namespaces, so that, as
- far as possible, we're not keeping any hidden references to it.
-
- Parameters
- ----------
- varname : str
- The name of the variable to delete.
- by_name : bool
- If True, delete variables with the given name in each
- namespace. If False (default), find the variable in the user
- namespace, and delete references to it.
- """
- if varname in ('__builtin__', '__builtins__'):
- raise ValueError("Refusing to delete %s" % varname)
-
- ns_refs = self.all_ns_refs
+ # Restore the user namespaces to minimal usability
+ self.init_user_ns()
+
+ # Restore the default and user aliases
+ self.alias_manager.clear_aliases()
+ self.alias_manager.init_aliases()
+
+ # Flush the private list of module references kept for script
+ # execution protection
+ self.clear_main_mod_cache()
+
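# The careful-clear pattern used by reset(), as a standalone sketch:
# drop every key except the builtin-related ones rather than calling
# ns.clear() outright, so objects' __del__ methods still see builtins.
ns = {'__builtin__': None, '__builtins__': None, '__name__': '__main__',
      'x': 1, 'y': 2}
drop_keys = set(ns.keys())
drop_keys.difference_update({'__builtin__', '__builtins__', '__name__'})
for k in drop_keys:
    del ns[k]
assert sorted(ns) == ['__builtin__', '__builtins__', '__name__']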
+ def del_var(self, varname, by_name=False):
+ """Delete a variable from the various namespaces, so that, as
+ far as possible, we're not keeping any hidden references to it.
+
+ Parameters
+ ----------
+ varname : str
+ The name of the variable to delete.
+ by_name : bool
+ If True, delete variables with the given name in each
+ namespace. If False (default), find the variable in the user
+ namespace, and delete references to it.
+ """
+ if varname in ('__builtin__', '__builtins__'):
+ raise ValueError("Refusing to delete %s" % varname)
+
+ ns_refs = self.all_ns_refs
- if by_name: # Delete by name
- for ns in ns_refs:
- try:
- del ns[varname]
- except KeyError:
- pass
- else: # Delete by object
- try:
- obj = self.user_ns[varname]
- except KeyError:
- raise NameError("name '%s' is not defined" % varname)
- # Also check in output history
- ns_refs.append(self.history_manager.output_hist)
- for ns in ns_refs:
- to_delete = [n for n, o in iteritems(ns) if o is obj]
- for name in to_delete:
- del ns[name]
-
- # displayhook keeps extra references, but not in a dictionary
- for name in ('_', '__', '___'):
- if getattr(self.displayhook, name) is obj:
- setattr(self.displayhook, name, None)
-
- def reset_selective(self, regex=None):
- """Clear selective variables from internal namespaces based on a
- specified regular expression.
-
- Parameters
- ----------
- regex : string or compiled pattern, optional
- A regular expression pattern that will be used in searching
- variable names in the user's namespaces.
- """
- if regex is not None:
- try:
- m = re.compile(regex)
- except TypeError:
- raise TypeError('regex must be a string or compiled pattern')
- # Search for keys in each namespace that match the given regex
- # If a match is found, delete the key/value pair.
- for ns in self.all_ns_refs:
- for var in list(ns):  # snapshot the keys: we delete from ns below
- if m.search(var):
- del ns[var]
-
- def push(self, variables, interactive=True):
- """Inject a group of variables into the IPython user namespace.
-
- Parameters
- ----------
- variables : dict, str or list/tuple of str
- The variables to inject into the user's namespace. If a dict, a
- simple update is done. If a str, the string is assumed to have
- variable names separated by spaces. A list/tuple of str can also
- be used to give the variable names. If just the variable names are
- given (list/tuple/str), then the variable values are looked up in the
- caller's frame.
- interactive : bool
- If True (default), the variables will be listed with the ``who``
- magic.
- """
- vdict = None
-
- # We need a dict of name/value pairs to do namespace updates.
- if isinstance(variables, dict):
- vdict = variables
- elif isinstance(variables, string_types+(list, tuple)):
- if isinstance(variables, string_types):
- vlist = variables.split()
- else:
- vlist = variables
- vdict = {}
- cf = sys._getframe(1)
- for name in vlist:
- try:
- vdict[name] = eval(name, cf.f_globals, cf.f_locals)
- except:
- print('Could not get variable %s from %s' %
- (name,cf.f_code.co_name))
- else:
- raise ValueError('variables must be a dict/str/list/tuple')
-
- # Propagate variables to user namespace
- self.user_ns.update(vdict)
-
- # And configure interactive visibility
- user_ns_hidden = self.user_ns_hidden
- if interactive:
- for name in vdict:
- user_ns_hidden.pop(name, None)
- else:
- user_ns_hidden.update(vdict)
-
- def drop_by_id(self, variables):
- """Remove a dict of variables from the user namespace, if they are the
- same as the values in the dictionary.
+ if by_name: # Delete by name
+ for ns in ns_refs:
+ try:
+ del ns[varname]
+ except KeyError:
+ pass
+ else: # Delete by object
+ try:
+ obj = self.user_ns[varname]
+ except KeyError:
+ raise NameError("name '%s' is not defined" % varname)
+ # Also check in output history
+ ns_refs.append(self.history_manager.output_hist)
+ for ns in ns_refs:
+ to_delete = [n for n, o in iteritems(ns) if o is obj]
+ for name in to_delete:
+ del ns[name]
+
+ # displayhook keeps extra references, but not in a dictionary
+ for name in ('_', '__', '___'):
+ if getattr(self.displayhook, name) is obj:
+ setattr(self.displayhook, name, None)
+
+ def reset_selective(self, regex=None):
+ """Clear selective variables from internal namespaces based on a
+ specified regular expression.
+
+ Parameters
+ ----------
+ regex : string or compiled pattern, optional
+ A regular expression pattern that will be used in searching
+ variable names in the user's namespaces.
+ """
+ if regex is not None:
+ try:
+ m = re.compile(regex)
+ except TypeError:
+ raise TypeError('regex must be a string or compiled pattern')
+ # Search for keys in each namespace that match the given regex
+ # If a match is found, delete the key/value pair.
+ for ns in self.all_ns_refs:
+ for var in list(ns):  # snapshot the keys: we delete from ns below
+ if m.search(var):
+ del ns[var]
+
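# Hypothetical usage of reset_selective (assumes a live IPython
# session): remove every user variable whose name matches the pattern.
ip = get_ipython()
ip.user_ns['tmp_a'] = 1
ip.user_ns['keep_me'] = 2
ip.reset_selective(r'^tmp_')
assert 'tmp_a' not in ip.user_ns
assert 'keep_me' in ip.user_ns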
+ def push(self, variables, interactive=True):
+ """Inject a group of variables into the IPython user namespace.
+
+ Parameters
+ ----------
+ variables : dict, str or list/tuple of str
+ The variables to inject into the user's namespace. If a dict, a
+ simple update is done. If a str, the string is assumed to have
+ variable names separated by spaces. A list/tuple of str can also
+ be used to give the variable names. If just the variable names are
+ given (list/tuple/str), then the variable values are looked up in the
+ caller's frame.
+ interactive : bool
+ If True (default), the variables will be listed with the ``who``
+ magic.
+ """
+ vdict = None
+
+ # We need a dict of name/value pairs to do namespace updates.
+ if isinstance(variables, dict):
+ vdict = variables
+ elif isinstance(variables, string_types+(list, tuple)):
+ if isinstance(variables, string_types):
+ vlist = variables.split()
+ else:
+ vlist = variables
+ vdict = {}
+ cf = sys._getframe(1)
+ for name in vlist:
+ try:
+ vdict[name] = eval(name, cf.f_globals, cf.f_locals)
+ except:
+ print('Could not get variable %s from %s' %
+ (name,cf.f_code.co_name))
+ else:
+ raise ValueError('variables must be a dict/str/list/tuple')
+
+ # Propagate variables to user namespace
+ self.user_ns.update(vdict)
+
+ # And configure interactive visibility
+ user_ns_hidden = self.user_ns_hidden
+ if interactive:
+ for name in vdict:
+ user_ns_hidden.pop(name, None)
+ else:
+ user_ns_hidden.update(vdict)
+
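# Hypothetical usage of push (assumes a live IPython session), showing
# the three accepted argument forms from the docstring.
ip = get_ipython()
ip.push({'a': 1})                      # dict: plain namespace update
b, c = 2, 3
ip.push('b c')                         # str: names from the caller's frame
ip.push(['b', 'c'])                    # list/tuple of names, same lookup
ip.push({'d': 4}, interactive=False)   # present, but hidden from %who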
+ def drop_by_id(self, variables):
+ """Remove a dict of variables from the user namespace, if they are the
+ same as the values in the dictionary.
- This is intended for use by extensions: variables that they've added can
- be taken back out if they are unloaded, without removing any that the
- user has overwritten.
+ This is intended for use by extensions: variables that they've added can
+ be taken back out if they are unloaded, without removing any that the
+ user has overwritten.
- Parameters
- ----------
- variables : dict
- A dictionary mapping object names (as strings) to the objects.
- """
- for name, obj in iteritems(variables):
- if name in self.user_ns and self.user_ns[name] is obj:
- del self.user_ns[name]
- self.user_ns_hidden.pop(name, None)
-
- #-------------------------------------------------------------------------
- # Things related to object introspection
- #-------------------------------------------------------------------------
-
- def _ofind(self, oname, namespaces=None):
- """Find an object in the available namespaces.
-
- self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
-
- Has special code to detect magic functions.
- """
- oname = oname.strip()
- #print '1- oname: <%r>' % oname # dbg
- if not oname.startswith(ESC_MAGIC) and \
- not oname.startswith(ESC_MAGIC2) and \
- not py3compat.isidentifier(oname, dotted=True):
- return dict(found=False)
-
- if namespaces is None:
- # Namespaces to search in:
- # Put them in a list. The order is important so that we
- # find things in the same order that Python finds them.
- namespaces = [ ('Interactive', self.user_ns),
- ('Interactive (global)', self.user_global_ns),
- ('Python builtin', builtin_mod.__dict__),
- ]
-
- # initialize results to 'null'
- found = False; obj = None; ospace = None
- ismagic = False; isalias = False; parent = None
-
- # We need to special-case 'print', which as of python2.6 registers as a
- # function but should only be treated as one if print_function was
- # loaded with a future import. In this case, just bail.
- if (oname == 'print' and not py3compat.PY3 and not \
- (self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)):
- return {'found':found, 'obj':obj, 'namespace':ospace,
- 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
-
- # Look for the given name by splitting it in parts. If the head is
- # found, then we look for all the remaining parts as members, and only
- # declare success if we can find them all.
- oname_parts = oname.split('.')
- oname_head, oname_rest = oname_parts[0],oname_parts[1:]
- for nsname,ns in namespaces:
- try:
- obj = ns[oname_head]
- except KeyError:
- continue
- else:
- #print 'oname_rest:', oname_rest # dbg
- for idx, part in enumerate(oname_rest):
- try:
- parent = obj
- # The last part is looked up in a special way to avoid
- # descriptor invocation as it may raise or have side
- # effects.
- if idx == len(oname_rest) - 1:
- obj = self._getattr_property(obj, part)
- else:
- obj = getattr(obj, part)
- except:
- # Blanket except b/c some badly implemented objects
- # allow __getattr__ to raise exceptions other than
- # AttributeError, which then crashes IPython.
- break
- else:
- # If we finish the for loop (no break), we got all members
- found = True
- ospace = nsname
- break # namespace loop
-
- # Try to see if it's magic
- if not found:
- obj = None
- if oname.startswith(ESC_MAGIC2):
- oname = oname.lstrip(ESC_MAGIC2)
- obj = self.find_cell_magic(oname)
- elif oname.startswith(ESC_MAGIC):
- oname = oname.lstrip(ESC_MAGIC)
- obj = self.find_line_magic(oname)
- else:
- # search without prefix, so run? will find %run?
- obj = self.find_line_magic(oname)
- if obj is None:
- obj = self.find_cell_magic(oname)
- if obj is not None:
- found = True
- ospace = 'IPython internal'
- ismagic = True
- isalias = isinstance(obj, Alias)
-
- # Last try: special-case some literals like '', [], {}, etc:
- if not found and oname_head in ["''",'""','[]','{}','()']:
- obj = eval(oname_head)
- found = True
- ospace = 'Interactive'
-
- return {'found':found, 'obj':obj, 'namespace':ospace,
- 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
-
- @staticmethod
- def _getattr_property(obj, attrname):
- """Property-aware getattr to use in object finding.
-
- If attrname represents a property, return it unevaluated (in case it has
- side effects or raises an error).
-
- """
- if not isinstance(obj, type):
- try:
- # `getattr(type(obj), attrname)` is not guaranteed to return
- # `obj`, but does so for property:
- #
- # property.__get__(self, None, cls) -> self
- #
- # The universal alternative is to traverse the mro manually
- # searching for attrname in class dicts.
- attr = getattr(type(obj), attrname)
- except AttributeError:
- pass
- else:
- # This relies on the fact that data descriptors (with both
- # __get__ & __set__ magic methods) take precedence over
- # instance-level attributes:
- #
- # class A(object):
- # @property
- # def foobar(self): return 123
- # a = A()
- # a.__dict__['foobar'] = 345
- # a.foobar # == 123
- #
- # So, a property may be returned right away.
- if isinstance(attr, property):
- return attr
-
- # Nothing helped, fall back.
- return getattr(obj, attrname)
-
- def _object_find(self, oname, namespaces=None):
- """Find an object and return a struct with info about it."""
- return Struct(self._ofind(oname, namespaces))
-
- def _inspect(self, meth, oname, namespaces=None, **kw):
- """Generic interface to the inspector system.
-
+ Parameters
+ ----------
+ variables : dict
+ A dictionary mapping object names (as strings) to the objects.
+ """
+ for name, obj in iteritems(variables):
+ if name in self.user_ns and self.user_ns[name] is obj:
+ del self.user_ns[name]
+ self.user_ns_hidden.pop(name, None)
+
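# Hypothetical extension teardown via drop_by_id (assumes a live
# IPython session): the name is only removed while it is still bound
# to the exact object the extension added.
ip = get_ipython()
payload = object()
ip.push({'helper': payload})
ip.user_ns['helper'] = 'rebound by the user'
ip.drop_by_id({'helper': payload})          # identity no longer matches...
assert ip.user_ns['helper'] == 'rebound by the user'   # ...so it is kept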
+ #-------------------------------------------------------------------------
+ # Things related to object introspection
+ #-------------------------------------------------------------------------
+
+ def _ofind(self, oname, namespaces=None):
+ """Find an object in the available namespaces.
+
+ self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
+
+ Has special code to detect magic functions.
+ """
+ oname = oname.strip()
+ #print '1- oname: <%r>' % oname # dbg
+ if not oname.startswith(ESC_MAGIC) and \
+ not oname.startswith(ESC_MAGIC2) and \
+ not py3compat.isidentifier(oname, dotted=True):
+ return dict(found=False)
+
+ if namespaces is None:
+ # Namespaces to search in:
+ # Put them in a list. The order is important so that we
+ # find things in the same order that Python finds them.
+ namespaces = [ ('Interactive', self.user_ns),
+ ('Interactive (global)', self.user_global_ns),
+ ('Python builtin', builtin_mod.__dict__),
+ ]
+
+ # initialize results to 'null'
+ found = False; obj = None; ospace = None
+ ismagic = False; isalias = False; parent = None
+
+ # We need to special-case 'print', which as of python2.6 registers as a
+ # function but should only be treated as one if print_function was
+ # loaded with a future import. In this case, just bail.
+ if (oname == 'print' and not py3compat.PY3 and not \
+ (self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)):
+ return {'found':found, 'obj':obj, 'namespace':ospace,
+ 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
+
+ # Look for the given name by splitting it in parts. If the head is
+ # found, then we look for all the remaining parts as members, and only
+ # declare success if we can find them all.
+ oname_parts = oname.split('.')
+ oname_head, oname_rest = oname_parts[0],oname_parts[1:]
+ for nsname,ns in namespaces:
+ try:
+ obj = ns[oname_head]
+ except KeyError:
+ continue
+ else:
+ #print 'oname_rest:', oname_rest # dbg
+ for idx, part in enumerate(oname_rest):
+ try:
+ parent = obj
+ # The last part is looked up in a special way to avoid
+ # descriptor invocation as it may raise or have side
+ # effects.
+ if idx == len(oname_rest) - 1:
+ obj = self._getattr_property(obj, part)
+ else:
+ obj = getattr(obj, part)
+ except:
+ # Blanket except b/c some badly implemented objects
+ # allow __getattr__ to raise exceptions other than
+ # AttributeError, which then crashes IPython.
+ break
+ else:
+ # If we finish the for loop (no break), we got all members
+ found = True
+ ospace = nsname
+ break # namespace loop
+
+ # Try to see if it's magic
+ if not found:
+ obj = None
+ if oname.startswith(ESC_MAGIC2):
+ oname = oname.lstrip(ESC_MAGIC2)
+ obj = self.find_cell_magic(oname)
+ elif oname.startswith(ESC_MAGIC):
+ oname = oname.lstrip(ESC_MAGIC)
+ obj = self.find_line_magic(oname)
+ else:
+ # search without prefix, so run? will find %run?
+ obj = self.find_line_magic(oname)
+ if obj is None:
+ obj = self.find_cell_magic(oname)
+ if obj is not None:
+ found = True
+ ospace = 'IPython internal'
+ ismagic = True
+ isalias = isinstance(obj, Alias)
+
+ # Last try: special-case some literals like '', [], {}, etc:
+ if not found and oname_head in ["''",'""','[]','{}','()']:
+ obj = eval(oname_head)
+ found = True
+ ospace = 'Interactive'
+
+ return {'found':found, 'obj':obj, 'namespace':ospace,
+ 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
+
+ @staticmethod
+ def _getattr_property(obj, attrname):
+ """Property-aware getattr to use in object finding.
+
+ If attrname represents a property, return it unevaluated (in case it has
+ side effects or raises an error).
+
+ """
+ if not isinstance(obj, type):
+ try:
+ # `getattr(type(obj), attrname)` is not guaranteed to return
+ # `obj`, but does so for property:
+ #
+ # property.__get__(self, None, cls) -> self
+ #
+ # The universal alternative is to traverse the mro manually
+ # searching for attrname in class dicts.
+ attr = getattr(type(obj), attrname)
+ except AttributeError:
+ pass
+ else:
+ # This relies on the fact that data descriptors (with both
+ # __get__ & __set__ magic methods) take precedence over
+ # instance-level attributes:
+ #
+ # class A(object):
+ # @property
+ # def foobar(self): return 123
+ # a = A()
+ # a.__dict__['foobar'] = 345
+ # a.foobar # == 123
+ #
+ # So, a property may be returned right away.
+ if isinstance(attr, property):
+ return attr
+
+ # Nothing helped, fall back.
+ return getattr(obj, attrname)
+
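# Standalone demonstration of the data-descriptor precedence the code
# above relies on: a property shadows a same-named instance attribute,
# which is why it can safely be returned unevaluated.
class A(object):
    @property
    def foobar(self):
        return 123

a = A()
a.__dict__['foobar'] = 345
assert a.foobar == 123                             # the property wins
assert isinstance(type(a).__dict__['foobar'], property)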
+ def _object_find(self, oname, namespaces=None):
+ """Find an object and return a struct with info about it."""
+ return Struct(self._ofind(oname, namespaces))
+
+ def _inspect(self, meth, oname, namespaces=None, **kw):
+ """Generic interface to the inspector system.
+
This function is meant to be called by pdef, pdoc & friends.
"""
- info = self._object_find(oname, namespaces)
+ info = self._object_find(oname, namespaces)
docformat = sphinxify if self.sphinxify_docstring else None
- if info.found:
- pmethod = getattr(self.inspector, meth)
+ if info.found:
+ pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
- if meth == 'pdoc':
- pmethod(info.obj, oname, formatter)
- elif meth == 'pinfo':
+ if meth == 'pdoc':
+ pmethod(info.obj, oname, formatter)
+ elif meth == 'pinfo':
pmethod(info.obj, oname, formatter, info,
enable_html_pager=self.enable_html_pager, **kw)
- else:
- pmethod(info.obj, oname)
- else:
- print('Object `%s` not found.' % oname)
- return 'not found' # so callers can take other action
-
- def object_inspect(self, oname, detail_level=0):
- """Get object info about oname"""
- with self.builtin_trap:
- info = self._object_find(oname)
- if info.found:
- return self.inspector.info(info.obj, oname, info=info,
- detail_level=detail_level
- )
- else:
- return oinspect.object_info(name=oname, found=False)
-
- def object_inspect_text(self, oname, detail_level=0):
- """Get object info as formatted text"""
+ else:
+ pmethod(info.obj, oname)
+ else:
+ print('Object `%s` not found.' % oname)
+ return 'not found' # so callers can take other action
+
+ def object_inspect(self, oname, detail_level=0):
+ """Get object info about oname"""
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
+ return self.inspector.info(info.obj, oname, info=info,
+ detail_level=detail_level
+ )
+ else:
+ return oinspect.object_info(name=oname, found=False)
+
+ def object_inspect_text(self, oname, detail_level=0):
+ """Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0):
@@ -1561,1345 +1561,1345 @@ class InteractiveShell(SingletonConfigurable):
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
- with self.builtin_trap:
- info = self._object_find(oname)
- if info.found:
+ with self.builtin_trap:
+ info = self._object_find(oname)
+ if info.found:
return self.inspector._get_info(info.obj, oname, info=info,
- detail_level=detail_level
- )
- else:
- raise KeyError(oname)
-
- #-------------------------------------------------------------------------
- # Things related to history management
- #-------------------------------------------------------------------------
-
- def init_history(self):
- """Sets up the command history, and starts regular autosaves."""
- self.history_manager = HistoryManager(shell=self, parent=self)
- self.configurables.append(self.history_manager)
-
- #-------------------------------------------------------------------------
- # Things related to exception handling and tracebacks (not debugging)
- #-------------------------------------------------------------------------
-
+ detail_level=detail_level
+ )
+ else:
+ raise KeyError(oname)
+
+ #-------------------------------------------------------------------------
+ # Things related to history management
+ #-------------------------------------------------------------------------
+
+ def init_history(self):
+ """Sets up the command history, and starts regular autosaves."""
+ self.history_manager = HistoryManager(shell=self, parent=self)
+ self.configurables.append(self.history_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to exception handling and tracebacks (not debugging)
+ #-------------------------------------------------------------------------
+
debugger_cls = Pdb
- def init_traceback_handlers(self, custom_exceptions):
- # Syntax error handler.
- self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor')
-
- # The interactive one is initialized with an offset, meaning we always
- # want to remove the topmost item in the traceback, which is our own
- # internal code. Valid modes: ['Plain','Context','Verbose']
- self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
- color_scheme='NoColor',
- tb_offset = 1,
+ def init_traceback_handlers(self, custom_exceptions):
+ # Syntax error handler.
+ self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor')
+
+ # The interactive one is initialized with an offset, meaning we always
+ # want to remove the topmost item in the traceback, which is our own
+ # internal code. Valid modes: ['Plain','Context','Verbose']
+ self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
+ color_scheme='NoColor',
+ tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls)
-
- # The instance will store a pointer to the system-wide exception hook,
- # so that runtime code (such as magics) can access it. This is because
- # during the read-eval loop, it may get temporarily overwritten.
- self.sys_excepthook = sys.excepthook
-
- # and add any custom exception handlers the user may have specified
- self.set_custom_exc(*custom_exceptions)
-
- # Set the exception mode
- self.InteractiveTB.set_mode(mode=self.xmode)
-
- def set_custom_exc(self, exc_tuple, handler):
+
+ # The instance will store a pointer to the system-wide exception hook,
+ # so that runtime code (such as magics) can access it. This is because
+ # during the read-eval loop, it may get temporarily overwritten.
+ self.sys_excepthook = sys.excepthook
+
+ # and add any custom exception handlers the user may have specified
+ self.set_custom_exc(*custom_exceptions)
+
+ # Set the exception mode
+ self.InteractiveTB.set_mode(mode=self.xmode)
+
+ def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
-
- Set a custom exception handler, which will be called if any of the
- exceptions in exc_tuple occur in the mainloop (specifically, in the
- run_code() method).
-
- Parameters
- ----------
-
- exc_tuple : tuple of exception classes
- A *tuple* of exception classes, for which to call the defined
- handler. It is very important that you use a tuple, and NOT A
- LIST here, because of the way Python's except statement works. If
- you only want to trap a single exception, use a singleton tuple::
-
- exc_tuple == (MyCustomException,)
-
- handler : callable
- handler must have the following signature::
-
- def my_handler(self, etype, value, tb, tb_offset=None):
- ...
- return structured_traceback
-
- Your handler must return a structured traceback (a list of strings),
- or None.
-
- This will be made into an instance method (via types.MethodType)
- of IPython itself, and it will be called if any of the exceptions
- listed in the exc_tuple are caught. If the handler is None, an
- internal basic one is used, which just prints basic info.
-
- To protect IPython from crashes, if your handler ever raises an
- exception or returns an invalid result, it will be immediately
- disabled.
-
- WARNING: by putting in your own exception handler into IPython's main
- execution loop, you run a very good chance of nasty crashes. This
- facility should only be used if you really know what you are doing."""
-
- assert type(exc_tuple)==type(()) , \
- "The custom exceptions must be given AS A TUPLE."
-
+
+ Set a custom exception handler, which will be called if any of the
+ exceptions in exc_tuple occur in the mainloop (specifically, in the
+ run_code() method).
+
+ Parameters
+ ----------
+
+ exc_tuple : tuple of exception classes
+ A *tuple* of exception classes, for which to call the defined
+ handler. It is very important that you use a tuple, and NOT A
+ LIST here, because of the way Python's except statement works. If
+ you only want to trap a single exception, use a singleton tuple::
+
+ exc_tuple == (MyCustomException,)
+
+ handler : callable
+ handler must have the following signature::
+
+ def my_handler(self, etype, value, tb, tb_offset=None):
+ ...
+ return structured_traceback
+
+ Your handler must return a structured traceback (a list of strings),
+ or None.
+
+ This will be made into an instance method (via types.MethodType)
+ of IPython itself, and it will be called if any of the exceptions
+ listed in the exc_tuple are caught. If the handler is None, an
+ internal basic one is used, which just prints basic info.
+
+ To protect IPython from crashes, if your handler ever raises an
+ exception or returns an invalid result, it will be immediately
+ disabled.
+
+ WARNING: by putting in your own exception handler into IPython's main
+ execution loop, you run a very good chance of nasty crashes. This
+ facility should only be used if you really know what you are doing."""
+
+ assert type(exc_tuple)==type(()) , \
+ "The custom exceptions must be given AS A TUPLE."
+
def dummy_handler(self, etype, value, tb, tb_offset=None):
- print('*** Simple custom exception handler ***')
- print('Exception type :',etype)
- print('Exception value:',value)
- print('Traceback :',tb)
- #print 'Source code :','\n'.join(self.buffer)
+ print('*** Simple custom exception handler ***')
+ print('Exception type :',etype)
+ print('Exception value:',value)
+ print('Traceback :',tb)
+ #print 'Source code :','\n'.join(self.buffer)
- def validate_stb(stb):
- """validate structured traceback return type
+ def validate_stb(stb):
+ """validate structured traceback return type
- return type of CustomTB *should* be a list of strings, but allow
- single strings or None, which are harmless.
+ return type of CustomTB *should* be a list of strings, but allow
+ single strings or None, which are harmless.
- This function will *always* return a list of strings,
- and will raise a TypeError if stb is inappropriate.
- """
- msg = "CustomTB must return list of strings, not %r" % stb
- if stb is None:
- return []
- elif isinstance(stb, string_types):
- return [stb]
- elif not isinstance(stb, list):
- raise TypeError(msg)
- # it's a list
- for line in stb:
- # check every element
- if not isinstance(line, string_types):
- raise TypeError(msg)
- return stb
-
- if handler is None:
- wrapped = dummy_handler
- else:
- def wrapped(self,etype,value,tb,tb_offset=None):
- """wrap CustomTB handler, to protect IPython from user code
+ This function will *always* return a list of strings,
+ and will raise a TypeError if stb is inappropriate.
+ """
+ msg = "CustomTB must return list of strings, not %r" % stb
+ if stb is None:
+ return []
+ elif isinstance(stb, string_types):
+ return [stb]
+ elif not isinstance(stb, list):
+ raise TypeError(msg)
+ # it's a list
+ for line in stb:
+ # check every element
+ if not isinstance(line, string_types):
+ raise TypeError(msg)
+ return stb
+
+ if handler is None:
+ wrapped = dummy_handler
+ else:
+ def wrapped(self,etype,value,tb,tb_offset=None):
+ """wrap CustomTB handler, to protect IPython from user code
- This makes it harder (but not impossible) for custom exception
- handlers to crash IPython.
- """
- try:
- stb = handler(self,etype,value,tb,tb_offset=tb_offset)
- return validate_stb(stb)
- except:
- # clear custom handler immediately
- self.set_custom_exc((), None)
+ This makes it harder (but not impossible) for custom exception
+ handlers to crash IPython.
+ """
+ try:
+ stb = handler(self,etype,value,tb,tb_offset=tb_offset)
+ return validate_stb(stb)
+ except:
+ # clear custom handler immediately
+ self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
- # show the exception in handler first
- stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
+ # show the exception in handler first
+ stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
- stb = self.InteractiveTB.structured_traceback(
- (etype,value,tb), tb_offset=tb_offset
- )
- return stb
-
- self.CustomTB = types.MethodType(wrapped,self)
- self.custom_exceptions = exc_tuple
-
- def excepthook(self, etype, value, tb):
- """One more defense for GUI apps that call sys.excepthook.
-
- GUI frameworks like wxPython trap exceptions and call
- sys.excepthook themselves. I guess this is a feature that
- enables them to keep running after exceptions that would
- otherwise kill their mainloop. This is a bother for IPython
- which expects to catch all of the program exceptions with a try:
- except: statement.
-
- Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
- any app directly invokes sys.excepthook, it will look to the user like
- IPython crashed. In order to work around this, we can disable the
- CrashHandler and replace it with this excepthook instead, which prints a
- regular traceback using our InteractiveTB. In this fashion, apps which
- call sys.excepthook will generate a regular-looking exception from
- IPython, and the CrashHandler will only be triggered by real IPython
- crashes.
-
- This hook should be used sparingly, only in places which are not likely
- to be true IPython errors.
- """
- self.showtraceback((etype, value, tb), tb_offset=0)
-
- def _get_exc_info(self, exc_tuple=None):
- """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
+ stb = self.InteractiveTB.structured_traceback(
+ (etype,value,tb), tb_offset=tb_offset
+ )
+ return stb
+
+ self.CustomTB = types.MethodType(wrapped,self)
+ self.custom_exceptions = exc_tuple
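# An illustrative usage sketch, assuming an active IPython session where
# get_ipython() is available: a handler registered through set_custom_exc
# must return a list of strings, which validate_stb above enforces.
def demo_handler(shell, etype, value, tb, tb_offset=None):
    # A single string or None would also be accepted and normalized.
    return ["Caught %s: %s" % (etype.__name__, value)]

get_ipython().set_custom_exc((ZeroDivisionError,), demo_handler)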
+
+ def excepthook(self, etype, value, tb):
+ """One more defense for GUI apps that call sys.excepthook.
+
+ GUI frameworks like wxPython trap exceptions and call
+ sys.excepthook themselves. I guess this is a feature that
+ enables them to keep running after exceptions that would
+ otherwise kill their mainloop. This is a bother for IPython
+ which expects to catch all of the program exceptions with a try:
+ except: statement.
+
+ Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
+ any app directly invokes sys.excepthook, it will look to the user like
+ IPython crashed. In order to work around this, we can disable the
+ CrashHandler and replace it with this excepthook instead, which prints a
+ regular traceback using our InteractiveTB. In this fashion, apps which
+ call sys.excepthook will generate a regular-looking exception from
+ IPython, and the CrashHandler will only be triggered by real IPython
+ crashes.
+
+ This hook should be used sparingly, only in places which are not likely
+ to be true IPython errors.
+ """
+ self.showtraceback((etype, value, tb), tb_offset=0)
+
+ def _get_exc_info(self, exc_tuple=None):
+ """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
- Ensures sys.last_type,value,traceback hold the exc_info we found,
- from whichever source.
+ Ensures sys.last_type,value,traceback hold the exc_info we found,
+ from whichever source.
- raises ValueError if none of these contain any information
- """
- if exc_tuple is None:
- etype, value, tb = sys.exc_info()
- else:
- etype, value, tb = exc_tuple
-
- if etype is None:
- if hasattr(sys, 'last_type'):
- etype, value, tb = sys.last_type, sys.last_value, \
- sys.last_traceback
+ raises ValueError if none of these contain any information
+ """
+ if exc_tuple is None:
+ etype, value, tb = sys.exc_info()
+ else:
+ etype, value, tb = exc_tuple
+
+ if etype is None:
+ if hasattr(sys, 'last_type'):
+ etype, value, tb = sys.last_type, sys.last_value, \
+ sys.last_traceback
- if etype is None:
- raise ValueError("No exception to find")
+ if etype is None:
+ raise ValueError("No exception to find")
- # Now store the exception info in sys.last_type etc.
- # WARNING: these variables are somewhat deprecated and not
- # necessarily safe to use in a threaded environment, but tools
- # like pdb depend on their existence, so let's set them. If we
- # find problems in the field, we'll need to revisit their use.
- sys.last_type = etype
- sys.last_value = value
- sys.last_traceback = tb
+ # Now store the exception info in sys.last_type etc.
+ # WARNING: these variables are somewhat deprecated and not
+ # necessarily safe to use in a threaded environment, but tools
+ # like pdb depend on their existence, so let's set them. If we
+ # find problems in the field, we'll need to revisit their use.
+ sys.last_type = etype
+ sys.last_value = value
+ sys.last_traceback = tb
- return etype, value, tb
+ return etype, value, tb
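# Sketch of the lookup order implemented above: an explicit tuple wins,
# then sys.exc_info(), then sys.last_type/value/traceback; sys.last_* is
# updated as a side effect so tools like pdb keep working.
try:
    1 / 0
except ZeroDivisionError:
    etype, value, tb = get_ipython()._get_exc_info()
    assert etype is ZeroDivisionError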
- def show_usage_error(self, exc):
- """Show a short message for UsageErrors
+ def show_usage_error(self, exc):
+ """Show a short message for UsageErrors
- These are special exceptions that shouldn't show a traceback.
- """
+ These are special exceptions that shouldn't show a traceback.
+ """
print("UsageError: %s" % exc, file=sys.stderr)
- def get_exception_only(self, exc_tuple=None):
- """
- Return as a string (ending with a newline) the exception that
- just occurred, without any traceback.
- """
- etype, value, tb = self._get_exc_info(exc_tuple)
- msg = traceback.format_exception_only(etype, value)
- return ''.join(msg)
-
- def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
- exception_only=False):
- """Display the exception that just occurred.
-
- If nothing is known about the exception, this is the method which
- should be used throughout the code for presenting user tracebacks,
- rather than directly invoking the InteractiveTB object.
-
- A specific showsyntaxerror() also exists, but this method can take
- care of calling it if needed, so unless you are explicitly catching a
- SyntaxError exception, don't try to analyze the stack manually and
- simply call this method."""
-
- try:
- try:
- etype, value, tb = self._get_exc_info(exc_tuple)
- except ValueError:
+ def get_exception_only(self, exc_tuple=None):
+ """
+ Return as a string (ending with a newline) the exception that
+ just occurred, without any traceback.
+ """
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ msg = traceback.format_exception_only(etype, value)
+ return ''.join(msg)
+
+ def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
+ exception_only=False):
+ """Display the exception that just occurred.
+
+ If nothing is known about the exception, this is the method which
+ should be used throughout the code for presenting user tracebacks,
+ rather than directly invoking the InteractiveTB object.
+
+ A specific showsyntaxerror() also exists, but this method can take
+ care of calling it if needed, so unless you are explicitly catching a
+ SyntaxError exception, don't try to analyze the stack manually and
+ simply call this method."""
+
+ try:
+ try:
+ etype, value, tb = self._get_exc_info(exc_tuple)
+ except ValueError:
print('No traceback available to show.', file=sys.stderr)
- return
+ return
- if issubclass(etype, SyntaxError):
- # Though this won't be called by syntax errors in the input
- # line, there may be SyntaxError cases with imported code.
- self.showsyntaxerror(filename)
- elif etype is UsageError:
- self.show_usage_error(value)
- else:
- if exception_only:
- stb = ['An exception has occurred, use %tb to see '
- 'the full traceback.\n']
- stb.extend(self.InteractiveTB.get_exception_only(etype,
- value))
- else:
- try:
- # Exception classes can customise their traceback - we
- # use this in IPython.parallel for exceptions occurring
- # in the engines. This should return a list of strings.
- stb = value._render_traceback_()
- except Exception:
- stb = self.InteractiveTB.structured_traceback(etype,
- value, tb, tb_offset=tb_offset)
-
- self._showtraceback(etype, value, stb)
- if self.call_pdb:
- # drop into debugger
- self.debugger(force=True)
- return
-
- # Actually show the traceback
- self._showtraceback(etype, value, stb)
-
- except KeyboardInterrupt:
+ if issubclass(etype, SyntaxError):
+ # Though this won't be called by syntax errors in the input
+ # line, there may be SyntaxError cases with imported code.
+ self.showsyntaxerror(filename)
+ elif etype is UsageError:
+ self.show_usage_error(value)
+ else:
+ if exception_only:
+ stb = ['An exception has occurred, use %tb to see '
+ 'the full traceback.\n']
+ stb.extend(self.InteractiveTB.get_exception_only(etype,
+ value))
+ else:
+ try:
+ # Exception classes can customise their traceback - we
+ # use this in IPython.parallel for exceptions occurring
+ # in the engines. This should return a list of strings.
+ stb = value._render_traceback_()
+ except Exception:
+ stb = self.InteractiveTB.structured_traceback(etype,
+ value, tb, tb_offset=tb_offset)
+
+ self._showtraceback(etype, value, stb)
+ if self.call_pdb:
+ # drop into debugger
+ self.debugger(force=True)
+ return
+
+ # Actually show the traceback
+ self._showtraceback(etype, value, stb)
+
+ except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
-
- def _showtraceback(self, etype, evalue, stb):
- """Actually show a traceback.
-
- Subclasses may override this method to put the traceback on a different
- place, like a side channel.
- """
+
+ def _showtraceback(self, etype, evalue, stb):
+ """Actually show a traceback.
+
+ Subclasses may override this method to put the traceback on a different
+ place, like a side channel.
+ """
print(self.InteractiveTB.stb2text(stb))
-
- def showsyntaxerror(self, filename=None):
- """Display the syntax error that just occurred.
-
- This doesn't display a stack trace because there isn't one.
-
- If a filename is given, it is stuffed in the exception instead
- of what was there before (because Python's parser always uses
- "<string>" when reading from a string).
- """
- etype, value, last_traceback = self._get_exc_info()
-
- if filename and issubclass(etype, SyntaxError):
- try:
- value.filename = filename
- except:
- # Not the format we expect; leave it alone
- pass
+
+ def showsyntaxerror(self, filename=None):
+ """Display the syntax error that just occurred.
+
+ This doesn't display a stack trace because there isn't one.
+
+ If a filename is given, it is stuffed in the exception instead
+ of what was there before (because Python's parser always uses
+ "<string>" when reading from a string).
+ """
+ etype, value, last_traceback = self._get_exc_info()
+
+ if filename and issubclass(etype, SyntaxError):
+ try:
+ value.filename = filename
+ except:
+ # Not the format we expect; leave it alone
+ pass
- stb = self.SyntaxTB.structured_traceback(etype, value, [])
- self._showtraceback(etype, value, stb)
-
- # This is overridden in TerminalInteractiveShell to show a message about
- # the %paste magic.
- def showindentationerror(self):
- """Called by run_cell when there's an IndentationError in code entered
- at the prompt.
-
- This is overridden in TerminalInteractiveShell to show a message about
- the %paste magic."""
- self.showsyntaxerror()
-
- #-------------------------------------------------------------------------
- # Things related to readline
- #-------------------------------------------------------------------------
-
- def init_readline(self):
+ stb = self.SyntaxTB.structured_traceback(etype, value, [])
+ self._showtraceback(etype, value, stb)
+
+ # This is overridden in TerminalInteractiveShell to show a message about
+ # the %paste magic.
+ def showindentationerror(self):
+ """Called by run_cell when there's an IndentationError in code entered
+ at the prompt.
+
+ This is overridden in TerminalInteractiveShell to show a message about
+ the %paste magic."""
+ self.showsyntaxerror()
+
+ #-------------------------------------------------------------------------
+ # Things related to readline
+ #-------------------------------------------------------------------------
+
+ def init_readline(self):
"""DEPRECATED
Moved to terminal subclass, here only to simplify the init logic."""
- # Set a number of methods that depend on readline to be no-op
+ # Set a number of methods that depend on readline to be no-op
warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
DeprecationWarning, stacklevel=2)
- self.set_custom_completer = no_op
-
- @skip_doctest
- def set_next_input(self, s, replace=False):
- """ Sets the 'default' input string for the next command line.
-
- Example::
-
- In [1]: _ip.set_next_input("Hello World")
- In [2]: Hello World_ # cursor is here
- """
- self.rl_next_input = py3compat.cast_bytes_py2(s)
-
- def _indent_current_str(self):
- """return the current level of indentation as a string"""
- return self.input_splitter.indent_spaces * ' '
-
- #-------------------------------------------------------------------------
- # Things related to text completion
- #-------------------------------------------------------------------------
-
- def init_completer(self):
- """Initialize the completion machinery.
-
- This creates completion machinery that can be used by client code,
- either interactively in-process (typically triggered by the readline
- library), programmatically (such as in test suites) or out-of-process
- (typically over the network by remote frontends).
- """
- from IPython.core.completer import IPCompleter
- from IPython.core.completerlib import (module_completer,
- magic_run_completer, cd_completer, reset_completer)
-
- self.Completer = IPCompleter(shell=self,
- namespace=self.user_ns,
- global_namespace=self.user_global_ns,
+ self.set_custom_completer = no_op
+
+ @skip_doctest
+ def set_next_input(self, s, replace=False):
+ """ Sets the 'default' input string for the next command line.
+
+ Example::
+
+ In [1]: _ip.set_next_input("Hello World")
+ In [2]: Hello World_ # cursor is here
+ """
+ self.rl_next_input = py3compat.cast_bytes_py2(s)
+
+ def _indent_current_str(self):
+ """return the current level of indentation as a string"""
+ return self.input_splitter.indent_spaces * ' '
+
+ #-------------------------------------------------------------------------
+ # Things related to text completion
+ #-------------------------------------------------------------------------
+
+ def init_completer(self):
+ """Initialize the completion machinery.
+
+ This creates completion machinery that can be used by client code,
+ either interactively in-process (typically triggered by the readline
+ library), programmatically (such as in test suites) or out-of-process
+ (typically over the network by remote frontends).
+ """
+ from IPython.core.completer import IPCompleter
+ from IPython.core.completerlib import (module_completer,
+ magic_run_completer, cd_completer, reset_completer)
+
+ self.Completer = IPCompleter(shell=self,
+ namespace=self.user_ns,
+ global_namespace=self.user_global_ns,
use_readline=False,
- parent=self,
- )
- self.configurables.append(self.Completer)
-
- # Add custom completers to the basic ones built into IPCompleter
- sdisp = self.strdispatchers.get('complete_command', StrDispatch())
- self.strdispatchers['complete_command'] = sdisp
- self.Completer.custom_completers = sdisp
-
- self.set_hook('complete_command', module_completer, str_key = 'import')
- self.set_hook('complete_command', module_completer, str_key = 'from')
- self.set_hook('complete_command', module_completer, str_key = '%aimport')
- self.set_hook('complete_command', magic_run_completer, str_key = '%run')
- self.set_hook('complete_command', cd_completer, str_key = '%cd')
- self.set_hook('complete_command', reset_completer, str_key = '%reset')
-
-
+ parent=self,
+ )
+ self.configurables.append(self.Completer)
+
+ # Add custom completers to the basic ones built into IPCompleter
+ sdisp = self.strdispatchers.get('complete_command', StrDispatch())
+ self.strdispatchers['complete_command'] = sdisp
+ self.Completer.custom_completers = sdisp
+
+ self.set_hook('complete_command', module_completer, str_key = 'import')
+ self.set_hook('complete_command', module_completer, str_key = 'from')
+ self.set_hook('complete_command', module_completer, str_key = '%aimport')
+ self.set_hook('complete_command', magic_run_completer, str_key = '%run')
+ self.set_hook('complete_command', cd_completer, str_key = '%cd')
+ self.set_hook('complete_command', reset_completer, str_key = '%reset')
+
+
@skip_doctest_py2
- def complete(self, text, line=None, cursor_pos=None):
- """Return the completed text and a list of completions.
-
- Parameters
- ----------
-
- text : string
- A string of text to be completed on. It can be given as empty, in
- which case a line/position pair must be given instead; the completer
- itself will then split the line like readline does.
-
- line : string, optional
- The complete line that text is part of.
-
- cursor_pos : int, optional
- The position of the cursor on the input line.
-
- Returns
- -------
- text : string
- The actual text that was completed.
-
- matches : list
- A sorted list with all possible completions.
-
- The optional arguments allow the completion to take more context into
- account, and are part of the low-level completion API.
-
- This is a wrapper around the completion mechanism, similar to what
- readline does at the command line when the TAB key is hit. By
- exposing it as a method, it can be used by other non-readline
- environments (such as GUIs) for text completion.
-
- Simple usage example:
-
- In [1]: x = 'hello'
-
- In [2]: _ip.complete('x.l')
- Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
- """
-
- # Inject names into __builtin__ so we can complete on the added names.
- with self.builtin_trap:
- return self.Completer.complete(text, line, cursor_pos)
-
- def set_custom_completer(self, completer, pos=0):
- """Adds a new custom completer function.
-
- The position argument (defaults to 0) is the index in the completers
- list where you want the completer to be inserted."""
-
- newcomp = types.MethodType(completer,self.Completer)
- self.Completer.matchers.insert(pos,newcomp)
-
- def set_completer_frame(self, frame=None):
- """Set the frame of the completer."""
- if frame:
- self.Completer.namespace = frame.f_locals
- self.Completer.global_namespace = frame.f_globals
- else:
- self.Completer.namespace = self.user_ns
- self.Completer.global_namespace = self.user_global_ns
-
- #-------------------------------------------------------------------------
- # Things related to magics
- #-------------------------------------------------------------------------
-
- def init_magics(self):
- from IPython.core import magics as m
- self.magics_manager = magic.MagicsManager(shell=self,
- parent=self,
- user_magics=m.UserMagics(self))
- self.configurables.append(self.magics_manager)
-
- # Expose as public API from the magics manager
- self.register_magics = self.magics_manager.register
-
- self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
+ def complete(self, text, line=None, cursor_pos=None):
+ """Return the completed text and a list of completions.
+
+ Parameters
+ ----------
+
+ text : string
+ A string of text to be completed on. It can be given as empty, in
+ which case a line/position pair must be given instead; the completer
+ itself will then split the line like readline does.
+
+ line : string, optional
+ The complete line that text is part of.
+
+ cursor_pos : int, optional
+ The position of the cursor on the input line.
+
+ Returns
+ -------
+ text : string
+ The actual text that was completed.
+
+ matches : list
+ A sorted list with all possible completions.
+
+ The optional arguments allow the completion to take more context into
+ account, and are part of the low-level completion API.
+
+ This is a wrapper around the completion mechanism, similar to what
+ readline does at the command line when the TAB key is hit. By
+ exposing it as a method, it can be used by other non-readline
+ environments (such as GUIs) for text completion.
+
+ Simple usage example:
+
+ In [1]: x = 'hello'
+
+ In [2]: _ip.complete('x.l')
+ Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
+ """
+
+ # Inject names into __builtin__ so we can complete on the added names.
+ with self.builtin_trap:
+ return self.Completer.complete(text, line, cursor_pos)
+
+ def set_custom_completer(self, completer, pos=0):
+ """Adds a new custom completer function.
+
+ The position argument (defaults to 0) is the index in the completers
+ list where you want the completer to be inserted."""
+
+ newcomp = types.MethodType(completer,self.Completer)
+ self.Completer.matchers.insert(pos,newcomp)
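# A hedged sketch of a custom matcher: per the MethodType binding above it
# is bound to the IPCompleter (first argument) and called with the text
# being completed, returning candidate strings. Names are illustrative.
def color_matcher(completer, text):
    colors = ['red', 'green', 'blue']
    return [c for c in colors if c.startswith(text)]

get_ipython().set_custom_completer(color_matcher)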
+
+ def set_completer_frame(self, frame=None):
+ """Set the frame of the completer."""
+ if frame:
+ self.Completer.namespace = frame.f_locals
+ self.Completer.global_namespace = frame.f_globals
+ else:
+ self.Completer.namespace = self.user_ns
+ self.Completer.global_namespace = self.user_global_ns
+
+ #-------------------------------------------------------------------------
+ # Things related to magics
+ #-------------------------------------------------------------------------
+
+ def init_magics(self):
+ from IPython.core import magics as m
+ self.magics_manager = magic.MagicsManager(shell=self,
+ parent=self,
+ user_magics=m.UserMagics(self))
+ self.configurables.append(self.magics_manager)
+
+ # Expose as public API from the magics manager
+ self.register_magics = self.magics_manager.register
+
+ self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
- m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
- m.NamespaceMagics, m.OSMagics, m.PylabMagics, m.ScriptMagics,
- )
-
- # Register Magic Aliases
- mman = self.magics_manager
- # FIXME: magic aliases should be defined by the Magics classes
- # or in MagicsManager, not here
- mman.register_alias('ed', 'edit')
- mman.register_alias('hist', 'history')
- mman.register_alias('rep', 'recall')
- mman.register_alias('SVG', 'svg', 'cell')
- mman.register_alias('HTML', 'html', 'cell')
- mman.register_alias('file', 'writefile', 'cell')
-
- # FIXME: Move the color initialization to the DisplayHook, which
- # should be split into a prompt manager and displayhook. We probably
- # even need a centralized colors management object.
- self.magic('colors %s' % self.colors)
+ m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
+ m.NamespaceMagics, m.OSMagics, m.PylabMagics, m.ScriptMagics,
+ )
+
+ # Register Magic Aliases
+ mman = self.magics_manager
+ # FIXME: magic aliases should be defined by the Magics classes
+ # or in MagicsManager, not here
+ mman.register_alias('ed', 'edit')
+ mman.register_alias('hist', 'history')
+ mman.register_alias('rep', 'recall')
+ mman.register_alias('SVG', 'svg', 'cell')
+ mman.register_alias('HTML', 'html', 'cell')
+ mman.register_alias('file', 'writefile', 'cell')
+
+ # FIXME: Move the color initialization to the DisplayHook, which
+ # should be split into a prompt manager and displayhook. We probably
+ # even need a centralized colors management object.
+ self.magic('colors %s' % self.colors)
- # Defined here so that it's included in the documentation
- @functools.wraps(magic.MagicsManager.register_function)
- def register_magic_function(self, func, magic_kind='line', magic_name=None):
+ # Defined here so that it's included in the documentation
+ @functools.wraps(magic.MagicsManager.register_function)
+ def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(func,
- magic_kind=magic_kind, magic_name=magic_name)
-
- def run_line_magic(self, magic_name, line):
- """Execute the given line magic.
-
- Parameters
- ----------
- magic_name : str
- Name of the desired magic function, without '%' prefix.
-
- line : str
- The rest of the input line as a single string.
- """
- fn = self.find_line_magic(magic_name)
- if fn is None:
- cm = self.find_cell_magic(magic_name)
- etpl = "Line magic function `%%%s` not found%s."
- extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
- 'did you mean that instead?)' % magic_name )
+ magic_kind=magic_kind, magic_name=magic_name)
+
+ def run_line_magic(self, magic_name, line):
+ """Execute the given line magic.
+
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+
+ line : str
+ The rest of the input line as a single string.
+ """
+ fn = self.find_line_magic(magic_name)
+ if fn is None:
+ cm = self.find_cell_magic(magic_name)
+ etpl = "Line magic function `%%%s` not found%s."
+ extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
+ 'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
- else:
- # Note: this is the distance in the stack to the user's frame.
- # This will need to be updated if the internal calling logic gets
- # refactored, or else we'll be expanding the wrong variables.
- stack_depth = 2
- magic_arg_s = self.var_expand(line, stack_depth)
- # Put magic args in a list so we can call with f(*a) syntax
- args = [magic_arg_s]
- kwargs = {}
- # Grab local namespace if we need it:
- if getattr(fn, "needs_local_scope", False):
- kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
- with self.builtin_trap:
- result = fn(*args,**kwargs)
- return result
-
- def run_cell_magic(self, magic_name, line, cell):
- """Execute the given cell magic.
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+ stack_depth = 2
+ magic_arg_s = self.var_expand(line, stack_depth)
+ # Put magic args in a list so we can call with f(*a) syntax
+ args = [magic_arg_s]
+ kwargs = {}
+ # Grab local namespace if we need it:
+ if getattr(fn, "needs_local_scope", False):
+ kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
+ with self.builtin_trap:
+ result = fn(*args,**kwargs)
+ return result
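# Illustrative call, assuming an active session: run the %who line magic
# programmatically, exactly as `%who str` would at the prompt.
get_ipython().run_line_magic('who', 'str')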
+
+ def run_cell_magic(self, magic_name, line, cell):
+ """Execute the given cell magic.
- Parameters
- ----------
- magic_name : str
- Name of the desired magic function, without '%' prefix.
-
- line : str
- The rest of the first input line as a single string.
-
- cell : str
- The body of the cell as a (possibly multiline) string.
- """
- fn = self.find_cell_magic(magic_name)
- if fn is None:
- lm = self.find_line_magic(magic_name)
- etpl = "Cell magic `%%{0}` not found{1}."
- extra = '' if lm is None else (' (But line magic `%{0}` exists, '
- 'did you mean that instead?)'.format(magic_name))
+ Parameters
+ ----------
+ magic_name : str
+ Name of the desired magic function, without '%' prefix.
+
+ line : str
+ The rest of the first input line as a single string.
+
+ cell : str
+ The body of the cell as a (possibly multiline) string.
+ """
+ fn = self.find_cell_magic(magic_name)
+ if fn is None:
+ lm = self.find_line_magic(magic_name)
+ etpl = "Cell magic `%%{0}` not found{1}."
+ extra = '' if lm is None else (' (But line magic `%{0}` exists, '
+ 'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
- elif cell == '':
- message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
- if self.find_line_magic(magic_name) is not None:
- message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
- raise UsageError(message)
- else:
- # Note: this is the distance in the stack to the user's frame.
- # This will need to be updated if the internal calling logic gets
- # refactored, or else we'll be expanding the wrong variables.
- stack_depth = 2
- magic_arg_s = self.var_expand(line, stack_depth)
- with self.builtin_trap:
- result = fn(magic_arg_s, cell)
- return result
-
- def find_line_magic(self, magic_name):
- """Find and return a line magic by name.
-
- Returns None if the magic isn't found."""
- return self.magics_manager.magics['line'].get(magic_name)
-
- def find_cell_magic(self, magic_name):
- """Find and return a cell magic by name.
-
- Returns None if the magic isn't found."""
- return self.magics_manager.magics['cell'].get(magic_name)
-
- def find_magic(self, magic_name, magic_kind='line'):
- """Find and return a magic of the given type by name.
-
- Returns None if the magic isn't found."""
- return self.magics_manager.magics[magic_kind].get(magic_name)
-
- def magic(self, arg_s):
- """DEPRECATED. Use run_line_magic() instead.
-
- Call a magic function by name.
-
- Input: a string containing the name of the magic function to call and
- any additional arguments to be passed to the magic.
-
- magic('name -opt foo bar') is equivalent to typing at the ipython
- prompt:
-
- In [1]: %name -opt foo bar
-
- To call a magic without arguments, simply use magic('name').
-
- This provides a proper Python function to call IPython's magics in any
- valid Python code you can type at the interpreter, including loops and
- compound statements.
- """
- # TODO: should we issue a loud deprecation warning here?
- magic_name, _, magic_arg_s = arg_s.partition(' ')
- magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
- return self.run_line_magic(magic_name, magic_arg_s)
-
- #-------------------------------------------------------------------------
- # Things related to macros
- #-------------------------------------------------------------------------
-
- def define_macro(self, name, themacro):
- """Define a new macro
-
- Parameters
- ----------
- name : str
- The name of the macro.
- themacro : str or Macro
- The action to do upon invoking the macro. If a string, a new
- Macro object is created by passing the string to it.
- """
-
- from IPython.core import macro
-
- if isinstance(themacro, string_types):
- themacro = macro.Macro(themacro)
- if not isinstance(themacro, macro.Macro):
- raise ValueError('A macro must be a string or a Macro instance.')
- self.user_ns[name] = themacro
-
- #-------------------------------------------------------------------------
- # Things related to the running of system commands
- #-------------------------------------------------------------------------
-
- def system_piped(self, cmd):
- """Call the given cmd in a subprocess, piping stdout/err
-
- Parameters
- ----------
- cmd : str
- Command to execute (cannot end in '&', as background processes are
- not supported). Should not be a command that expects input
- other than simple text.
- """
- if cmd.rstrip().endswith('&'):
- # this is *far* from a rigorous test
- # We do not support backgrounding processes because we either use
- # pexpect or pipes to read from. Users can always just call
- # os.system() or use ip.system=ip.system_raw
- # if they really want a background process.
- raise OSError("Background processes not supported.")
-
- # we explicitly do NOT return the subprocess status code, because
- # a non-None value would trigger :func:`sys.displayhook` calls.
- # Instead, we store the exit_code in user_ns.
- self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
-
- def system_raw(self, cmd):
- """Call the given cmd in a subprocess using os.system on Windows or
- subprocess.call using the system shell on other platforms.
-
- Parameters
- ----------
- cmd : str
- Command to execute.
- """
- cmd = self.var_expand(cmd, depth=1)
- # protect os.system from UNC paths on Windows, which it can't handle:
- if sys.platform == 'win32':
- from IPython.utils._process_win32 import AvoidUNCPath
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- cmd = py3compat.unicode_to_str(cmd)
- try:
- ec = os.system(cmd)
- except KeyboardInterrupt:
+ elif cell == '':
+ message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
+ if self.find_line_magic(magic_name) is not None:
+ message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
+ raise UsageError(message)
+ else:
+ # Note: this is the distance in the stack to the user's frame.
+ # This will need to be updated if the internal calling logic gets
+ # refactored, or else we'll be expanding the wrong variables.
+ stack_depth = 2
+ magic_arg_s = self.var_expand(line, stack_depth)
+ with self.builtin_trap:
+ result = fn(magic_arg_s, cell)
+ return result
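# Illustrative call: the first-line arguments and the cell body are passed
# separately, mirroring a `%%writefile out.txt` cell (the file name is
# only an example).
get_ipython().run_cell_magic('writefile', 'out.txt', 'hello\n')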
+
+ def find_line_magic(self, magic_name):
+ """Find and return a line magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['line'].get(magic_name)
+
+ def find_cell_magic(self, magic_name):
+ """Find and return a cell magic by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics['cell'].get(magic_name)
+
+ def find_magic(self, magic_name, magic_kind='line'):
+ """Find and return a magic of the given type by name.
+
+ Returns None if the magic isn't found."""
+ return self.magics_manager.magics[magic_kind].get(magic_name)
+
+ def magic(self, arg_s):
+ """DEPRECATED. Use run_line_magic() instead.
+
+ Call a magic function by name.
+
+ Input: a string containing the name of the magic function to call and
+ any additional arguments to be passed to the magic.
+
+ magic('name -opt foo bar') is equivalent to typing at the ipython
+ prompt:
+
+ In [1]: %name -opt foo bar
+
+ To call a magic without arguments, simply use magic('name').
+
+ This provides a proper Python function to call IPython's magics in any
+ valid Python code you can type at the interpreter, including loops and
+ compound statements.
+ """
+ # TODO: should we issue a loud deprecation warning here?
+ magic_name, _, magic_arg_s = arg_s.partition(' ')
+ magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
+ return self.run_line_magic(magic_name, magic_arg_s)
+
+ #-------------------------------------------------------------------------
+ # Things related to macros
+ #-------------------------------------------------------------------------
+
+ def define_macro(self, name, themacro):
+ """Define a new macro
+
+ Parameters
+ ----------
+ name : str
+ The name of the macro.
+ themacro : str or Macro
+ The action to do upon invoking the macro. If a string, a new
+ Macro object is created by passing the string to it.
+ """
+
+ from IPython.core import macro
+
+ if isinstance(themacro, string_types):
+ themacro = macro.Macro(themacro)
+ if not isinstance(themacro, macro.Macro):
+ raise ValueError('A macro must be a string or a Macro instance.')
+ self.user_ns[name] = themacro
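# Sketch: a macro defined from a source string lands in user_ns and can
# then be replayed by typing its name at the prompt.
get_ipython().define_macro('hello3', "for _ in range(3): print('hello')\n")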
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of system commands
+ #-------------------------------------------------------------------------
+
+ def system_piped(self, cmd):
+ """Call the given cmd in a subprocess, piping stdout/err
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported). Should not be a command that expects input
+ other than simple text.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ # We do not support backgrounding processes because we either use
+ # pexpect or pipes to read from. Users can always just call
+ # os.system() or use ip.system=ip.system_raw
+ # if they really want a background process.
+ raise OSError("Background processes not supported.")
+
+ # we explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns.
+ self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
+
+ def system_raw(self, cmd):
+ """Call the given cmd in a subprocess using os.system on Windows or
+ subprocess.call using the system shell on other platforms.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute.
+ """
+ cmd = self.var_expand(cmd, depth=1)
+ # protect os.system from UNC paths on Windows, which it can't handle:
+ if sys.platform == 'win32':
+ from IPython.utils._process_win32 import AvoidUNCPath
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ cmd = py3compat.unicode_to_str(cmd)
+ try:
+ ec = os.system(cmd)
+ except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
- ec = -2
- else:
- cmd = py3compat.unicode_to_str(cmd)
- # For posix the result of the subprocess.call() below is an exit
- # code, which by convention is zero for success, positive for
- # program failure. Exit codes above 128 are reserved for signals,
- # and the formula for converting a signal to an exit code is usually
- # signal_number+128. To more easily differentiate between exit
- # codes and signals, ipython uses negative numbers. For instance
- # since control-c is signal 2 but exit code 130, ipython's
- # _exit_code variable will read -2. Note that some shells like
- # csh and fish don't follow sh/bash conventions for exit codes.
- executable = os.environ.get('SHELL', None)
- try:
- # Use env shell instead of default /bin/sh
- ec = subprocess.call(cmd, shell=True, executable=executable)
- except KeyboardInterrupt:
- # intercept control-C; a long traceback is not useful here
+ ec = -2
+ else:
+ cmd = py3compat.unicode_to_str(cmd)
+ # For posix the result of the subprocess.call() below is an exit
+ # code, which by convention is zero for success, positive for
+ # program failure. Exit codes above 128 are reserved for signals,
+ # and the formula for converting a signal to an exit code is usually
+ # signal_number+128. To more easily differentiate between exit
+ # codes and signals, ipython uses negative numbers. For instance
+ # since control-c is signal 2 but exit code 130, ipython's
+ # _exit_code variable will read -2. Note that some shells like
+ # csh and fish don't follow sh/bash conventions for exit codes.
+ executable = os.environ.get('SHELL', None)
+ try:
+ # Use env shell instead of default /bin/sh
+ ec = subprocess.call(cmd, shell=True, executable=executable)
+ except KeyboardInterrupt:
+ # intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
- ec = 130
- if ec > 128:
- ec = -(ec - 128)
+ ec = 130
+ if ec > 128:
+ ec = -(ec - 128)
- # We explicitly do NOT return the subprocess status code, because
- # a non-None value would trigger :func:`sys.displayhook` calls.
- # Instead, we store the exit_code in user_ns. Note the semantics
- # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
- # but raising SystemExit(_exit_code) will give status 254!
- self.user_ns['_exit_code'] = ec
-
- # use piped system by default, because it is better behaved
- system = system_piped
-
- def getoutput(self, cmd, split=True, depth=0):
- """Get output (possibly including stderr) from a subprocess.
-
- Parameters
- ----------
- cmd : str
- Command to execute (cannot end in '&', as background processes are
- not supported).
- split : bool, optional
- If True, split the output into an IPython SList. Otherwise, an
- IPython LSString is returned. These are objects similar to normal
- lists and strings, with a few convenience attributes for easier
- manipulation of line-based output. You can use '?' on them for
- details.
- depth : int, optional
- The number of frames above the caller in which to find the local
- variables to be expanded in the command string. The default (0)
- assumes the expansion variables are in the caller's stack frame.
- """
- if cmd.rstrip().endswith('&'):
- # this is *far* from a rigorous test
- raise OSError("Background processes not supported.")
- out = getoutput(self.var_expand(cmd, depth=depth+1))
- if split:
- out = SList(out.splitlines())
- else:
- out = LSString(out)
- return out
-
- #-------------------------------------------------------------------------
- # Things related to aliases
- #-------------------------------------------------------------------------
-
- def init_alias(self):
- self.alias_manager = AliasManager(shell=self, parent=self)
- self.configurables.append(self.alias_manager)
-
- #-------------------------------------------------------------------------
- # Things related to extensions
- #-------------------------------------------------------------------------
-
- def init_extension_manager(self):
- self.extension_manager = ExtensionManager(shell=self, parent=self)
- self.configurables.append(self.extension_manager)
-
- #-------------------------------------------------------------------------
- # Things related to payloads
- #-------------------------------------------------------------------------
-
- def init_payload(self):
- self.payload_manager = PayloadManager(parent=self)
- self.configurables.append(self.payload_manager)
+ # We explicitly do NOT return the subprocess status code, because
+ # a non-None value would trigger :func:`sys.displayhook` calls.
+ # Instead, we store the exit_code in user_ns. Note the semantics
+ # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
+ # but raising SystemExit(_exit_code) will give status 254!
+ self.user_ns['_exit_code'] = ec
+
+ # use piped system by default, because it is better behaved
+ system = system_piped
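# Worked example of the convention described above: a child killed by
# control-C exits with status 130 (128 + SIGINT), which is stored as the
# negative signal number.
import signal
ec = 130
if ec > 128:
    ec = -(ec - 128)
assert ec == -signal.SIGINT   # i.e. -2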
+
+ def getoutput(self, cmd, split=True, depth=0):
+ """Get output (possibly including stderr) from a subprocess.
+
+ Parameters
+ ----------
+ cmd : str
+ Command to execute (cannot end in '&', as background processes are
+ not supported).
+ split : bool, optional
+ If True, split the output into an IPython SList. Otherwise, an
+ IPython LSString is returned. These are objects similar to normal
+ lists and strings, with a few convenience attributes for easier
+ manipulation of line-based output. You can use '?' on them for
+ details.
+ depth : int, optional
+ The number of frames above the caller in which to find the local
+ variables to be expanded in the command string. The default (0)
+ assumes the expansion variables are in the caller's stack frame.
+ """
+ if cmd.rstrip().endswith('&'):
+ # this is *far* from a rigorous test
+ raise OSError("Background processes not supported.")
+ out = getoutput(self.var_expand(cmd, depth=depth+1))
+ if split:
+ out = SList(out.splitlines())
+ else:
+ out = LSString(out)
+ return out
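# Hedged usage sketch: with split=True the result is an SList from
# IPython.utils.text, whose .grep() filter and .n (newline-joined) view
# ease line-based post-processing of the captured output.
files = get_ipython().getoutput('ls')
print(files.grep(r'\.py$').n)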
+
+ #-------------------------------------------------------------------------
+ # Things related to aliases
+ #-------------------------------------------------------------------------
+
+ def init_alias(self):
+ self.alias_manager = AliasManager(shell=self, parent=self)
+ self.configurables.append(self.alias_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to extensions
+ #-------------------------------------------------------------------------
+
+ def init_extension_manager(self):
+ self.extension_manager = ExtensionManager(shell=self, parent=self)
+ self.configurables.append(self.extension_manager)
+
+ #-------------------------------------------------------------------------
+ # Things related to payloads
+ #-------------------------------------------------------------------------
+
+ def init_payload(self):
+ self.payload_manager = PayloadManager(parent=self)
+ self.configurables.append(self.payload_manager)
- #-------------------------------------------------------------------------
- # Things related to the prefilter
- #-------------------------------------------------------------------------
-
- def init_prefilter(self):
- self.prefilter_manager = PrefilterManager(shell=self, parent=self)
- self.configurables.append(self.prefilter_manager)
- # Ultimately this will be refactored in the new interpreter code, but
- # for now, we should expose the main prefilter method (there's legacy
- # code out there that may rely on this).
- self.prefilter = self.prefilter_manager.prefilter_lines
-
- def auto_rewrite_input(self, cmd):
- """Print to the screen the rewritten form of the user's command.
-
- This shows visual feedback by rewriting input lines that cause
- automatic calling to kick in, like::
-
- /f x
-
- into::
-
- ------> f(x)
-
- after the user's input prompt. This helps the user understand that the
- input line was transformed automatically by IPython.
- """
- if not self.show_rewritten_input:
- return
-
+ #-------------------------------------------------------------------------
+ # Things related to the prefilter
+ #-------------------------------------------------------------------------
+
+ def init_prefilter(self):
+ self.prefilter_manager = PrefilterManager(shell=self, parent=self)
+ self.configurables.append(self.prefilter_manager)
+ # Ultimately this will be refactored in the new interpreter code, but
+ # for now, we should expose the main prefilter method (there's legacy
+ # code out there that may rely on this).
+ self.prefilter = self.prefilter_manager.prefilter_lines
+
+ def auto_rewrite_input(self, cmd):
+ """Print to the screen the rewritten form of the user's command.
+
+ This shows visual feedback by rewriting input lines that cause
+ automatic calling to kick in, like::
+
+ /f x
+
+ into::
+
+ ------> f(x)
+
+ after the user's input prompt. This helps the user understand that the
+ input line was transformed automatically by IPython.
+ """
+ if not self.show_rewritten_input:
+ return
+
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
-
- #-------------------------------------------------------------------------
- # Things related to extracting values/expressions from kernel and user_ns
- #-------------------------------------------------------------------------
-
- def _user_obj_error(self):
- """return simple exception dict
+
+ #-------------------------------------------------------------------------
+ # Things related to extracting values/expressions from kernel and user_ns
+ #-------------------------------------------------------------------------
+
+ def _user_obj_error(self):
+ """return simple exception dict
- for use in user_expressions
- """
+ for use in user_expressions
+ """
- etype, evalue, tb = self._get_exc_info()
- stb = self.InteractiveTB.get_exception_only(etype, evalue)
+ etype, evalue, tb = self._get_exc_info()
+ stb = self.InteractiveTB.get_exception_only(etype, evalue)
- exc_info = {
- u'status' : 'error',
- u'traceback' : stb,
- u'ename' : unicode_type(etype.__name__),
- u'evalue' : py3compat.safe_unicode(evalue),
- }
-
- return exc_info
+ exc_info = {
+ u'status' : 'error',
+ u'traceback' : stb,
+ u'ename' : unicode_type(etype.__name__),
+ u'evalue' : py3compat.safe_unicode(evalue),
+ }
+
+ return exc_info
- def _format_user_obj(self, obj):
- """format a user object to display dict
+ def _format_user_obj(self, obj):
+ """format a user object to display dict
- for use in user_expressions
- """
+ for use in user_expressions
+ """
- data, md = self.display_formatter.format(obj)
- value = {
- 'status' : 'ok',
- 'data' : data,
- 'metadata' : md,
- }
- return value
+ data, md = self.display_formatter.format(obj)
+ value = {
+ 'status' : 'ok',
+ 'data' : data,
+ 'metadata' : md,
+ }
+ return value
- def user_expressions(self, expressions):
- """Evaluate a dict of expressions in the user's namespace.
-
- Parameters
- ----------
- expressions : dict
- A dict with string keys and string values. The expression values
- should be valid Python expressions, each of which will be evaluated
- in the user namespace.
-
- Returns
- -------
- A dict, keyed like the input expressions dict, with the rich mime-typed
- display_data of each value.
- """
- out = {}
- user_ns = self.user_ns
- global_ns = self.user_global_ns
+ def user_expressions(self, expressions):
+ """Evaluate a dict of expressions in the user's namespace.
+
+ Parameters
+ ----------
+ expressions : dict
+ A dict with string keys and string values. The expression values
+ should be valid Python expressions, each of which will be evaluated
+ in the user namespace.
+
+ Returns
+ -------
+ A dict, keyed like the input expressions dict, with the rich mime-typed
+ display_data of each value.
+ """
+ out = {}
+ user_ns = self.user_ns
+ global_ns = self.user_global_ns
- for key, expr in iteritems(expressions):
- try:
- value = self._format_user_obj(eval(expr, global_ns, user_ns))
- except:
- value = self._user_obj_error()
- out[key] = value
- return out
-
- #-------------------------------------------------------------------------
- # Things related to the running of code
- #-------------------------------------------------------------------------
-
- def ex(self, cmd):
- """Execute a normal python statement in user namespace."""
- with self.builtin_trap:
- exec(cmd, self.user_global_ns, self.user_ns)
-
- def ev(self, expr):
- """Evaluate python expression expr in user namespace.
-
- Returns the result of evaluation
- """
- with self.builtin_trap:
- return eval(expr, self.user_global_ns, self.user_ns)
-
- def safe_execfile(self, fname, *where, **kw):
- """A safe version of the builtin execfile().
-
- This version will never throw an exception, but instead print
- helpful error messages to the screen. This only works on pure
- Python files with the .py extension.
-
- Parameters
- ----------
- fname : string
- The name of the file to be executed.
- where : tuple
- One or two namespaces, passed to execfile() as (globals,locals).
- If only one is given, it is passed as both.
- exit_ignore : bool (False)
- If True, then silence SystemExit for non-zero status (it is always
- silenced for zero status, as it is so common).
- raise_exceptions : bool (False)
- If True raise exceptions everywhere. Meant for testing.
- shell_futures : bool (False)
- If True, the code will share future statements with the interactive
- shell. It will both be affected by previous __future__ imports, and
- any __future__ imports in the code will affect the shell. If False,
- __future__ imports are not shared in either direction.
-
- """
- kw.setdefault('exit_ignore', False)
- kw.setdefault('raise_exceptions', False)
- kw.setdefault('shell_futures', False)
-
- fname = os.path.abspath(os.path.expanduser(fname))
-
- # Make sure we can open the file
- try:
- with open(fname):
- pass
- except:
- warn('Could not open file <%s> for safe execution.' % fname)
- return
-
- # Find things also in current directory. This is needed to mimic the
- # behavior of running a script from the system command line, where
- # Python inserts the script's directory into sys.path
- dname = os.path.dirname(fname)
-
+ for key, expr in iteritems(expressions):
+ try:
+ value = self._format_user_obj(eval(expr, global_ns, user_ns))
+ except:
+ value = self._user_obj_error()
+ out[key] = value
+ return out
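# Illustrative round trip: each value is eval'd in the user namespace and
# wrapped in a display-data dict ('status', 'data', 'metadata'), or in the
# error form built by _user_obj_error.
ip = get_ipython()
ip.run_cell('x = 2')
reply = ip.user_expressions({'double': 'x * 2'})
assert reply['double']['status'] == 'ok'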
+
+ #-------------------------------------------------------------------------
+ # Things related to the running of code
+ #-------------------------------------------------------------------------
+
+ def ex(self, cmd):
+ """Execute a normal python statement in user namespace."""
+ with self.builtin_trap:
+ exec(cmd, self.user_global_ns, self.user_ns)
+
+ def ev(self, expr):
+ """Evaluate python expression expr in user namespace.
+
+ Returns the result of evaluation
+ """
+ with self.builtin_trap:
+ return eval(expr, self.user_global_ns, self.user_ns)
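# Quick sketch of the statement/expression split: ex() runs a statement
# for its side effects, ev() returns an expression's value, both inside
# builtin_trap.
ip = get_ipython()
ip.ex('y = 41')
assert ip.ev('y + 1') == 42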
+
+ def safe_execfile(self, fname, *where, **kw):
+ """A safe version of the builtin execfile().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen. This only works on pure
+ Python files with the .py extension.
+
+ Parameters
+ ----------
+ fname : string
+ The name of the file to be executed.
+ where : tuple
+ One or two namespaces, passed to execfile() as (globals,locals).
+ If only one is given, it is passed as both.
+ exit_ignore : bool (False)
+ If True, then silence SystemExit for non-zero status (it is always
+ silenced for zero status, as it is so common).
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ """
+ kw.setdefault('exit_ignore', False)
+ kw.setdefault('raise_exceptions', False)
+ kw.setdefault('shell_futures', False)
+
+ fname = os.path.abspath(os.path.expanduser(fname))
+
+ # Make sure we can open the file
+ try:
+ with open(fname):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = os.path.dirname(fname)
+
with prepended_to_syspath(dname), self.builtin_trap:
- try:
- glob, loc = (where + (None, ))[:2]
- py3compat.execfile(
- fname, glob, loc,
- self.compile if kw['shell_futures'] else None)
- except SystemExit as status:
- # If the call was made with 0 or None exit status (sys.exit(0)
- # or sys.exit() ), don't bother showing a traceback, as both of
- # these are considered normal by the OS:
- # > python -c'import sys;sys.exit(0)'; echo $?
- # 0
- # > python -c'import sys;sys.exit()'; echo $?
- # 0
- # For other exit status, we show the exception unless
- # explicitly silenced, but only in short form.
- if status.code:
- if kw['raise_exceptions']:
- raise
- if not kw['exit_ignore']:
- self.showtraceback(exception_only=True)
- except:
- if kw['raise_exceptions']:
- raise
- # tb offset is 2 because we wrap execfile
- self.showtraceback(tb_offset=2)
-
- def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
- """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
-
- Parameters
- ----------
- fname : str
- The name of the file to execute. The filename must have a
- .ipy or .ipynb extension.
- shell_futures : bool (False)
- If True, the code will share future statements with the interactive
- shell. It will both be affected by previous __future__ imports, and
- any __future__ imports in the code will affect the shell. If False,
- __future__ imports are not shared in either direction.
- raise_exceptions : bool (False)
- If True raise exceptions everywhere. Meant for testing.
- """
- fname = os.path.abspath(os.path.expanduser(fname))
-
- # Make sure we can open the file
- try:
- with open(fname):
- pass
- except:
- warn('Could not open file <%s> for safe execution.' % fname)
- return
-
- # Find things also in current directory. This is needed to mimic the
- # behavior of running a script from the system command line, where
- # Python inserts the script's directory into sys.path
- dname = os.path.dirname(fname)
+ try:
+ glob, loc = (where + (None, ))[:2]
+ py3compat.execfile(
+ fname, glob, loc,
+ self.compile if kw['shell_futures'] else None)
+ except SystemExit as status:
+ # If the call was made with 0 or None exit status (sys.exit(0)
+ # or sys.exit() ), don't bother showing a traceback, as both of
+ # these are considered normal by the OS:
+ # > python -c'import sys;sys.exit(0)'; echo $?
+ # 0
+ # > python -c'import sys;sys.exit()'; echo $?
+ # 0
+ # For other exit status, we show the exception unless
+ # explicitly silenced, but only in short form.
+ if status.code:
+ if kw['raise_exceptions']:
+ raise
+ if not kw['exit_ignore']:
+ self.showtraceback(exception_only=True)
+ except:
+ if kw['raise_exceptions']:
+ raise
+ # tb offset is 2 because we wrap execfile
+ self.showtraceback(tb_offset=2)
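# Illustrative call ('script.py' is a placeholder): failures are rendered
# via showtraceback() instead of propagating, unless raise_exceptions=True
# is passed.
ip = get_ipython()
ip.safe_execfile('script.py', ip.user_ns)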
+
+ def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
+ """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
+
+ Parameters
+ ----------
+ fname : str
+ The name of the file to execute. The filename must have a
+ .ipy or .ipynb extension.
+ shell_futures : bool (False)
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+ raise_exceptions : bool (False)
+ If True raise exceptions everywhere. Meant for testing.
+ """
+ fname = os.path.abspath(os.path.expanduser(fname))
+
+ # Make sure we can open the file
+ try:
+ with open(fname):
+ pass
+ except:
+ warn('Could not open file <%s> for safe execution.' % fname)
+ return
+
+ # Find things also in current directory. This is needed to mimic the
+ # behavior of running a script from the system command line, where
+ # Python inserts the script's directory into sys.path
+ dname = os.path.dirname(fname)
- def get_cells():
- """generator for sequence of code blocks to run"""
- if fname.endswith('.ipynb'):
- from nbformat import read
+ def get_cells():
+ """generator for sequence of code blocks to run"""
+ if fname.endswith('.ipynb'):
+ from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
- else:
- with open(fname) as f:
- yield f.read()
-
- with prepended_to_syspath(dname):
- try:
- for cell in get_cells():
- result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
- if raise_exceptions:
- result.raise_error()
- elif not result.success:
- break
- except:
- if raise_exceptions:
- raise
- self.showtraceback()
- warn('Unknown failure executing file: <%s>' % fname)
-
- def safe_run_module(self, mod_name, where):
- """A safe version of runpy.run_module().
-
- This version will never throw an exception, but instead print
- helpful error messages to the screen.
-
- `SystemExit` exceptions with status code 0 or None are ignored.
-
- Parameters
- ----------
- mod_name : string
- The name of the module to be executed.
- where : dict
- The globals namespace.
- """
- try:
- try:
- where.update(
- runpy.run_module(str(mod_name), run_name="__main__",
- alter_sys=True)
- )
- except SystemExit as status:
- if status.code:
- raise
- except:
- self.showtraceback()
- warn('Unknown failure executing module: <%s>' % mod_name)
-
- def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
- """Run a complete IPython cell.
-
- Parameters
- ----------
- raw_cell : str
- The code (including IPython code such as %magic functions) to run.
- store_history : bool
- If True, the raw and translated cell will be stored in IPython's
- history. For user code calling back into IPython's machinery, this
- should be set to False.
- silent : bool
-          If True, avoid side-effects, such as implicit displayhooks
-          and logging. silent=True forces store_history=False.
- shell_futures : bool
- If True, the code will share future statements with the interactive
- shell. It will both be affected by previous __future__ imports, and
- any __future__ imports in the code will affect the shell. If False,
- __future__ imports are not shared in either direction.
-
- Returns
- -------
- result : :class:`ExecutionResult`
- """
- result = ExecutionResult()
-
- if (not raw_cell) or raw_cell.isspace():
+ else:
+ with open(fname) as f:
+ yield f.read()
+
+ with prepended_to_syspath(dname):
+ try:
+ for cell in get_cells():
+ result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
+ if raise_exceptions:
+ result.raise_error()
+ elif not result.success:
+ break
+ except:
+ if raise_exceptions:
+ raise
+ self.showtraceback()
+ warn('Unknown failure executing file: <%s>' % fname)
+
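For context, the notebook branch of get_cells boils down to the following standalone sketch; it uses the public nbformat API, and 'example.ipynb' is a hypothetical path:

# Sketch: yield the source of each code cell, as get_cells does for .ipynb.
from nbformat import read

def iter_code_cells(path):
    nb = read(path, as_version=4)
    for cell in nb.cells:
        if cell.cell_type == 'code':
            yield cell.source

for source in iter_code_cells('example.ipynb'):   # hypothetical notebook
    print(source)
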
+ def safe_run_module(self, mod_name, where):
+ """A safe version of runpy.run_module().
+
+ This version will never throw an exception, but instead print
+ helpful error messages to the screen.
+
+ `SystemExit` exceptions with status code 0 or None are ignored.
+
+ Parameters
+ ----------
+ mod_name : string
+ The name of the module to be executed.
+ where : dict
+ The globals namespace.
+ """
+ try:
+ try:
+ where.update(
+ runpy.run_module(str(mod_name), run_name="__main__",
+ alter_sys=True)
+ )
+ except SystemExit as status:
+ if status.code:
+ raise
+ except:
+ self.showtraceback()
+ warn('Unknown failure executing module: <%s>' % mod_name)
+
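The call safe_run_module wraps is plain runpy; a minimal sketch without IPython's error handling ('this' is just a harmless stdlib module to run):

# Sketch: run a module as __main__ and merge its globals, like safe_run_module.
import runpy

where = {}
where.update(
    runpy.run_module('this', run_name='__main__', alter_sys=True)
)
print(sorted(where)[:5])    # a few names from the executed module's namespace
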
+ def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
+ """Run a complete IPython cell.
+
+ Parameters
+ ----------
+ raw_cell : str
+ The code (including IPython code such as %magic functions) to run.
+ store_history : bool
+ If True, the raw and translated cell will be stored in IPython's
+ history. For user code calling back into IPython's machinery, this
+ should be set to False.
+ silent : bool
+          If True, avoid side-effects, such as implicit displayhooks
+          and logging. silent=True forces store_history=False.
+ shell_futures : bool
+ If True, the code will share future statements with the interactive
+ shell. It will both be affected by previous __future__ imports, and
+ any __future__ imports in the code will affect the shell. If False,
+ __future__ imports are not shared in either direction.
+
+ Returns
+ -------
+ result : :class:`ExecutionResult`
+ """
+ result = ExecutionResult()
+
+ if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
- return result
+ return result
- if silent:
- store_history = False
-
- if store_history:
- result.execution_count = self.execution_count
-
- def error_before_exec(value):
+ if silent:
+ store_history = False
+
+ if store_history:
+ result.execution_count = self.execution_count
+
+ def error_before_exec(value):
if store_history:
self.execution_count += 1
- result.error_before_exec = value
+ result.error_before_exec = value
self.last_execution_succeeded = False
- return result
-
- self.events.trigger('pre_execute')
- if not silent:
- self.events.trigger('pre_run_cell')
-
-        # If any of our input transformations (input_transformer_manager or
- # prefilter_manager) raises an exception, we store it in this variable
- # so that we can display the error after logging the input and storing
- # it in the history.
- preprocessing_exc_tuple = None
- try:
- # Static input transformations
- cell = self.input_transformer_manager.transform_cell(raw_cell)
- except SyntaxError:
- preprocessing_exc_tuple = sys.exc_info()
- cell = raw_cell # cell has to exist so it can be stored/logged
- else:
- if len(cell.splitlines()) == 1:
- # Dynamic transformations - only applied for single line commands
- with self.builtin_trap:
- try:
- # use prefilter_lines to handle trailing newlines
- # restore trailing newline for ast.parse
- cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
- except Exception:
- # don't allow prefilter errors to crash IPython
- preprocessing_exc_tuple = sys.exc_info()
-
- # Store raw and processed history
- if store_history:
- self.history_manager.store_inputs(self.execution_count,
- cell, raw_cell)
- if not silent:
- self.logger.log(cell, raw_cell)
-
- # Display the exception if input processing failed.
- if preprocessing_exc_tuple is not None:
- self.showtraceback(preprocessing_exc_tuple)
- if store_history:
- self.execution_count += 1
- return error_before_exec(preprocessing_exc_tuple[2])
-
- # Our own compiler remembers the __future__ environment. If we want to
- # run code with a separate __future__ environment, use the default
- # compiler
- compiler = self.compile if shell_futures else CachingCompiler()
-
- with self.builtin_trap:
- cell_name = self.compile.cache(cell, self.execution_count)
-
- with self.display_trap:
- # Compile to bytecode
- try:
- code_ast = compiler.ast_parse(cell, filename=cell_name)
+ return result
+
+ self.events.trigger('pre_execute')
+ if not silent:
+ self.events.trigger('pre_run_cell')
+
+        # If any of our input transformations (input_transformer_manager or
+ # prefilter_manager) raises an exception, we store it in this variable
+ # so that we can display the error after logging the input and storing
+ # it in the history.
+ preprocessing_exc_tuple = None
+ try:
+ # Static input transformations
+ cell = self.input_transformer_manager.transform_cell(raw_cell)
+ except SyntaxError:
+ preprocessing_exc_tuple = sys.exc_info()
+ cell = raw_cell # cell has to exist so it can be stored/logged
+ else:
+ if len(cell.splitlines()) == 1:
+ # Dynamic transformations - only applied for single line commands
+ with self.builtin_trap:
+ try:
+ # use prefilter_lines to handle trailing newlines
+ # restore trailing newline for ast.parse
+ cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
+ except Exception:
+ # don't allow prefilter errors to crash IPython
+ preprocessing_exc_tuple = sys.exc_info()
+
+ # Store raw and processed history
+ if store_history:
+ self.history_manager.store_inputs(self.execution_count,
+ cell, raw_cell)
+ if not silent:
+ self.logger.log(cell, raw_cell)
+
+ # Display the exception if input processing failed.
+ if preprocessing_exc_tuple is not None:
+ self.showtraceback(preprocessing_exc_tuple)
+ if store_history:
+ self.execution_count += 1
+ return error_before_exec(preprocessing_exc_tuple[2])
+
+ # Our own compiler remembers the __future__ environment. If we want to
+ # run code with a separate __future__ environment, use the default
+ # compiler
+ compiler = self.compile if shell_futures else CachingCompiler()
+
+ with self.builtin_trap:
+ cell_name = self.compile.cache(cell, self.execution_count)
+
+ with self.display_trap:
+ # Compile to bytecode
+ try:
+ code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
- except IndentationError as e:
- self.showindentationerror()
- return error_before_exec(e)
- except (OverflowError, SyntaxError, ValueError, TypeError,
- MemoryError) as e:
- self.showsyntaxerror()
- return error_before_exec(e)
-
- # Apply AST transformations
- try:
- code_ast = self.transform_ast(code_ast)
- except InputRejected as e:
- self.showtraceback()
- return error_before_exec(e)
-
- # Give the displayhook a reference to our ExecutionResult so it
- # can fill in the output value.
- self.displayhook.exec_result = result
-
- # Execute the user code
- interactivity = "none" if silent else self.ast_node_interactivity
+ except IndentationError as e:
+ self.showindentationerror()
+ return error_before_exec(e)
+ except (OverflowError, SyntaxError, ValueError, TypeError,
+ MemoryError) as e:
+ self.showsyntaxerror()
+ return error_before_exec(e)
+
+ # Apply AST transformations
+ try:
+ code_ast = self.transform_ast(code_ast)
+ except InputRejected as e:
+ self.showtraceback()
+ return error_before_exec(e)
+
+ # Give the displayhook a reference to our ExecutionResult so it
+ # can fill in the output value.
+ self.displayhook.exec_result = result
+
+ # Execute the user code
+ interactivity = "none" if silent else self.ast_node_interactivity
has_raised = self.run_ast_nodes(code_ast.body, cell_name,
- interactivity=interactivity, compiler=compiler, result=result)
+ interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
-
- # Reset this so later displayed values do not modify the
- # ExecutionResult
- self.displayhook.exec_result = None
-
- self.events.trigger('post_execute')
- if not silent:
- self.events.trigger('post_run_cell')
-
- if store_history:
- # Write output to the database. Does nothing unless
- # history output logging is enabled.
- self.history_manager.store_output(self.execution_count)
- # Each cell is a *single* input, regardless of how many lines it has
- self.execution_count += 1
-
- return result
+
+ # Reset this so later displayed values do not modify the
+ # ExecutionResult
+ self.displayhook.exec_result = None
+
+ self.events.trigger('post_execute')
+ if not silent:
+ self.events.trigger('post_run_cell')
+
+ if store_history:
+ # Write output to the database. Does nothing unless
+ # history output logging is enabled.
+ self.history_manager.store_output(self.execution_count)
+ # Each cell is a *single* input, regardless of how many lines it has
+ self.execution_count += 1
+
+ return result
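A minimal usage sketch for run_cell, assuming code running inside an active IPython session (get_ipython returns None otherwise):

# Sketch: driving run_cell programmatically.
from IPython import get_ipython

ip = get_ipython()                  # None outside an IPython session
if ip is not None:
    result = ip.run_cell('x = 1 + 1\nx', store_history=False)
    print(result.success)           # True if the cell ran without raising
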
- def transform_ast(self, node):
- """Apply the AST transformations from self.ast_transformers
+ def transform_ast(self, node):
+ """Apply the AST transformations from self.ast_transformers
- Parameters
- ----------
- node : ast.Node
- The root node to be transformed. Typically called with the ast.Module
- produced by parsing user input.
+ Parameters
+ ----------
+ node : ast.Node
+ The root node to be transformed. Typically called with the ast.Module
+ produced by parsing user input.
- Returns
- -------
- An ast.Node corresponding to the node it was called with. Note that it
- may also modify the passed object, so don't rely on references to the
- original AST.
- """
- for transformer in self.ast_transformers:
- try:
- node = transformer.visit(node)
- except InputRejected:
- # User-supplied AST transformers can reject an input by raising
- # an InputRejected. Short-circuit in this case so that we
- # don't unregister the transform.
- raise
- except Exception:
- warn("AST transformer %r threw an error. It will be unregistered." % transformer)
- self.ast_transformers.remove(transformer)
+ Returns
+ -------
+ An ast.Node corresponding to the node it was called with. Note that it
+ may also modify the passed object, so don't rely on references to the
+ original AST.
+ """
+ for transformer in self.ast_transformers:
+ try:
+ node = transformer.visit(node)
+ except InputRejected:
+ # User-supplied AST transformers can reject an input by raising
+ # an InputRejected. Short-circuit in this case so that we
+ # don't unregister the transform.
+ raise
+ except Exception:
+ warn("AST transformer %r threw an error. It will be unregistered." % transformer)
+ self.ast_transformers.remove(transformer)
- if self.ast_transformers:
- ast.fix_missing_locations(node)
- return node
+ if self.ast_transformers:
+ ast.fix_missing_locations(node)
+ return node
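A user-supplied transformer of the kind transform_ast iterates over might look like this minimal sketch (ast.Num is the literal node on the Python versions this py2 tree targets; `ip` is a hypothetical active instance):

# Sketch: an ast_transformers entry that negates every integer literal.
import ast

class NegateIntegers(ast.NodeTransformer):
    def visit_Num(self, node):
        return ast.copy_location(ast.Num(n=-node.n), node)

# Registration (hypothetical session):
#     ip.ast_transformers.append(NegateIntegers())
# transform_ast then calls fix_missing_locations on the rewritten tree.
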
-
- def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr',
- compiler=compile, result=None):
- """Run a sequence of AST nodes. The execution mode depends on the
- interactivity parameter.
-
- Parameters
- ----------
- nodelist : list
- A sequence of AST nodes to run.
- cell_name : str
- Will be passed to the compiler as the filename of the cell. Typically
- the value returned by ip.compile.cache(cell).
- interactivity : str
- 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
- run interactively (displaying output from expressions). 'last_expr'
- will run the last node interactively only if it is an expression (i.e.
-          expressions in loops or other blocks are not displayed). Other values
- for this parameter will raise a ValueError.
- compiler : callable
- A function with the same interface as the built-in compile(), to turn
- the AST nodes into code objects. Default is the built-in compile().
- result : ExecutionResult, optional
- An object to store exceptions that occur during execution.
-
- Returns
- -------
- True if an exception occurred while running code, False if it finished
- running.
- """
- if not nodelist:
- return
-
- if interactivity == 'last_expr':
- if isinstance(nodelist[-1], ast.Expr):
- interactivity = "last"
- else:
- interactivity = "none"
-
- if interactivity == 'none':
- to_run_exec, to_run_interactive = nodelist, []
- elif interactivity == 'last':
- to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
- elif interactivity == 'all':
- to_run_exec, to_run_interactive = [], nodelist
- else:
- raise ValueError("Interactivity was %r" % interactivity)
-
- try:
- for i, node in enumerate(to_run_exec):
- mod = ast.Module([node])
- code = compiler(mod, cell_name, "exec")
- if self.run_code(code, result):
- return True
-
- for i, node in enumerate(to_run_interactive):
- mod = ast.Interactive([node])
- code = compiler(mod, cell_name, "single")
- if self.run_code(code, result):
- return True
-
- # Flush softspace
- if softspace(sys.stdout, 0):
- print()
-
- except:
- # It's possible to have exceptions raised here, typically by
- # compilation of odd code (such as a naked 'return' outside a
- # function) that did parse but isn't valid. Typically the exception
- # is a SyntaxError, but it's safest just to catch anything and show
- # the user a traceback.
-
- # We do only one try/except outside the loop to minimize the impact
- # on runtime, and also because if any node in the node list is
- # broken, we should stop execution completely.
- if result:
- result.error_before_exec = sys.exc_info()[1]
- self.showtraceback()
- return True
-
- return False
-
- def run_code(self, code_obj, result=None):
- """Execute a code object.
-
- When an exception occurs, self.showtraceback() is called to display a
- traceback.
-
- Parameters
- ----------
- code_obj : code object
- A compiled code object, to be executed
- result : ExecutionResult, optional
- An object to store exceptions that occur during execution.
-
- Returns
- -------
- False : successful execution.
- True : an error occurred.
- """
- # Set our own excepthook in case the user code tries to call it
- # directly, so that the IPython crash handler doesn't get triggered
- old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
-
- # we save the original sys.excepthook in the instance, in case config
- # code (such as magics) needs access to it.
- self.sys_excepthook = old_excepthook
- outflag = 1 # happens in more places, so it's easier as default
- try:
- try:
- self.hooks.pre_run_code_hook()
- #rprint('Running code', repr(code_obj)) # dbg
- exec(code_obj, self.user_global_ns, self.user_ns)
- finally:
- # Reset our crash handler in place
- sys.excepthook = old_excepthook
- except SystemExit as e:
- if result is not None:
- result.error_in_exec = e
- self.showtraceback(exception_only=True)
+
+ def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr',
+ compiler=compile, result=None):
+ """Run a sequence of AST nodes. The execution mode depends on the
+ interactivity parameter.
+
+ Parameters
+ ----------
+ nodelist : list
+ A sequence of AST nodes to run.
+ cell_name : str
+ Will be passed to the compiler as the filename of the cell. Typically
+ the value returned by ip.compile.cache(cell).
+ interactivity : str
+ 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
+ run interactively (displaying output from expressions). 'last_expr'
+ will run the last node interactively only if it is an expression (i.e.
+          expressions in loops or other blocks are not displayed). Other values
+ for this parameter will raise a ValueError.
+ compiler : callable
+ A function with the same interface as the built-in compile(), to turn
+ the AST nodes into code objects. Default is the built-in compile().
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+
+ Returns
+ -------
+ True if an exception occurred while running code, False if it finished
+ running.
+ """
+ if not nodelist:
+ return
+
+ if interactivity == 'last_expr':
+ if isinstance(nodelist[-1], ast.Expr):
+ interactivity = "last"
+ else:
+ interactivity = "none"
+
+ if interactivity == 'none':
+ to_run_exec, to_run_interactive = nodelist, []
+ elif interactivity == 'last':
+ to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
+ elif interactivity == 'all':
+ to_run_exec, to_run_interactive = [], nodelist
+ else:
+ raise ValueError("Interactivity was %r" % interactivity)
+
+ try:
+ for i, node in enumerate(to_run_exec):
+ mod = ast.Module([node])
+ code = compiler(mod, cell_name, "exec")
+ if self.run_code(code, result):
+ return True
+
+ for i, node in enumerate(to_run_interactive):
+ mod = ast.Interactive([node])
+ code = compiler(mod, cell_name, "single")
+ if self.run_code(code, result):
+ return True
+
+ # Flush softspace
+ if softspace(sys.stdout, 0):
+ print()
+
+ except:
+ # It's possible to have exceptions raised here, typically by
+ # compilation of odd code (such as a naked 'return' outside a
+ # function) that did parse but isn't valid. Typically the exception
+ # is a SyntaxError, but it's safest just to catch anything and show
+ # the user a traceback.
+
+ # We do only one try/except outside the loop to minimize the impact
+ # on runtime, and also because if any node in the node list is
+ # broken, we should stop execution completely.
+ if result:
+ result.error_before_exec = sys.exc_info()[1]
+ self.showtraceback()
+ return True
+
+ return False
+
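The difference between the two execution modes chosen above comes down to the compile() mode string; a stdlib-only sketch:

# Sketch: "exec" discards bare expression values, "single" echoes them
# through sys.displayhook -- which is why interactive nodes use "single".
code_exec = compile('1 + 1', '<cell>', 'exec')
exec(code_exec)           # silent: the value 2 is computed and discarded

code_single = compile('1 + 1', '<cell>', 'single')
exec(code_single)         # echoed: sys.displayhook prints 2
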
+ def run_code(self, code_obj, result=None):
+ """Execute a code object.
+
+ When an exception occurs, self.showtraceback() is called to display a
+ traceback.
+
+ Parameters
+ ----------
+ code_obj : code object
+ A compiled code object, to be executed
+ result : ExecutionResult, optional
+ An object to store exceptions that occur during execution.
+
+ Returns
+ -------
+ False : successful execution.
+ True : an error occurred.
+ """
+ # Set our own excepthook in case the user code tries to call it
+ # directly, so that the IPython crash handler doesn't get triggered
+ old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
+
+ # we save the original sys.excepthook in the instance, in case config
+ # code (such as magics) needs access to it.
+ self.sys_excepthook = old_excepthook
+ outflag = 1 # happens in more places, so it's easier as default
+ try:
+ try:
+ self.hooks.pre_run_code_hook()
+ #rprint('Running code', repr(code_obj)) # dbg
+ exec(code_obj, self.user_global_ns, self.user_ns)
+ finally:
+ # Reset our crash handler in place
+ sys.excepthook = old_excepthook
+ except SystemExit as e:
+ if result is not None:
+ result.error_in_exec = e
+ self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
- except self.custom_exceptions:
- etype, value, tb = sys.exc_info()
- if result is not None:
- result.error_in_exec = value
- self.CustomTB(etype, value, tb)
- except:
- if result is not None:
- result.error_in_exec = sys.exc_info()[1]
- self.showtraceback()
- else:
- outflag = 0
- return outflag
-
- # For backwards compatibility
- runcode = run_code
-
+ except self.custom_exceptions:
+ etype, value, tb = sys.exc_info()
+ if result is not None:
+ result.error_in_exec = value
+ self.CustomTB(etype, value, tb)
+ except:
+ if result is not None:
+ result.error_in_exec = sys.exc_info()[1]
+ self.showtraceback()
+ else:
+ outflag = 0
+ return outflag
+
+ # For backwards compatibility
+ runcode = run_code
+
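A minimal sketch of run_code's flag convention, assuming an active session:

# Sketch: run_code returns a falsy flag on success, truthy on error.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    flag = ip.run_code(compile('x = 42', '<cell>', 'exec'))
    assert not flag                 # falsy: execution succeeded
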
def check_complete(self, code):
"""Return whether a block of code is ready to execute, or should be continued
@@ -2920,345 +2920,345 @@ class InteractiveShell(SingletonConfigurable):
status, nspaces = self.input_splitter.check_complete(code)
return status, ' ' * (nspaces or 0)
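A minimal sketch of the (status, indent) contract, assuming an active session; the exact indent width comes from the input splitter:

# Sketch: check_complete reports whether input is ready to execute.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    print(ip.check_complete('x = 1'))               # ('complete', '')
    print(ip.check_complete('for i in range(3):'))  # typically ('incomplete', '    ')
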
- #-------------------------------------------------------------------------
- # Things related to GUI support and pylab
- #-------------------------------------------------------------------------
-
+ #-------------------------------------------------------------------------
+ # Things related to GUI support and pylab
+ #-------------------------------------------------------------------------
+
active_eventloop = None
- def enable_gui(self, gui=None):
- raise NotImplementedError('Implement enable_gui in a subclass')
+ def enable_gui(self, gui=None):
+ raise NotImplementedError('Implement enable_gui in a subclass')
- def enable_matplotlib(self, gui=None):
- """Enable interactive matplotlib and inline figure support.
+ def enable_matplotlib(self, gui=None):
+ """Enable interactive matplotlib and inline figure support.
- This takes the following steps:
+ This takes the following steps:
- 1. select the appropriate eventloop and matplotlib backend
- 2. set up matplotlib for interactive use with that backend
- 3. configure formatters for inline figure display
- 4. enable the selected gui eventloop
+ 1. select the appropriate eventloop and matplotlib backend
+ 2. set up matplotlib for interactive use with that backend
+ 3. configure formatters for inline figure display
+ 4. enable the selected gui eventloop
- Parameters
- ----------
- gui : optional, string
- If given, dictates the choice of matplotlib GUI backend to use
- (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
- 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
- matplotlib (as dictated by the matplotlib build-time options plus the
- user's matplotlibrc configuration file). Note that not all backends
- make sense in all contexts, for example a terminal ipython can't
- display figures inline.
- """
- from IPython.core import pylabtools as pt
- gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ """
+ from IPython.core import pylabtools as pt
+ gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
- if gui != 'inline':
- # If we have our first gui selection, store it
- if self.pylab_gui_select is None:
- self.pylab_gui_select = gui
- # Otherwise if they are different
- elif gui != self.pylab_gui_select:
- print ('Warning: Cannot change to a different GUI toolkit: %s.'
- ' Using %s instead.' % (gui, self.pylab_gui_select))
- gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
+ if gui != 'inline':
+ # If we have our first gui selection, store it
+ if self.pylab_gui_select is None:
+ self.pylab_gui_select = gui
+ # Otherwise if they are different
+ elif gui != self.pylab_gui_select:
+ print ('Warning: Cannot change to a different GUI toolkit: %s.'
+ ' Using %s instead.' % (gui, self.pylab_gui_select))
+ gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
- pt.activate_matplotlib(backend)
- pt.configure_inline_support(self, backend)
+ pt.activate_matplotlib(backend)
+ pt.configure_inline_support(self, backend)
- # Now we must activate the gui pylab wants to use, and fix %run to take
- # plot updates into account
- self.enable_gui(gui)
- self.magics_manager.registry['ExecutionMagics'].default_runner = \
- pt.mpl_runner(self.safe_execfile)
+ # Now we must activate the gui pylab wants to use, and fix %run to take
+ # plot updates into account
+ self.enable_gui(gui)
+ self.magics_manager.registry['ExecutionMagics'].default_runner = \
+ pt.mpl_runner(self.safe_execfile)
- return gui, backend
-
- def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
- """Activate pylab support at runtime.
-
- This turns on support for matplotlib, preloads into the interactive
- namespace all of numpy and pylab, and configures IPython to correctly
- interact with the GUI event loop. The GUI backend to be used can be
- optionally selected with the optional ``gui`` argument.
+ return gui, backend
+
+ def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
+ """Activate pylab support at runtime.
+
+ This turns on support for matplotlib, preloads into the interactive
+ namespace all of numpy and pylab, and configures IPython to correctly
+ interact with the GUI event loop. The GUI backend to be used can be
+ optionally selected with the optional ``gui`` argument.
- This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
-
- Parameters
- ----------
- gui : optional, string
- If given, dictates the choice of matplotlib GUI backend to use
- (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
- 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
- matplotlib (as dictated by the matplotlib build-time options plus the
- user's matplotlibrc configuration file). Note that not all backends
- make sense in all contexts, for example a terminal ipython can't
- display figures inline.
- import_all : optional, bool, default: True
- Whether to do `from numpy import *` and `from pylab import *`
- in addition to module imports.
- welcome_message : deprecated
- This argument is ignored, no welcome message will be displayed.
- """
- from IPython.core.pylabtools import import_pylab
+ This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
+
+ Parameters
+ ----------
+ gui : optional, string
+ If given, dictates the choice of matplotlib GUI backend to use
+ (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
+ 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
+ matplotlib (as dictated by the matplotlib build-time options plus the
+ user's matplotlibrc configuration file). Note that not all backends
+ make sense in all contexts, for example a terminal ipython can't
+ display figures inline.
+ import_all : optional, bool, default: True
+ Whether to do `from numpy import *` and `from pylab import *`
+ in addition to module imports.
+ welcome_message : deprecated
+ This argument is ignored, no welcome message will be displayed.
+ """
+ from IPython.core.pylabtools import import_pylab
- gui, backend = self.enable_matplotlib(gui)
+ gui, backend = self.enable_matplotlib(gui)
-        # We want to prevent the loading of pylab from polluting the user's
- # namespace as shown by the %who* magics, so we execute the activation
- # code in an empty namespace, and we update *both* user_ns and
- # user_ns_hidden with this information.
- ns = {}
- import_pylab(ns, import_all)
- # warn about clobbered names
- ignored = {"__builtins__"}
- both = set(ns).intersection(self.user_ns).difference(ignored)
- clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
- self.user_ns.update(ns)
- self.user_ns_hidden.update(ns)
- return gui, backend, clobbered
-
- #-------------------------------------------------------------------------
- # Utilities
- #-------------------------------------------------------------------------
-
- def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
- """Expand python variables in a string.
-
- The depth argument indicates how many frames above the caller should
- be walked to look for the local namespace where to expand variables.
-
- The global namespace for expansion is always the user's interactive
- namespace.
- """
- ns = self.user_ns.copy()
- try:
- frame = sys._getframe(depth+1)
- except ValueError:
- # This is thrown if there aren't that many frames on the stack,
- # e.g. if a script called run_line_magic() directly.
- pass
- else:
- ns.update(frame.f_locals)
-
- try:
- # We have to use .vformat() here, because 'self' is a valid and common
- # name, and expanding **ns for .format() would make it collide with
- # the 'self' argument of the method.
- cmd = formatter.vformat(cmd, args=[], kwargs=ns)
- except Exception:
- # if formatter couldn't format, just let it go untransformed
- pass
- return cmd
-
- def mktempfile(self, data=None, prefix='ipython_edit_'):
- """Make a new tempfile and return its filename.
-
-        This makes a call to tempfile.mkstemp (in a directory created by tempfile.mkdtemp),
- but it registers the created filename internally so ipython cleans it up
- at exit time.
-
- Optional inputs:
-
- - data(None): if data is given, it gets written out to the temp file
- immediately, and the file is closed again."""
-
- dirname = tempfile.mkdtemp(prefix=prefix)
- self.tempdirs.append(dirname)
-
- handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
- os.close(handle) # On Windows, there can only be one open handle on a file
- self.tempfiles.append(filename)
-
- if data:
- tmp_file = open(filename,'w')
- tmp_file.write(data)
- tmp_file.close()
- return filename
-
+        # We want to prevent the loading of pylab from polluting the user's
+ # namespace as shown by the %who* magics, so we execute the activation
+ # code in an empty namespace, and we update *both* user_ns and
+ # user_ns_hidden with this information.
+ ns = {}
+ import_pylab(ns, import_all)
+ # warn about clobbered names
+ ignored = {"__builtins__"}
+ both = set(ns).intersection(self.user_ns).difference(ignored)
+ clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
+ self.user_ns.update(ns)
+ self.user_ns_hidden.update(ns)
+ return gui, backend, clobbered
+
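A minimal sketch of the clobbered-names report, assuming a session with matplotlib and numpy installed:

# Sketch: enable_pylab returns which user names the bulk import overwrote.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    gui, backend, clobbered = ip.enable_pylab(import_all=True)
    if clobbered:
        print('overwrote:', sorted(clobbered))
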
+ #-------------------------------------------------------------------------
+ # Utilities
+ #-------------------------------------------------------------------------
+
+ def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
+ """Expand python variables in a string.
+
+ The depth argument indicates how many frames above the caller should
+ be walked to look for the local namespace where to expand variables.
+
+ The global namespace for expansion is always the user's interactive
+ namespace.
+ """
+ ns = self.user_ns.copy()
+ try:
+ frame = sys._getframe(depth+1)
+ except ValueError:
+ # This is thrown if there aren't that many frames on the stack,
+ # e.g. if a script called run_line_magic() directly.
+ pass
+ else:
+ ns.update(frame.f_locals)
+
+ try:
+ # We have to use .vformat() here, because 'self' is a valid and common
+ # name, and expanding **ns for .format() would make it collide with
+ # the 'self' argument of the method.
+ cmd = formatter.vformat(cmd, args=[], kwargs=ns)
+ except Exception:
+ # if formatter couldn't format, just let it go untransformed
+ pass
+ return cmd
+
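A minimal sketch of the expansion var_expand performs, assuming an active session (DollarFormatter accepts both $name and {expression} fields):

# Sketch: expanding user-namespace variables into a command string.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.user_ns['path'] = '/tmp'
    print(ip.var_expand('ls $path/{1 + 1}'))    # -> 'ls /tmp/2'
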
+ def mktempfile(self, data=None, prefix='ipython_edit_'):
+ """Make a new tempfile and return its filename.
+
+        This makes a call to tempfile.mkstemp (in a directory created by tempfile.mkdtemp),
+ but it registers the created filename internally so ipython cleans it up
+ at exit time.
+
+ Optional inputs:
+
+ - data(None): if data is given, it gets written out to the temp file
+ immediately, and the file is closed again."""
+
+ dirname = tempfile.mkdtemp(prefix=prefix)
+ self.tempdirs.append(dirname)
+
+ handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
+ os.close(handle) # On Windows, there can only be one open handle on a file
+ self.tempfiles.append(filename)
+
+ if data:
+ tmp_file = open(filename,'w')
+ tmp_file.write(data)
+ tmp_file.close()
+ return filename
+
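A minimal usage sketch, assuming an active session; the returned file is cleaned up by atexit_operations:

# Sketch: create a tracked temporary file with initial contents.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    fname = ip.mktempfile(data='print("hello")\n')
    print(fname)        # e.g. /tmp/ipython_edit_XXXX/XXXX.py (random names)
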
@undoc
- def write(self,data):
+ def write(self,data):
"""DEPRECATED: Write a string to the default output"""
warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
DeprecationWarning, stacklevel=2)
sys.stdout.write(data)
-
+
@undoc
- def write_err(self,data):
+ def write_err(self,data):
"""DEPRECATED: Write a string to the default error output"""
warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
DeprecationWarning, stacklevel=2)
sys.stderr.write(data)
-
- def ask_yes_no(self, prompt, default=None, interrupt=None):
- if self.quiet:
- return True
- return ask_yes_no(prompt,default,interrupt)
-
- def show_usage(self):
- """Show a usage message"""
- page.page(IPython.core.usage.interactive_usage)
-
- def extract_input_lines(self, range_str, raw=False):
- """Return as a string a set of input history slices.
-
- Parameters
- ----------
- range_str : string
- The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
- since this function is for use by magic functions which get their
- arguments as strings. The number before the / is the session
- number: ~n goes n back from the current session.
-
- raw : bool, optional
- By default, the processed input is used. If this is true, the raw
- input history is used instead.
-
- Notes
- -----
-
- Slices can be described with two notations:
-
- * ``N:M`` -> standard python form, means including items N...(M-1).
- * ``N-M`` -> include items N..M (closed endpoint).
- """
- lines = self.history_manager.get_range_by_str(range_str, raw=raw)
- return "\n".join(x for _, _, x in lines)
-
- def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
- """Get a code string from history, file, url, or a string or macro.
-
- This is mainly used by magic functions.
-
- Parameters
- ----------
-
- target : str
-
-          A string specifying code to retrieve. This will be tried, in order,
-          as: ranges of input history (see %history for syntax), url,
- corresponding .py file, filename, or an expression evaluating to a
- string or Macro in the user namespace.
-
- raw : bool
- If true (default), retrieve raw history. Has no effect on the other
- retrieval mechanisms.
-
- py_only : bool (default False)
- Only try to fetch python code, do not try alternative methods to decode file
- if unicode fails.
-
- Returns
- -------
- A string of code.
-
- ValueError is raised if nothing is found, and TypeError if it evaluates
- to an object of another type. In each case, .args[0] is a printable
- message.
- """
- code = self.extract_input_lines(target, raw=raw) # Grab history
- if code:
- return code
- try:
+
+ def ask_yes_no(self, prompt, default=None, interrupt=None):
+ if self.quiet:
+ return True
+ return ask_yes_no(prompt,default,interrupt)
+
+ def show_usage(self):
+ """Show a usage message"""
+ page.page(IPython.core.usage.interactive_usage)
+
+ def extract_input_lines(self, range_str, raw=False):
+ """Return as a string a set of input history slices.
+
+ Parameters
+ ----------
+ range_str : string
+ The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
+ since this function is for use by magic functions which get their
+ arguments as strings. The number before the / is the session
+ number: ~n goes n back from the current session.
+
+ raw : bool, optional
+ By default, the processed input is used. If this is true, the raw
+ input history is used instead.
+
+ Notes
+ -----
+
+ Slices can be described with two notations:
+
+ * ``N:M`` -> standard python form, means including items N...(M-1).
+ * ``N-M`` -> include items N..M (closed endpoint).
+ """
+ lines = self.history_manager.get_range_by_str(range_str, raw=raw)
+ return "\n".join(x for _, _, x in lines)
+
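The two slice notations from the docstring, as a minimal sketch assuming a session with at least three prior inputs:

# Sketch: half-open N:M versus closed N-M history slices.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    print(ip.extract_input_lines('1:3'))    # inputs 1 and 2
    print(ip.extract_input_lines('1-3'))    # inputs 1, 2 and 3
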
+ def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
+ """Get a code string from history, file, url, or a string or macro.
+
+ This is mainly used by magic functions.
+
+ Parameters
+ ----------
+
+ target : str
+
+          A string specifying code to retrieve. This will be tried, in order,
+          as: ranges of input history (see %history for syntax), url,
+ corresponding .py file, filename, or an expression evaluating to a
+ string or Macro in the user namespace.
+
+ raw : bool
+ If true (default), retrieve raw history. Has no effect on the other
+ retrieval mechanisms.
+
+ py_only : bool (default False)
+ Only try to fetch python code, do not try alternative methods to decode file
+ if unicode fails.
+
+ Returns
+ -------
+ A string of code.
+
+ ValueError is raised if nothing is found, and TypeError if it evaluates
+ to an object of another type. In each case, .args[0] is a printable
+ message.
+ """
+ code = self.extract_input_lines(target, raw=raw) # Grab history
+ if code:
+ return code
+ try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
- except UnicodeDecodeError:
- if not py_only :
- # Deferred import
- try:
- from urllib.request import urlopen # Py3
- except ImportError:
- from urllib import urlopen
- response = urlopen(target)
- return response.read().decode('latin1')
+ except UnicodeDecodeError:
+ if not py_only :
+ # Deferred import
+ try:
+ from urllib.request import urlopen # Py3
+ except ImportError:
+ from urllib import urlopen
+ response = urlopen(target)
+ return response.read().decode('latin1')
        raise ValueError(("'%s' seems to be unreadable.") % target)
-
- potential_target = [target]
- try :
- potential_target.insert(0,get_py_filename(target))
- except IOError:
- pass
-
- for tgt in potential_target :
- if os.path.isfile(tgt): # Read file
- try :
- return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
- except UnicodeDecodeError :
- if not py_only :
- with io_open(tgt,'r', encoding='latin1') as f :
- return f.read()
-                    raise ValueError(("'%s' seems to be unreadable.") % target)
- elif os.path.isdir(os.path.expanduser(tgt)):
- raise ValueError("'%s' is a directory, not a regular file." % target)
-
- if search_ns:
- # Inspect namespace to load object source
- object_info = self.object_inspect(target, detail_level=1)
- if object_info['found'] and object_info['source']:
- return object_info['source']
-
- try: # User namespace
- codeobj = eval(target, self.user_ns)
- except Exception:
- raise ValueError(("'%s' was not found in history, as a file, url, "
- "nor in the user namespace.") % target)
-
- if isinstance(codeobj, string_types):
- return codeobj
- elif isinstance(codeobj, Macro):
- return codeobj.value
-
- raise TypeError("%s is neither a string nor a macro." % target,
- codeobj)
-
- #-------------------------------------------------------------------------
- # Things related to IPython exiting
- #-------------------------------------------------------------------------
- def atexit_operations(self):
- """This will be executed at the time of exit.
-
- Cleanup operations and saving of persistent data that is done
- unconditionally by IPython should be performed here.
-
- For things that may depend on startup flags or platform specifics (such
- as having readline or not), register a separate atexit function in the
-        code that has the appropriate information, rather than trying to
-        clutter this method.
- """
- # Close the history session (this stores the end time and line count)
- # this must be *before* the tempfile cleanup, in case of temporary
- # history db
- self.history_manager.end_session()
-
- # Cleanup all tempfiles and folders left around
- for tfile in self.tempfiles:
- try:
- os.unlink(tfile)
- except OSError:
- pass
-
- for tdir in self.tempdirs:
- try:
- os.rmdir(tdir)
- except OSError:
- pass
-
- # Clear all user namespaces to release all references cleanly.
- self.reset(new_session=False)
-
- # Run user hooks
- self.hooks.shutdown_hook()
-
- def cleanup(self):
- self.restore_sys_module_state()
-
-
+
+ potential_target = [target]
+ try :
+ potential_target.insert(0,get_py_filename(target))
+ except IOError:
+ pass
+
+ for tgt in potential_target :
+ if os.path.isfile(tgt): # Read file
+ try :
+ return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
+ except UnicodeDecodeError :
+ if not py_only :
+ with io_open(tgt,'r', encoding='latin1') as f :
+ return f.read()
+                    raise ValueError(("'%s' seems to be unreadable.") % target)
+ elif os.path.isdir(os.path.expanduser(tgt)):
+ raise ValueError("'%s' is a directory, not a regular file." % target)
+
+ if search_ns:
+ # Inspect namespace to load object source
+ object_info = self.object_inspect(target, detail_level=1)
+ if object_info['found'] and object_info['source']:
+ return object_info['source']
+
+ try: # User namespace
+ codeobj = eval(target, self.user_ns)
+ except Exception:
+ raise ValueError(("'%s' was not found in history, as a file, url, "
+ "nor in the user namespace.") % target)
+
+ if isinstance(codeobj, string_types):
+ return codeobj
+ elif isinstance(codeobj, Macro):
+ return codeobj.value
+
+ raise TypeError("%s is neither a string nor a macro." % target,
+ codeobj)
+
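A minimal sketch of the user-namespace branch of the lookup order, assuming an active session ('snippet' is a hypothetical name):

# Sketch: find_user_code falling through to the user namespace.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.user_ns['snippet'] = 'print("hi")'
    print(ip.find_user_code('snippet'))     # returns the string itself
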
+ #-------------------------------------------------------------------------
+ # Things related to IPython exiting
+ #-------------------------------------------------------------------------
+ def atexit_operations(self):
+ """This will be executed at the time of exit.
+
+ Cleanup operations and saving of persistent data that is done
+ unconditionally by IPython should be performed here.
+
+ For things that may depend on startup flags or platform specifics (such
+ as having readline or not), register a separate atexit function in the
+        code that has the appropriate information, rather than trying to
+        clutter this method.
+ """
+ # Close the history session (this stores the end time and line count)
+ # this must be *before* the tempfile cleanup, in case of temporary
+ # history db
+ self.history_manager.end_session()
+
+ # Cleanup all tempfiles and folders left around
+ for tfile in self.tempfiles:
+ try:
+ os.unlink(tfile)
+ except OSError:
+ pass
+
+ for tdir in self.tempdirs:
+ try:
+ os.rmdir(tdir)
+ except OSError:
+ pass
+
+ # Clear all user namespaces to release all references cleanly.
+ self.reset(new_session=False)
+
+ # Run user hooks
+ self.hooks.shutdown_hook()
+
+ def cleanup(self):
+ self.restore_sys_module_state()
+
+
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
-class InteractiveShellABC(with_metaclass(abc.ABCMeta, object)):
- """An abstract base class for InteractiveShell."""
-
-InteractiveShellABC.register(InteractiveShell)
+class InteractiveShellABC(with_metaclass(abc.ABCMeta, object)):
+ """An abstract base class for InteractiveShell."""
+
+InteractiveShellABC.register(InteractiveShell)
diff --git a/contrib/python/ipython/py2/IPython/core/latex_symbols.py b/contrib/python/ipython/py2/IPython/core/latex_symbols.py
index 0319b99ff8..ca7200bb59 100644
--- a/contrib/python/ipython/py2/IPython/core/latex_symbols.py
+++ b/contrib/python/ipython/py2/IPython/core/latex_symbols.py
@@ -1,1300 +1,1300 @@
-# encoding: utf-8
-
-# DO NOT EDIT THIS FILE BY HAND.
-
-# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
-
-# This file is autogenerated from the file:
-# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
-# This original list is filtered to remove any unicode characters that are not valid
-# Python identifiers.
-
-latex_symbols = {
-
- "\\^a" : "ᵃ",
- "\\^b" : "ᵇ",
- "\\^c" : "ᶜ",
- "\\^d" : "ᵈ",
- "\\^e" : "ᵉ",
- "\\^f" : "ᶠ",
- "\\^g" : "ᵍ",
- "\\^h" : "ʰ",
- "\\^i" : "ⁱ",
- "\\^j" : "ʲ",
- "\\^k" : "ᵏ",
- "\\^l" : "ˡ",
- "\\^m" : "ᵐ",
- "\\^n" : "ⁿ",
- "\\^o" : "ᵒ",
- "\\^p" : "ᵖ",
- "\\^r" : "ʳ",
- "\\^s" : "ˢ",
- "\\^t" : "ᵗ",
- "\\^u" : "ᵘ",
- "\\^v" : "ᵛ",
- "\\^w" : "ʷ",
- "\\^x" : "ˣ",
- "\\^y" : "ʸ",
- "\\^z" : "ᶻ",
- "\\^A" : "ᴬ",
- "\\^B" : "ᴮ",
- "\\^D" : "ᴰ",
- "\\^E" : "ᴱ",
- "\\^G" : "ᴳ",
- "\\^H" : "ᴴ",
- "\\^I" : "ᴵ",
- "\\^J" : "ᴶ",
- "\\^K" : "ᴷ",
- "\\^L" : "ᴸ",
- "\\^M" : "ᴹ",
- "\\^N" : "ᴺ",
- "\\^O" : "ᴼ",
- "\\^P" : "ᴾ",
- "\\^R" : "ᴿ",
- "\\^T" : "ᵀ",
- "\\^U" : "ᵁ",
- "\\^V" : "ⱽ",
- "\\^W" : "ᵂ",
- "\\^alpha" : "ᵅ",
- "\\^beta" : "ᵝ",
- "\\^gamma" : "ᵞ",
- "\\^delta" : "ᵟ",
- "\\^epsilon" : "ᵋ",
- "\\^theta" : "ᶿ",
- "\\^iota" : "ᶥ",
- "\\^phi" : "ᵠ",
- "\\^chi" : "ᵡ",
- "\\^Phi" : "ᶲ",
- "\\_a" : "ₐ",
- "\\_e" : "ₑ",
- "\\_h" : "ₕ",
- "\\_i" : "ᵢ",
- "\\_j" : "ⱼ",
- "\\_k" : "ₖ",
- "\\_l" : "ₗ",
- "\\_m" : "ₘ",
- "\\_n" : "ₙ",
- "\\_o" : "ₒ",
- "\\_p" : "ₚ",
- "\\_r" : "ᵣ",
- "\\_s" : "ₛ",
- "\\_t" : "ₜ",
- "\\_u" : "ᵤ",
- "\\_v" : "ᵥ",
- "\\_x" : "ₓ",
- "\\_schwa" : "ₔ",
- "\\_beta" : "ᵦ",
- "\\_gamma" : "ᵧ",
- "\\_rho" : "ᵨ",
- "\\_phi" : "ᵩ",
- "\\_chi" : "ᵪ",
- "\\hbar" : "ħ",
- "\\sout" : "̶",
- "\\textordfeminine" : "ª",
- "\\cdotp" : "·",
- "\\textordmasculine" : "º",
- "\\AA" : "Å",
- "\\AE" : "Æ",
- "\\DH" : "Ð",
- "\\O" : "Ø",
- "\\TH" : "Þ",
- "\\ss" : "ß",
- "\\aa" : "å",
- "\\ae" : "æ",
- "\\eth" : "ð",
- "\\o" : "ø",
- "\\th" : "þ",
- "\\DJ" : "Đ",
- "\\dj" : "đ",
- "\\Elzxh" : "ħ",
- "\\imath" : "ı",
- "\\L" : "Ł",
- "\\l" : "ł",
- "\\NG" : "Ŋ",
- "\\ng" : "ŋ",
- "\\OE" : "Œ",
- "\\oe" : "œ",
- "\\texthvlig" : "ƕ",
- "\\textnrleg" : "ƞ",
- "\\textdoublepipe" : "ǂ",
- "\\Elztrna" : "ɐ",
- "\\Elztrnsa" : "ɒ",
- "\\Elzopeno" : "ɔ",
- "\\Elzrtld" : "ɖ",
- "\\Elzschwa" : "ə",
- "\\varepsilon" : "ɛ",
- "\\Elzpgamma" : "ɣ",
- "\\Elzpbgam" : "ɤ",
- "\\Elztrnh" : "ɥ",
- "\\Elzbtdl" : "ɬ",
- "\\Elzrtll" : "ɭ",
- "\\Elztrnm" : "ɯ",
- "\\Elztrnmlr" : "ɰ",
- "\\Elzltlmr" : "ɱ",
- "\\Elzltln" : "ɲ",
- "\\Elzrtln" : "ɳ",
- "\\Elzclomeg" : "ɷ",
- "\\textphi" : "ɸ",
- "\\Elztrnr" : "ɹ",
- "\\Elztrnrl" : "ɺ",
- "\\Elzrttrnr" : "ɻ",
- "\\Elzrl" : "ɼ",
- "\\Elzrtlr" : "ɽ",
- "\\Elzfhr" : "ɾ",
- "\\Elzrtls" : "ʂ",
- "\\Elzesh" : "ʃ",
- "\\Elztrnt" : "ʇ",
- "\\Elzrtlt" : "ʈ",
- "\\Elzpupsil" : "ʊ",
- "\\Elzpscrv" : "ʋ",
- "\\Elzinvv" : "ʌ",
- "\\Elzinvw" : "ʍ",
- "\\Elztrny" : "ʎ",
- "\\Elzrtlz" : "ʐ",
- "\\Elzyogh" : "ʒ",
- "\\Elzglst" : "ʔ",
- "\\Elzreglst" : "ʕ",
- "\\Elzinglst" : "ʖ",
- "\\textturnk" : "ʞ",
- "\\Elzdyogh" : "ʤ",
- "\\Elztesh" : "ʧ",
- "\\rasp" : "ʼ",
- "\\textasciicaron" : "ˇ",
- "\\Elzverts" : "ˈ",
- "\\Elzverti" : "ˌ",
- "\\Elzlmrk" : "ː",
- "\\Elzhlmrk" : "ˑ",
- "\\grave" : "̀",
- "\\acute" : "́",
- "\\hat" : "̂",
- "\\tilde" : "̃",
- "\\bar" : "̄",
- "\\breve" : "̆",
- "\\dot" : "̇",
- "\\ddot" : "̈",
- "\\ocirc" : "̊",
- "\\H" : "̋",
- "\\check" : "̌",
- "\\Elzpalh" : "̡",
- "\\Elzrh" : "̢",
- "\\c" : "̧",
- "\\k" : "̨",
- "\\Elzsbbrg" : "̪",
- "\\Elzxl" : "̵",
- "\\Elzbar" : "̶",
- "\\Alpha" : "Α",
- "\\Beta" : "Β",
- "\\Gamma" : "Γ",
- "\\Delta" : "Δ",
- "\\Epsilon" : "Ε",
- "\\Zeta" : "Ζ",
- "\\Eta" : "Η",
- "\\Theta" : "Θ",
- "\\Iota" : "Ι",
- "\\Kappa" : "Κ",
- "\\Lambda" : "Λ",
- "\\Xi" : "Ξ",
- "\\Pi" : "Π",
- "\\Rho" : "Ρ",
- "\\Sigma" : "Σ",
- "\\Tau" : "Τ",
- "\\Upsilon" : "Υ",
- "\\Phi" : "Φ",
- "\\Chi" : "Χ",
- "\\Psi" : "Ψ",
- "\\Omega" : "Ω",
- "\\alpha" : "α",
- "\\beta" : "β",
- "\\gamma" : "γ",
- "\\delta" : "δ",
- "\\zeta" : "ζ",
- "\\eta" : "η",
- "\\theta" : "θ",
- "\\iota" : "ι",
- "\\kappa" : "κ",
- "\\lambda" : "λ",
- "\\mu" : "μ",
- "\\nu" : "ν",
- "\\xi" : "ξ",
- "\\pi" : "π",
- "\\rho" : "ρ",
- "\\varsigma" : "ς",
- "\\sigma" : "σ",
- "\\tau" : "τ",
- "\\upsilon" : "υ",
- "\\varphi" : "φ",
- "\\chi" : "χ",
- "\\psi" : "ψ",
- "\\omega" : "ω",
- "\\vartheta" : "ϑ",
- "\\phi" : "ϕ",
- "\\varpi" : "ϖ",
- "\\Stigma" : "Ϛ",
- "\\Digamma" : "Ϝ",
- "\\digamma" : "ϝ",
- "\\Koppa" : "Ϟ",
- "\\Sampi" : "Ϡ",
- "\\varkappa" : "ϰ",
- "\\varrho" : "ϱ",
- "\\textTheta" : "ϴ",
- "\\epsilon" : "ϵ",
- "\\dddot" : "⃛",
- "\\ddddot" : "⃜",
- "\\hslash" : "ℏ",
- "\\Im" : "ℑ",
- "\\ell" : "ℓ",
- "\\wp" : "℘",
- "\\Re" : "ℜ",
- "\\aleph" : "ℵ",
- "\\beth" : "ℶ",
- "\\gimel" : "ℷ",
- "\\daleth" : "ℸ",
- "\\BbbPi" : "ℿ",
- "\\Zbar" : "Ƶ",
- "\\overbar" : "̅",
- "\\ovhook" : "̉",
- "\\candra" : "̐",
- "\\oturnedcomma" : "̒",
- "\\ocommatopright" : "̕",
- "\\droang" : "̚",
- "\\wideutilde" : "̰",
- "\\underbar" : "̱",
- "\\not" : "̸",
- "\\upMu" : "Μ",
- "\\upNu" : "Ν",
- "\\upOmicron" : "Ο",
- "\\upepsilon" : "ε",
- "\\upomicron" : "ο",
- "\\upvarbeta" : "ϐ",
- "\\upoldKoppa" : "Ϙ",
- "\\upoldkoppa" : "ϙ",
- "\\upstigma" : "ϛ",
- "\\upkoppa" : "ϟ",
- "\\upsampi" : "ϡ",
- "\\tieconcat" : "⁀",
- "\\leftharpoonaccent" : "⃐",
- "\\rightharpoonaccent" : "⃑",
- "\\vertoverlay" : "⃒",
- "\\overleftarrow" : "⃖",
- "\\vec" : "⃗",
- "\\overleftrightarrow" : "⃡",
- "\\annuity" : "⃧",
- "\\threeunderdot" : "⃨",
- "\\widebridgeabove" : "⃩",
- "\\BbbC" : "ℂ",
- "\\Eulerconst" : "ℇ",
- "\\mscrg" : "ℊ",
- "\\mscrH" : "ℋ",
- "\\mfrakH" : "ℌ",
- "\\BbbH" : "ℍ",
- "\\Planckconst" : "ℎ",
- "\\mscrI" : "ℐ",
- "\\mscrL" : "ℒ",
- "\\BbbN" : "ℕ",
- "\\BbbP" : "ℙ",
- "\\BbbQ" : "ℚ",
- "\\mscrR" : "ℛ",
- "\\BbbR" : "ℝ",
- "\\BbbZ" : "ℤ",
- "\\mfrakZ" : "ℨ",
- "\\Angstrom" : "Å",
- "\\mscrB" : "ℬ",
- "\\mfrakC" : "ℭ",
- "\\mscre" : "ℯ",
- "\\mscrE" : "ℰ",
- "\\mscrF" : "ℱ",
- "\\Finv" : "Ⅎ",
- "\\mscrM" : "ℳ",
- "\\mscro" : "ℴ",
- "\\Bbbgamma" : "ℽ",
- "\\BbbGamma" : "ℾ",
- "\\mitBbbD" : "ⅅ",
- "\\mitBbbd" : "ⅆ",
- "\\mitBbbe" : "ⅇ",
- "\\mitBbbi" : "ⅈ",
- "\\mitBbbj" : "ⅉ",
- "\\mbfA" : "𝐀",
- "\\mbfB" : "𝐁",
- "\\mbfC" : "𝐂",
- "\\mbfD" : "𝐃",
- "\\mbfE" : "𝐄",
- "\\mbfF" : "𝐅",
- "\\mbfG" : "𝐆",
- "\\mbfH" : "𝐇",
- "\\mbfI" : "𝐈",
- "\\mbfJ" : "𝐉",
- "\\mbfK" : "𝐊",
- "\\mbfL" : "𝐋",
- "\\mbfM" : "𝐌",
- "\\mbfN" : "𝐍",
- "\\mbfO" : "𝐎",
- "\\mbfP" : "𝐏",
- "\\mbfQ" : "𝐐",
- "\\mbfR" : "𝐑",
- "\\mbfS" : "𝐒",
- "\\mbfT" : "𝐓",
- "\\mbfU" : "𝐔",
- "\\mbfV" : "𝐕",
- "\\mbfW" : "𝐖",
- "\\mbfX" : "𝐗",
- "\\mbfY" : "𝐘",
- "\\mbfZ" : "𝐙",
- "\\mbfa" : "𝐚",
- "\\mbfb" : "𝐛",
- "\\mbfc" : "𝐜",
- "\\mbfd" : "𝐝",
- "\\mbfe" : "𝐞",
- "\\mbff" : "𝐟",
- "\\mbfg" : "𝐠",
- "\\mbfh" : "𝐡",
- "\\mbfi" : "𝐢",
- "\\mbfj" : "𝐣",
- "\\mbfk" : "𝐤",
- "\\mbfl" : "𝐥",
- "\\mbfm" : "𝐦",
- "\\mbfn" : "𝐧",
- "\\mbfo" : "𝐨",
- "\\mbfp" : "𝐩",
- "\\mbfq" : "𝐪",
- "\\mbfr" : "𝐫",
- "\\mbfs" : "𝐬",
- "\\mbft" : "𝐭",
- "\\mbfu" : "𝐮",
- "\\mbfv" : "𝐯",
- "\\mbfw" : "𝐰",
- "\\mbfx" : "𝐱",
- "\\mbfy" : "𝐲",
- "\\mbfz" : "𝐳",
- "\\mitA" : "𝐴",
- "\\mitB" : "𝐵",
- "\\mitC" : "𝐶",
- "\\mitD" : "𝐷",
- "\\mitE" : "𝐸",
- "\\mitF" : "𝐹",
- "\\mitG" : "𝐺",
- "\\mitH" : "𝐻",
- "\\mitI" : "𝐼",
- "\\mitJ" : "𝐽",
- "\\mitK" : "𝐾",
- "\\mitL" : "𝐿",
- "\\mitM" : "𝑀",
- "\\mitN" : "𝑁",
- "\\mitO" : "𝑂",
- "\\mitP" : "𝑃",
- "\\mitQ" : "𝑄",
- "\\mitR" : "𝑅",
- "\\mitS" : "𝑆",
- "\\mitT" : "𝑇",
- "\\mitU" : "𝑈",
- "\\mitV" : "𝑉",
- "\\mitW" : "𝑊",
- "\\mitX" : "𝑋",
- "\\mitY" : "𝑌",
- "\\mitZ" : "𝑍",
- "\\mita" : "𝑎",
- "\\mitb" : "𝑏",
- "\\mitc" : "𝑐",
- "\\mitd" : "𝑑",
- "\\mite" : "𝑒",
- "\\mitf" : "𝑓",
- "\\mitg" : "𝑔",
- "\\miti" : "𝑖",
- "\\mitj" : "𝑗",
- "\\mitk" : "𝑘",
- "\\mitl" : "𝑙",
- "\\mitm" : "𝑚",
- "\\mitn" : "𝑛",
- "\\mito" : "𝑜",
- "\\mitp" : "𝑝",
- "\\mitq" : "𝑞",
- "\\mitr" : "𝑟",
- "\\mits" : "𝑠",
- "\\mitt" : "𝑡",
- "\\mitu" : "𝑢",
- "\\mitv" : "𝑣",
- "\\mitw" : "𝑤",
- "\\mitx" : "𝑥",
- "\\mity" : "𝑦",
- "\\mitz" : "𝑧",
- "\\mbfitA" : "𝑨",
- "\\mbfitB" : "𝑩",
- "\\mbfitC" : "𝑪",
- "\\mbfitD" : "𝑫",
- "\\mbfitE" : "𝑬",
- "\\mbfitF" : "𝑭",
- "\\mbfitG" : "𝑮",
- "\\mbfitH" : "𝑯",
- "\\mbfitI" : "𝑰",
- "\\mbfitJ" : "𝑱",
- "\\mbfitK" : "𝑲",
- "\\mbfitL" : "𝑳",
- "\\mbfitM" : "𝑴",
- "\\mbfitN" : "𝑵",
- "\\mbfitO" : "𝑶",
- "\\mbfitP" : "𝑷",
- "\\mbfitQ" : "𝑸",
- "\\mbfitR" : "𝑹",
- "\\mbfitS" : "𝑺",
- "\\mbfitT" : "𝑻",
- "\\mbfitU" : "𝑼",
- "\\mbfitV" : "𝑽",
- "\\mbfitW" : "𝑾",
- "\\mbfitX" : "𝑿",
- "\\mbfitY" : "𝒀",
- "\\mbfitZ" : "𝒁",
- "\\mbfita" : "𝒂",
- "\\mbfitb" : "𝒃",
- "\\mbfitc" : "𝒄",
- "\\mbfitd" : "𝒅",
- "\\mbfite" : "𝒆",
- "\\mbfitf" : "𝒇",
- "\\mbfitg" : "𝒈",
- "\\mbfith" : "𝒉",
- "\\mbfiti" : "𝒊",
- "\\mbfitj" : "𝒋",
- "\\mbfitk" : "𝒌",
- "\\mbfitl" : "𝒍",
- "\\mbfitm" : "𝒎",
- "\\mbfitn" : "𝒏",
- "\\mbfito" : "𝒐",
- "\\mbfitp" : "𝒑",
- "\\mbfitq" : "𝒒",
- "\\mbfitr" : "𝒓",
- "\\mbfits" : "𝒔",
- "\\mbfitt" : "𝒕",
- "\\mbfitu" : "𝒖",
- "\\mbfitv" : "𝒗",
- "\\mbfitw" : "𝒘",
- "\\mbfitx" : "𝒙",
- "\\mbfity" : "𝒚",
- "\\mbfitz" : "𝒛",
- "\\mscrA" : "𝒜",
- "\\mscrC" : "𝒞",
- "\\mscrD" : "𝒟",
- "\\mscrG" : "𝒢",
- "\\mscrJ" : "𝒥",
- "\\mscrK" : "𝒦",
- "\\mscrN" : "𝒩",
- "\\mscrO" : "𝒪",
- "\\mscrP" : "𝒫",
- "\\mscrQ" : "𝒬",
- "\\mscrS" : "𝒮",
- "\\mscrT" : "𝒯",
- "\\mscrU" : "𝒰",
- "\\mscrV" : "𝒱",
- "\\mscrW" : "𝒲",
- "\\mscrX" : "𝒳",
- "\\mscrY" : "𝒴",
- "\\mscrZ" : "𝒵",
- "\\mscra" : "𝒶",
- "\\mscrb" : "𝒷",
- "\\mscrc" : "𝒸",
- "\\mscrd" : "𝒹",
- "\\mscrf" : "𝒻",
- "\\mscrh" : "𝒽",
- "\\mscri" : "𝒾",
- "\\mscrj" : "𝒿",
- "\\mscrk" : "𝓀",
- "\\mscrm" : "𝓂",
- "\\mscrn" : "𝓃",
- "\\mscrp" : "𝓅",
- "\\mscrq" : "𝓆",
- "\\mscrr" : "𝓇",
- "\\mscrs" : "𝓈",
- "\\mscrt" : "𝓉",
- "\\mscru" : "𝓊",
- "\\mscrv" : "𝓋",
- "\\mscrw" : "𝓌",
- "\\mscrx" : "𝓍",
- "\\mscry" : "𝓎",
- "\\mscrz" : "𝓏",
- "\\mbfscrA" : "𝓐",
- "\\mbfscrB" : "𝓑",
- "\\mbfscrC" : "𝓒",
- "\\mbfscrD" : "𝓓",
- "\\mbfscrE" : "𝓔",
- "\\mbfscrF" : "𝓕",
- "\\mbfscrG" : "𝓖",
- "\\mbfscrH" : "𝓗",
- "\\mbfscrI" : "𝓘",
- "\\mbfscrJ" : "𝓙",
- "\\mbfscrK" : "𝓚",
- "\\mbfscrL" : "𝓛",
- "\\mbfscrM" : "𝓜",
- "\\mbfscrN" : "𝓝",
- "\\mbfscrO" : "𝓞",
- "\\mbfscrP" : "𝓟",
- "\\mbfscrQ" : "𝓠",
- "\\mbfscrR" : "𝓡",
- "\\mbfscrS" : "𝓢",
- "\\mbfscrT" : "𝓣",
- "\\mbfscrU" : "𝓤",
- "\\mbfscrV" : "𝓥",
- "\\mbfscrW" : "𝓦",
- "\\mbfscrX" : "𝓧",
- "\\mbfscrY" : "𝓨",
- "\\mbfscrZ" : "𝓩",
- "\\mbfscra" : "𝓪",
- "\\mbfscrb" : "𝓫",
- "\\mbfscrc" : "𝓬",
- "\\mbfscrd" : "𝓭",
- "\\mbfscre" : "𝓮",
- "\\mbfscrf" : "𝓯",
- "\\mbfscrg" : "𝓰",
- "\\mbfscrh" : "𝓱",
- "\\mbfscri" : "𝓲",
- "\\mbfscrj" : "𝓳",
- "\\mbfscrk" : "𝓴",
- "\\mbfscrl" : "𝓵",
- "\\mbfscrm" : "𝓶",
- "\\mbfscrn" : "𝓷",
- "\\mbfscro" : "𝓸",
- "\\mbfscrp" : "𝓹",
- "\\mbfscrq" : "𝓺",
- "\\mbfscrr" : "𝓻",
- "\\mbfscrs" : "𝓼",
- "\\mbfscrt" : "𝓽",
- "\\mbfscru" : "𝓾",
- "\\mbfscrv" : "𝓿",
- "\\mbfscrw" : "𝔀",
- "\\mbfscrx" : "𝔁",
- "\\mbfscry" : "𝔂",
- "\\mbfscrz" : "𝔃",
- "\\mfrakA" : "𝔄",
- "\\mfrakB" : "𝔅",
- "\\mfrakD" : "𝔇",
- "\\mfrakE" : "𝔈",
- "\\mfrakF" : "𝔉",
- "\\mfrakG" : "𝔊",
- "\\mfrakJ" : "𝔍",
- "\\mfrakK" : "𝔎",
- "\\mfrakL" : "𝔏",
- "\\mfrakM" : "𝔐",
- "\\mfrakN" : "𝔑",
- "\\mfrakO" : "𝔒",
- "\\mfrakP" : "𝔓",
- "\\mfrakQ" : "𝔔",
- "\\mfrakS" : "𝔖",
- "\\mfrakT" : "𝔗",
- "\\mfrakU" : "𝔘",
- "\\mfrakV" : "𝔙",
- "\\mfrakW" : "𝔚",
- "\\mfrakX" : "𝔛",
- "\\mfrakY" : "𝔜",
- "\\mfraka" : "𝔞",
- "\\mfrakb" : "𝔟",
- "\\mfrakc" : "𝔠",
- "\\mfrakd" : "𝔡",
- "\\mfrake" : "𝔢",
- "\\mfrakf" : "𝔣",
- "\\mfrakg" : "𝔤",
- "\\mfrakh" : "𝔥",
- "\\mfraki" : "𝔦",
- "\\mfrakj" : "𝔧",
- "\\mfrakk" : "𝔨",
- "\\mfrakl" : "𝔩",
- "\\mfrakm" : "𝔪",
- "\\mfrakn" : "𝔫",
- "\\mfrako" : "𝔬",
- "\\mfrakp" : "𝔭",
- "\\mfrakq" : "𝔮",
- "\\mfrakr" : "𝔯",
- "\\mfraks" : "𝔰",
- "\\mfrakt" : "𝔱",
- "\\mfraku" : "𝔲",
- "\\mfrakv" : "𝔳",
- "\\mfrakw" : "𝔴",
- "\\mfrakx" : "𝔵",
- "\\mfraky" : "𝔶",
- "\\mfrakz" : "𝔷",
- "\\BbbA" : "𝔸",
- "\\BbbB" : "𝔹",
- "\\BbbD" : "𝔻",
- "\\BbbE" : "𝔼",
- "\\BbbF" : "𝔽",
- "\\BbbG" : "𝔾",
- "\\BbbI" : "𝕀",
- "\\BbbJ" : "𝕁",
- "\\BbbK" : "𝕂",
- "\\BbbL" : "𝕃",
- "\\BbbM" : "𝕄",
- "\\BbbO" : "𝕆",
- "\\BbbS" : "𝕊",
- "\\BbbT" : "𝕋",
- "\\BbbU" : "𝕌",
- "\\BbbV" : "𝕍",
- "\\BbbW" : "𝕎",
- "\\BbbX" : "𝕏",
- "\\BbbY" : "𝕐",
- "\\Bbba" : "𝕒",
- "\\Bbbb" : "𝕓",
- "\\Bbbc" : "𝕔",
- "\\Bbbd" : "𝕕",
- "\\Bbbe" : "𝕖",
- "\\Bbbf" : "𝕗",
- "\\Bbbg" : "𝕘",
- "\\Bbbh" : "𝕙",
- "\\Bbbi" : "𝕚",
- "\\Bbbj" : "𝕛",
- "\\Bbbk" : "𝕜",
- "\\Bbbl" : "𝕝",
- "\\Bbbm" : "𝕞",
- "\\Bbbn" : "𝕟",
- "\\Bbbo" : "𝕠",
- "\\Bbbp" : "𝕡",
- "\\Bbbq" : "𝕢",
- "\\Bbbr" : "𝕣",
- "\\Bbbs" : "𝕤",
- "\\Bbbt" : "𝕥",
- "\\Bbbu" : "𝕦",
- "\\Bbbv" : "𝕧",
- "\\Bbbw" : "𝕨",
- "\\Bbbx" : "𝕩",
- "\\Bbby" : "𝕪",
- "\\Bbbz" : "𝕫",
- "\\mbffrakA" : "𝕬",
- "\\mbffrakB" : "𝕭",
- "\\mbffrakC" : "𝕮",
- "\\mbffrakD" : "𝕯",
- "\\mbffrakE" : "𝕰",
- "\\mbffrakF" : "𝕱",
- "\\mbffrakG" : "𝕲",
- "\\mbffrakH" : "𝕳",
- "\\mbffrakI" : "𝕴",
- "\\mbffrakJ" : "𝕵",
- "\\mbffrakK" : "𝕶",
- "\\mbffrakL" : "𝕷",
- "\\mbffrakM" : "𝕸",
- "\\mbffrakN" : "𝕹",
- "\\mbffrakO" : "𝕺",
- "\\mbffrakP" : "𝕻",
- "\\mbffrakQ" : "𝕼",
- "\\mbffrakR" : "𝕽",
- "\\mbffrakS" : "𝕾",
- "\\mbffrakT" : "𝕿",
- "\\mbffrakU" : "𝖀",
- "\\mbffrakV" : "𝖁",
- "\\mbffrakW" : "𝖂",
- "\\mbffrakX" : "𝖃",
- "\\mbffrakY" : "𝖄",
- "\\mbffrakZ" : "𝖅",
- "\\mbffraka" : "𝖆",
- "\\mbffrakb" : "𝖇",
- "\\mbffrakc" : "𝖈",
- "\\mbffrakd" : "𝖉",
- "\\mbffrake" : "𝖊",
- "\\mbffrakf" : "𝖋",
- "\\mbffrakg" : "𝖌",
- "\\mbffrakh" : "𝖍",
- "\\mbffraki" : "𝖎",
- "\\mbffrakj" : "𝖏",
- "\\mbffrakk" : "𝖐",
- "\\mbffrakl" : "𝖑",
- "\\mbffrakm" : "𝖒",
- "\\mbffrakn" : "𝖓",
- "\\mbffrako" : "𝖔",
- "\\mbffrakp" : "𝖕",
- "\\mbffrakq" : "𝖖",
- "\\mbffrakr" : "𝖗",
- "\\mbffraks" : "𝖘",
- "\\mbffrakt" : "𝖙",
- "\\mbffraku" : "𝖚",
- "\\mbffrakv" : "𝖛",
- "\\mbffrakw" : "𝖜",
- "\\mbffrakx" : "𝖝",
- "\\mbffraky" : "𝖞",
- "\\mbffrakz" : "𝖟",
- "\\msansA" : "𝖠",
- "\\msansB" : "𝖡",
- "\\msansC" : "𝖢",
- "\\msansD" : "𝖣",
- "\\msansE" : "𝖤",
- "\\msansF" : "𝖥",
- "\\msansG" : "𝖦",
- "\\msansH" : "𝖧",
- "\\msansI" : "𝖨",
- "\\msansJ" : "𝖩",
- "\\msansK" : "𝖪",
- "\\msansL" : "𝖫",
- "\\msansM" : "𝖬",
- "\\msansN" : "𝖭",
- "\\msansO" : "𝖮",
- "\\msansP" : "𝖯",
- "\\msansQ" : "𝖰",
- "\\msansR" : "𝖱",
- "\\msansS" : "𝖲",
- "\\msansT" : "𝖳",
- "\\msansU" : "𝖴",
- "\\msansV" : "𝖵",
- "\\msansW" : "𝖶",
- "\\msansX" : "𝖷",
- "\\msansY" : "𝖸",
- "\\msansZ" : "𝖹",
- "\\msansa" : "𝖺",
- "\\msansb" : "𝖻",
- "\\msansc" : "𝖼",
- "\\msansd" : "𝖽",
- "\\msanse" : "𝖾",
- "\\msansf" : "𝖿",
- "\\msansg" : "𝗀",
- "\\msansh" : "𝗁",
- "\\msansi" : "𝗂",
- "\\msansj" : "𝗃",
- "\\msansk" : "𝗄",
- "\\msansl" : "𝗅",
- "\\msansm" : "𝗆",
- "\\msansn" : "𝗇",
- "\\msanso" : "𝗈",
- "\\msansp" : "𝗉",
- "\\msansq" : "𝗊",
- "\\msansr" : "𝗋",
- "\\msanss" : "𝗌",
- "\\msanst" : "𝗍",
- "\\msansu" : "𝗎",
- "\\msansv" : "𝗏",
- "\\msansw" : "𝗐",
- "\\msansx" : "𝗑",
- "\\msansy" : "𝗒",
- "\\msansz" : "𝗓",
- "\\mbfsansA" : "𝗔",
- "\\mbfsansB" : "𝗕",
- "\\mbfsansC" : "𝗖",
- "\\mbfsansD" : "𝗗",
- "\\mbfsansE" : "𝗘",
- "\\mbfsansF" : "𝗙",
- "\\mbfsansG" : "𝗚",
- "\\mbfsansH" : "𝗛",
- "\\mbfsansI" : "𝗜",
- "\\mbfsansJ" : "𝗝",
- "\\mbfsansK" : "𝗞",
- "\\mbfsansL" : "𝗟",
- "\\mbfsansM" : "𝗠",
- "\\mbfsansN" : "𝗡",
- "\\mbfsansO" : "𝗢",
- "\\mbfsansP" : "𝗣",
- "\\mbfsansQ" : "𝗤",
- "\\mbfsansR" : "𝗥",
- "\\mbfsansS" : "𝗦",
- "\\mbfsansT" : "𝗧",
- "\\mbfsansU" : "𝗨",
- "\\mbfsansV" : "𝗩",
- "\\mbfsansW" : "𝗪",
- "\\mbfsansX" : "𝗫",
- "\\mbfsansY" : "𝗬",
- "\\mbfsansZ" : "𝗭",
- "\\mbfsansa" : "𝗮",
- "\\mbfsansb" : "𝗯",
- "\\mbfsansc" : "𝗰",
- "\\mbfsansd" : "𝗱",
- "\\mbfsanse" : "𝗲",
- "\\mbfsansf" : "𝗳",
- "\\mbfsansg" : "𝗴",
- "\\mbfsansh" : "𝗵",
- "\\mbfsansi" : "𝗶",
- "\\mbfsansj" : "𝗷",
- "\\mbfsansk" : "𝗸",
- "\\mbfsansl" : "𝗹",
- "\\mbfsansm" : "𝗺",
- "\\mbfsansn" : "𝗻",
- "\\mbfsanso" : "𝗼",
- "\\mbfsansp" : "𝗽",
- "\\mbfsansq" : "𝗾",
- "\\mbfsansr" : "𝗿",
- "\\mbfsanss" : "𝘀",
- "\\mbfsanst" : "𝘁",
- "\\mbfsansu" : "𝘂",
- "\\mbfsansv" : "𝘃",
- "\\mbfsansw" : "𝘄",
- "\\mbfsansx" : "𝘅",
- "\\mbfsansy" : "𝘆",
- "\\mbfsansz" : "𝘇",
- "\\mitsansA" : "𝘈",
- "\\mitsansB" : "𝘉",
- "\\mitsansC" : "𝘊",
- "\\mitsansD" : "𝘋",
- "\\mitsansE" : "𝘌",
- "\\mitsansF" : "𝘍",
- "\\mitsansG" : "𝘎",
- "\\mitsansH" : "𝘏",
- "\\mitsansI" : "𝘐",
- "\\mitsansJ" : "𝘑",
- "\\mitsansK" : "𝘒",
- "\\mitsansL" : "𝘓",
- "\\mitsansM" : "𝘔",
- "\\mitsansN" : "𝘕",
- "\\mitsansO" : "𝘖",
- "\\mitsansP" : "𝘗",
- "\\mitsansQ" : "𝘘",
- "\\mitsansR" : "𝘙",
- "\\mitsansS" : "𝘚",
- "\\mitsansT" : "𝘛",
- "\\mitsansU" : "𝘜",
- "\\mitsansV" : "𝘝",
- "\\mitsansW" : "𝘞",
- "\\mitsansX" : "𝘟",
- "\\mitsansY" : "𝘠",
- "\\mitsansZ" : "𝘡",
- "\\mitsansa" : "𝘢",
- "\\mitsansb" : "𝘣",
- "\\mitsansc" : "𝘤",
- "\\mitsansd" : "𝘥",
- "\\mitsanse" : "𝘦",
- "\\mitsansf" : "𝘧",
- "\\mitsansg" : "𝘨",
- "\\mitsansh" : "𝘩",
- "\\mitsansi" : "𝘪",
- "\\mitsansj" : "𝘫",
- "\\mitsansk" : "𝘬",
- "\\mitsansl" : "𝘭",
- "\\mitsansm" : "𝘮",
- "\\mitsansn" : "𝘯",
- "\\mitsanso" : "𝘰",
- "\\mitsansp" : "𝘱",
- "\\mitsansq" : "𝘲",
- "\\mitsansr" : "𝘳",
- "\\mitsanss" : "𝘴",
- "\\mitsanst" : "𝘵",
- "\\mitsansu" : "𝘶",
- "\\mitsansv" : "𝘷",
- "\\mitsansw" : "𝘸",
- "\\mitsansx" : "𝘹",
- "\\mitsansy" : "𝘺",
- "\\mitsansz" : "𝘻",
- "\\mbfitsansA" : "𝘼",
- "\\mbfitsansB" : "𝘽",
- "\\mbfitsansC" : "𝘾",
- "\\mbfitsansD" : "𝘿",
- "\\mbfitsansE" : "𝙀",
- "\\mbfitsansF" : "𝙁",
- "\\mbfitsansG" : "𝙂",
- "\\mbfitsansH" : "𝙃",
- "\\mbfitsansI" : "𝙄",
- "\\mbfitsansJ" : "𝙅",
- "\\mbfitsansK" : "𝙆",
- "\\mbfitsansL" : "𝙇",
- "\\mbfitsansM" : "𝙈",
- "\\mbfitsansN" : "𝙉",
- "\\mbfitsansO" : "𝙊",
- "\\mbfitsansP" : "𝙋",
- "\\mbfitsansQ" : "𝙌",
- "\\mbfitsansR" : "𝙍",
- "\\mbfitsansS" : "𝙎",
- "\\mbfitsansT" : "𝙏",
- "\\mbfitsansU" : "𝙐",
- "\\mbfitsansV" : "𝙑",
- "\\mbfitsansW" : "𝙒",
- "\\mbfitsansX" : "𝙓",
- "\\mbfitsansY" : "𝙔",
- "\\mbfitsansZ" : "𝙕",
- "\\mbfitsansa" : "𝙖",
- "\\mbfitsansb" : "𝙗",
- "\\mbfitsansc" : "𝙘",
- "\\mbfitsansd" : "𝙙",
- "\\mbfitsanse" : "𝙚",
- "\\mbfitsansf" : "𝙛",
- "\\mbfitsansg" : "𝙜",
- "\\mbfitsansh" : "𝙝",
- "\\mbfitsansi" : "𝙞",
- "\\mbfitsansj" : "𝙟",
- "\\mbfitsansk" : "𝙠",
- "\\mbfitsansl" : "𝙡",
- "\\mbfitsansm" : "𝙢",
- "\\mbfitsansn" : "𝙣",
- "\\mbfitsanso" : "𝙤",
- "\\mbfitsansp" : "𝙥",
- "\\mbfitsansq" : "𝙦",
- "\\mbfitsansr" : "𝙧",
- "\\mbfitsanss" : "𝙨",
- "\\mbfitsanst" : "𝙩",
- "\\mbfitsansu" : "𝙪",
- "\\mbfitsansv" : "𝙫",
- "\\mbfitsansw" : "𝙬",
- "\\mbfitsansx" : "𝙭",
- "\\mbfitsansy" : "𝙮",
- "\\mbfitsansz" : "𝙯",
- "\\mttA" : "𝙰",
- "\\mttB" : "𝙱",
- "\\mttC" : "𝙲",
- "\\mttD" : "𝙳",
- "\\mttE" : "𝙴",
- "\\mttF" : "𝙵",
- "\\mttG" : "𝙶",
- "\\mttH" : "𝙷",
- "\\mttI" : "𝙸",
- "\\mttJ" : "𝙹",
- "\\mttK" : "𝙺",
- "\\mttL" : "𝙻",
- "\\mttM" : "𝙼",
- "\\mttN" : "𝙽",
- "\\mttO" : "𝙾",
- "\\mttP" : "𝙿",
- "\\mttQ" : "𝚀",
- "\\mttR" : "𝚁",
- "\\mttS" : "𝚂",
- "\\mttT" : "𝚃",
- "\\mttU" : "𝚄",
- "\\mttV" : "𝚅",
- "\\mttW" : "𝚆",
- "\\mttX" : "𝚇",
- "\\mttY" : "𝚈",
- "\\mttZ" : "𝚉",
- "\\mtta" : "𝚊",
- "\\mttb" : "𝚋",
- "\\mttc" : "𝚌",
- "\\mttd" : "𝚍",
- "\\mtte" : "𝚎",
- "\\mttf" : "𝚏",
- "\\mttg" : "𝚐",
- "\\mtth" : "𝚑",
- "\\mtti" : "𝚒",
- "\\mttj" : "𝚓",
- "\\mttk" : "𝚔",
- "\\mttl" : "𝚕",
- "\\mttm" : "𝚖",
- "\\mttn" : "𝚗",
- "\\mtto" : "𝚘",
- "\\mttp" : "𝚙",
- "\\mttq" : "𝚚",
- "\\mttr" : "𝚛",
- "\\mtts" : "𝚜",
- "\\mttt" : "𝚝",
- "\\mttu" : "𝚞",
- "\\mttv" : "𝚟",
- "\\mttw" : "𝚠",
- "\\mttx" : "𝚡",
- "\\mtty" : "𝚢",
- "\\mttz" : "𝚣",
- "\\mbfAlpha" : "𝚨",
- "\\mbfBeta" : "𝚩",
- "\\mbfGamma" : "𝚪",
- "\\mbfDelta" : "𝚫",
- "\\mbfEpsilon" : "𝚬",
- "\\mbfZeta" : "𝚭",
- "\\mbfEta" : "𝚮",
- "\\mbfTheta" : "𝚯",
- "\\mbfIota" : "𝚰",
- "\\mbfKappa" : "𝚱",
- "\\mbfLambda" : "𝚲",
- "\\mbfMu" : "𝚳",
- "\\mbfNu" : "𝚴",
- "\\mbfXi" : "𝚵",
- "\\mbfOmicron" : "𝚶",
- "\\mbfPi" : "𝚷",
- "\\mbfRho" : "𝚸",
- "\\mbfvarTheta" : "𝚹",
- "\\mbfSigma" : "𝚺",
- "\\mbfTau" : "𝚻",
- "\\mbfUpsilon" : "𝚼",
- "\\mbfPhi" : "𝚽",
- "\\mbfChi" : "𝚾",
- "\\mbfPsi" : "𝚿",
- "\\mbfOmega" : "𝛀",
- "\\mbfalpha" : "𝛂",
- "\\mbfbeta" : "𝛃",
- "\\mbfgamma" : "𝛄",
- "\\mbfdelta" : "𝛅",
- "\\mbfepsilon" : "𝛆",
- "\\mbfzeta" : "𝛇",
- "\\mbfeta" : "𝛈",
- "\\mbftheta" : "𝛉",
- "\\mbfiota" : "𝛊",
- "\\mbfkappa" : "𝛋",
- "\\mbflambda" : "𝛌",
- "\\mbfmu" : "𝛍",
- "\\mbfnu" : "𝛎",
- "\\mbfxi" : "𝛏",
- "\\mbfomicron" : "𝛐",
- "\\mbfpi" : "𝛑",
- "\\mbfrho" : "𝛒",
- "\\mbfvarsigma" : "𝛓",
- "\\mbfsigma" : "𝛔",
- "\\mbftau" : "𝛕",
- "\\mbfupsilon" : "𝛖",
- "\\mbfvarphi" : "𝛗",
- "\\mbfchi" : "𝛘",
- "\\mbfpsi" : "𝛙",
- "\\mbfomega" : "𝛚",
- "\\mbfvarepsilon" : "𝛜",
- "\\mbfvartheta" : "𝛝",
- "\\mbfvarkappa" : "𝛞",
- "\\mbfphi" : "𝛟",
- "\\mbfvarrho" : "𝛠",
- "\\mbfvarpi" : "𝛡",
- "\\mitAlpha" : "𝛢",
- "\\mitBeta" : "𝛣",
- "\\mitGamma" : "𝛤",
- "\\mitDelta" : "𝛥",
- "\\mitEpsilon" : "𝛦",
- "\\mitZeta" : "𝛧",
- "\\mitEta" : "𝛨",
- "\\mitTheta" : "𝛩",
- "\\mitIota" : "𝛪",
- "\\mitKappa" : "𝛫",
- "\\mitLambda" : "𝛬",
- "\\mitMu" : "𝛭",
- "\\mitNu" : "𝛮",
- "\\mitXi" : "𝛯",
- "\\mitOmicron" : "𝛰",
- "\\mitPi" : "𝛱",
- "\\mitRho" : "𝛲",
- "\\mitvarTheta" : "𝛳",
- "\\mitSigma" : "𝛴",
- "\\mitTau" : "𝛵",
- "\\mitUpsilon" : "𝛶",
- "\\mitPhi" : "𝛷",
- "\\mitChi" : "𝛸",
- "\\mitPsi" : "𝛹",
- "\\mitOmega" : "𝛺",
- "\\mitalpha" : "𝛼",
- "\\mitbeta" : "𝛽",
- "\\mitgamma" : "𝛾",
- "\\mitdelta" : "𝛿",
- "\\mitepsilon" : "𝜀",
- "\\mitzeta" : "𝜁",
- "\\miteta" : "𝜂",
- "\\mittheta" : "𝜃",
- "\\mitiota" : "𝜄",
- "\\mitkappa" : "𝜅",
- "\\mitlambda" : "𝜆",
- "\\mitmu" : "𝜇",
- "\\mitnu" : "𝜈",
- "\\mitxi" : "𝜉",
- "\\mitomicron" : "𝜊",
- "\\mitpi" : "𝜋",
- "\\mitrho" : "𝜌",
- "\\mitvarsigma" : "𝜍",
- "\\mitsigma" : "𝜎",
- "\\mittau" : "𝜏",
- "\\mitupsilon" : "𝜐",
- "\\mitphi" : "𝜑",
- "\\mitchi" : "𝜒",
- "\\mitpsi" : "𝜓",
- "\\mitomega" : "𝜔",
- "\\mitvarepsilon" : "𝜖",
- "\\mitvartheta" : "𝜗",
- "\\mitvarkappa" : "𝜘",
- "\\mitvarphi" : "𝜙",
- "\\mitvarrho" : "𝜚",
- "\\mitvarpi" : "𝜛",
- "\\mbfitAlpha" : "𝜜",
- "\\mbfitBeta" : "𝜝",
- "\\mbfitGamma" : "𝜞",
- "\\mbfitDelta" : "𝜟",
- "\\mbfitEpsilon" : "𝜠",
- "\\mbfitZeta" : "𝜡",
- "\\mbfitEta" : "𝜢",
- "\\mbfitTheta" : "𝜣",
- "\\mbfitIota" : "𝜤",
- "\\mbfitKappa" : "𝜥",
- "\\mbfitLambda" : "𝜦",
- "\\mbfitMu" : "𝜧",
- "\\mbfitNu" : "𝜨",
- "\\mbfitXi" : "𝜩",
- "\\mbfitOmicron" : "𝜪",
- "\\mbfitPi" : "𝜫",
- "\\mbfitRho" : "𝜬",
- "\\mbfitvarTheta" : "𝜭",
- "\\mbfitSigma" : "𝜮",
- "\\mbfitTau" : "𝜯",
- "\\mbfitUpsilon" : "𝜰",
- "\\mbfitPhi" : "𝜱",
- "\\mbfitChi" : "𝜲",
- "\\mbfitPsi" : "𝜳",
- "\\mbfitOmega" : "𝜴",
- "\\mbfitalpha" : "𝜶",
- "\\mbfitbeta" : "𝜷",
- "\\mbfitgamma" : "𝜸",
- "\\mbfitdelta" : "𝜹",
- "\\mbfitepsilon" : "𝜺",
- "\\mbfitzeta" : "𝜻",
- "\\mbfiteta" : "𝜼",
- "\\mbfittheta" : "𝜽",
- "\\mbfitiota" : "𝜾",
- "\\mbfitkappa" : "𝜿",
- "\\mbfitlambda" : "𝝀",
- "\\mbfitmu" : "𝝁",
- "\\mbfitnu" : "𝝂",
- "\\mbfitxi" : "𝝃",
- "\\mbfitomicron" : "𝝄",
- "\\mbfitpi" : "𝝅",
- "\\mbfitrho" : "𝝆",
- "\\mbfitvarsigma" : "𝝇",
- "\\mbfitsigma" : "𝝈",
- "\\mbfittau" : "𝝉",
- "\\mbfitupsilon" : "𝝊",
- "\\mbfitphi" : "𝝋",
- "\\mbfitchi" : "𝝌",
- "\\mbfitpsi" : "𝝍",
- "\\mbfitomega" : "𝝎",
- "\\mbfitvarepsilon" : "𝝐",
- "\\mbfitvartheta" : "𝝑",
- "\\mbfitvarkappa" : "𝝒",
- "\\mbfitvarphi" : "𝝓",
- "\\mbfitvarrho" : "𝝔",
- "\\mbfitvarpi" : "𝝕",
- "\\mbfsansAlpha" : "𝝖",
- "\\mbfsansBeta" : "𝝗",
- "\\mbfsansGamma" : "𝝘",
- "\\mbfsansDelta" : "𝝙",
- "\\mbfsansEpsilon" : "𝝚",
- "\\mbfsansZeta" : "𝝛",
- "\\mbfsansEta" : "𝝜",
- "\\mbfsansTheta" : "𝝝",
- "\\mbfsansIota" : "𝝞",
- "\\mbfsansKappa" : "𝝟",
- "\\mbfsansLambda" : "𝝠",
- "\\mbfsansMu" : "𝝡",
- "\\mbfsansNu" : "𝝢",
- "\\mbfsansXi" : "𝝣",
- "\\mbfsansOmicron" : "𝝤",
- "\\mbfsansPi" : "𝝥",
- "\\mbfsansRho" : "𝝦",
- "\\mbfsansvarTheta" : "𝝧",
- "\\mbfsansSigma" : "𝝨",
- "\\mbfsansTau" : "𝝩",
- "\\mbfsansUpsilon" : "𝝪",
- "\\mbfsansPhi" : "𝝫",
- "\\mbfsansChi" : "𝝬",
- "\\mbfsansPsi" : "𝝭",
- "\\mbfsansOmega" : "𝝮",
- "\\mbfsansalpha" : "𝝰",
- "\\mbfsansbeta" : "𝝱",
- "\\mbfsansgamma" : "𝝲",
- "\\mbfsansdelta" : "𝝳",
- "\\mbfsansepsilon" : "𝝴",
- "\\mbfsanszeta" : "𝝵",
- "\\mbfsanseta" : "𝝶",
- "\\mbfsanstheta" : "𝝷",
- "\\mbfsansiota" : "𝝸",
- "\\mbfsanskappa" : "𝝹",
- "\\mbfsanslambda" : "𝝺",
- "\\mbfsansmu" : "𝝻",
- "\\mbfsansnu" : "𝝼",
- "\\mbfsansxi" : "𝝽",
- "\\mbfsansomicron" : "𝝾",
- "\\mbfsanspi" : "𝝿",
- "\\mbfsansrho" : "𝞀",
- "\\mbfsansvarsigma" : "𝞁",
- "\\mbfsanssigma" : "𝞂",
- "\\mbfsanstau" : "𝞃",
- "\\mbfsansupsilon" : "𝞄",
- "\\mbfsansphi" : "𝞅",
- "\\mbfsanschi" : "𝞆",
- "\\mbfsanspsi" : "𝞇",
- "\\mbfsansomega" : "𝞈",
- "\\mbfsansvarepsilon" : "𝞊",
- "\\mbfsansvartheta" : "𝞋",
- "\\mbfsansvarkappa" : "𝞌",
- "\\mbfsansvarphi" : "𝞍",
- "\\mbfsansvarrho" : "𝞎",
- "\\mbfsansvarpi" : "𝞏",
- "\\mbfitsansAlpha" : "𝞐",
- "\\mbfitsansBeta" : "𝞑",
- "\\mbfitsansGamma" : "𝞒",
- "\\mbfitsansDelta" : "𝞓",
- "\\mbfitsansEpsilon" : "𝞔",
- "\\mbfitsansZeta" : "𝞕",
- "\\mbfitsansEta" : "𝞖",
- "\\mbfitsansTheta" : "𝞗",
- "\\mbfitsansIota" : "𝞘",
- "\\mbfitsansKappa" : "𝞙",
- "\\mbfitsansLambda" : "𝞚",
- "\\mbfitsansMu" : "𝞛",
- "\\mbfitsansNu" : "𝞜",
- "\\mbfitsansXi" : "𝞝",
- "\\mbfitsansOmicron" : "𝞞",
- "\\mbfitsansPi" : "𝞟",
- "\\mbfitsansRho" : "𝞠",
- "\\mbfitsansvarTheta" : "𝞡",
- "\\mbfitsansSigma" : "𝞢",
- "\\mbfitsansTau" : "𝞣",
- "\\mbfitsansUpsilon" : "𝞤",
- "\\mbfitsansPhi" : "𝞥",
- "\\mbfitsansChi" : "𝞦",
- "\\mbfitsansPsi" : "𝞧",
- "\\mbfitsansOmega" : "𝞨",
- "\\mbfitsansalpha" : "𝞪",
- "\\mbfitsansbeta" : "𝞫",
- "\\mbfitsansgamma" : "𝞬",
- "\\mbfitsansdelta" : "𝞭",
- "\\mbfitsansepsilon" : "𝞮",
- "\\mbfitsanszeta" : "𝞯",
- "\\mbfitsanseta" : "𝞰",
- "\\mbfitsanstheta" : "𝞱",
- "\\mbfitsansiota" : "𝞲",
- "\\mbfitsanskappa" : "𝞳",
- "\\mbfitsanslambda" : "𝞴",
- "\\mbfitsansmu" : "𝞵",
- "\\mbfitsansnu" : "𝞶",
- "\\mbfitsansxi" : "𝞷",
- "\\mbfitsansomicron" : "𝞸",
- "\\mbfitsanspi" : "𝞹",
- "\\mbfitsansrho" : "𝞺",
- "\\mbfitsansvarsigma" : "𝞻",
- "\\mbfitsanssigma" : "𝞼",
- "\\mbfitsanstau" : "𝞽",
- "\\mbfitsansupsilon" : "𝞾",
- "\\mbfitsansphi" : "𝞿",
- "\\mbfitsanschi" : "𝟀",
- "\\mbfitsanspsi" : "𝟁",
- "\\mbfitsansomega" : "𝟂",
- "\\mbfitsansvarepsilon" : "𝟄",
- "\\mbfitsansvartheta" : "𝟅",
- "\\mbfitsansvarkappa" : "𝟆",
- "\\mbfitsansvarphi" : "𝟇",
- "\\mbfitsansvarrho" : "𝟈",
- "\\mbfitsansvarpi" : "𝟉",
- "\\mbfzero" : "𝟎",
- "\\mbfone" : "𝟏",
- "\\mbftwo" : "𝟐",
- "\\mbfthree" : "𝟑",
- "\\mbffour" : "𝟒",
- "\\mbffive" : "𝟓",
- "\\mbfsix" : "𝟔",
- "\\mbfseven" : "𝟕",
- "\\mbfeight" : "𝟖",
- "\\mbfnine" : "𝟗",
- "\\Bbbzero" : "𝟘",
- "\\Bbbone" : "𝟙",
- "\\Bbbtwo" : "𝟚",
- "\\Bbbthree" : "𝟛",
- "\\Bbbfour" : "𝟜",
- "\\Bbbfive" : "𝟝",
- "\\Bbbsix" : "𝟞",
- "\\Bbbseven" : "𝟟",
- "\\Bbbeight" : "𝟠",
- "\\Bbbnine" : "𝟡",
- "\\msanszero" : "𝟢",
- "\\msansone" : "𝟣",
- "\\msanstwo" : "𝟤",
- "\\msansthree" : "𝟥",
- "\\msansfour" : "𝟦",
- "\\msansfive" : "𝟧",
- "\\msanssix" : "𝟨",
- "\\msansseven" : "𝟩",
- "\\msanseight" : "𝟪",
- "\\msansnine" : "𝟫",
- "\\mbfsanszero" : "𝟬",
- "\\mbfsansone" : "𝟭",
- "\\mbfsanstwo" : "𝟮",
- "\\mbfsansthree" : "𝟯",
- "\\mbfsansfour" : "𝟰",
- "\\mbfsansfive" : "𝟱",
- "\\mbfsanssix" : "𝟲",
- "\\mbfsansseven" : "𝟳",
- "\\mbfsanseight" : "𝟴",
- "\\mbfsansnine" : "𝟵",
- "\\mttzero" : "𝟶",
- "\\mttone" : "𝟷",
- "\\mtttwo" : "𝟸",
- "\\mttthree" : "𝟹",
- "\\mttfour" : "𝟺",
- "\\mttfive" : "𝟻",
- "\\mttsix" : "𝟼",
- "\\mttseven" : "𝟽",
- "\\mtteight" : "𝟾",
- "\\mttnine" : "𝟿",
-}
-
-
-reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
+# encoding: utf-8
+
+# DO NOT EDIT THIS FILE BY HAND.
+
+# To update this file, run the script /tools/gen_latex_symbols.py using Python 3
+
+# This file is autogenerated from the file:
+# https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
+# This original list is filtered to remove any unicode characters that are not valid
+# Python identifiers.
+
+latex_symbols = {
+
+ "\\^a" : "ᵃ",
+ "\\^b" : "ᵇ",
+ "\\^c" : "ᶜ",
+ "\\^d" : "ᵈ",
+ "\\^e" : "ᵉ",
+ "\\^f" : "ᶠ",
+ "\\^g" : "ᵍ",
+ "\\^h" : "ʰ",
+ "\\^i" : "ⁱ",
+ "\\^j" : "ʲ",
+ "\\^k" : "ᵏ",
+ "\\^l" : "ˡ",
+ "\\^m" : "ᵐ",
+ "\\^n" : "ⁿ",
+ "\\^o" : "ᵒ",
+ "\\^p" : "ᵖ",
+ "\\^r" : "ʳ",
+ "\\^s" : "ˢ",
+ "\\^t" : "ᵗ",
+ "\\^u" : "ᵘ",
+ "\\^v" : "ᵛ",
+ "\\^w" : "ʷ",
+ "\\^x" : "ˣ",
+ "\\^y" : "ʸ",
+ "\\^z" : "ᶻ",
+ "\\^A" : "ᴬ",
+ "\\^B" : "ᴮ",
+ "\\^D" : "ᴰ",
+ "\\^E" : "ᴱ",
+ "\\^G" : "ᴳ",
+ "\\^H" : "ᴴ",
+ "\\^I" : "ᴵ",
+ "\\^J" : "ᴶ",
+ "\\^K" : "ᴷ",
+ "\\^L" : "ᴸ",
+ "\\^M" : "ᴹ",
+ "\\^N" : "ᴺ",
+ "\\^O" : "ᴼ",
+ "\\^P" : "ᴾ",
+ "\\^R" : "ᴿ",
+ "\\^T" : "ᵀ",
+ "\\^U" : "ᵁ",
+ "\\^V" : "ⱽ",
+ "\\^W" : "ᵂ",
+ "\\^alpha" : "ᵅ",
+ "\\^beta" : "ᵝ",
+ "\\^gamma" : "ᵞ",
+ "\\^delta" : "ᵟ",
+ "\\^epsilon" : "ᵋ",
+ "\\^theta" : "ᶿ",
+ "\\^iota" : "ᶥ",
+ "\\^phi" : "ᵠ",
+ "\\^chi" : "ᵡ",
+ "\\^Phi" : "ᶲ",
+ "\\_a" : "ₐ",
+ "\\_e" : "ₑ",
+ "\\_h" : "ₕ",
+ "\\_i" : "ᵢ",
+ "\\_j" : "ⱼ",
+ "\\_k" : "ₖ",
+ "\\_l" : "ₗ",
+ "\\_m" : "ₘ",
+ "\\_n" : "ₙ",
+ "\\_o" : "ₒ",
+ "\\_p" : "ₚ",
+ "\\_r" : "ᵣ",
+ "\\_s" : "ₛ",
+ "\\_t" : "ₜ",
+ "\\_u" : "ᵤ",
+ "\\_v" : "ᵥ",
+ "\\_x" : "ₓ",
+ "\\_schwa" : "ₔ",
+ "\\_beta" : "ᵦ",
+ "\\_gamma" : "ᵧ",
+ "\\_rho" : "ᵨ",
+ "\\_phi" : "ᵩ",
+ "\\_chi" : "ᵪ",
+ "\\hbar" : "ħ",
+ "\\sout" : "̶",
+ "\\textordfeminine" : "ª",
+ "\\cdotp" : "·",
+ "\\textordmasculine" : "º",
+ "\\AA" : "Å",
+ "\\AE" : "Æ",
+ "\\DH" : "Ð",
+ "\\O" : "Ø",
+ "\\TH" : "Þ",
+ "\\ss" : "ß",
+ "\\aa" : "å",
+ "\\ae" : "æ",
+ "\\eth" : "ð",
+ "\\o" : "ø",
+ "\\th" : "þ",
+ "\\DJ" : "Đ",
+ "\\dj" : "đ",
+ "\\Elzxh" : "ħ",
+ "\\imath" : "ı",
+ "\\L" : "Ł",
+ "\\l" : "ł",
+ "\\NG" : "Ŋ",
+ "\\ng" : "ŋ",
+ "\\OE" : "Œ",
+ "\\oe" : "œ",
+ "\\texthvlig" : "ƕ",
+ "\\textnrleg" : "ƞ",
+ "\\textdoublepipe" : "ǂ",
+ "\\Elztrna" : "ɐ",
+ "\\Elztrnsa" : "ɒ",
+ "\\Elzopeno" : "ɔ",
+ "\\Elzrtld" : "ɖ",
+ "\\Elzschwa" : "ə",
+ "\\varepsilon" : "ɛ",
+ "\\Elzpgamma" : "ɣ",
+ "\\Elzpbgam" : "ɤ",
+ "\\Elztrnh" : "ɥ",
+ "\\Elzbtdl" : "ɬ",
+ "\\Elzrtll" : "ɭ",
+ "\\Elztrnm" : "ɯ",
+ "\\Elztrnmlr" : "ɰ",
+ "\\Elzltlmr" : "ɱ",
+ "\\Elzltln" : "ɲ",
+ "\\Elzrtln" : "ɳ",
+ "\\Elzclomeg" : "ɷ",
+ "\\textphi" : "ɸ",
+ "\\Elztrnr" : "ɹ",
+ "\\Elztrnrl" : "ɺ",
+ "\\Elzrttrnr" : "ɻ",
+ "\\Elzrl" : "ɼ",
+ "\\Elzrtlr" : "ɽ",
+ "\\Elzfhr" : "ɾ",
+ "\\Elzrtls" : "ʂ",
+ "\\Elzesh" : "ʃ",
+ "\\Elztrnt" : "ʇ",
+ "\\Elzrtlt" : "ʈ",
+ "\\Elzpupsil" : "ʊ",
+ "\\Elzpscrv" : "ʋ",
+ "\\Elzinvv" : "ʌ",
+ "\\Elzinvw" : "ʍ",
+ "\\Elztrny" : "ʎ",
+ "\\Elzrtlz" : "ʐ",
+ "\\Elzyogh" : "ʒ",
+ "\\Elzglst" : "ʔ",
+ "\\Elzreglst" : "ʕ",
+ "\\Elzinglst" : "ʖ",
+ "\\textturnk" : "ʞ",
+ "\\Elzdyogh" : "ʤ",
+ "\\Elztesh" : "ʧ",
+ "\\rasp" : "ʼ",
+ "\\textasciicaron" : "ˇ",
+ "\\Elzverts" : "ˈ",
+ "\\Elzverti" : "ˌ",
+ "\\Elzlmrk" : "ː",
+ "\\Elzhlmrk" : "ˑ",
+ "\\grave" : "̀",
+ "\\acute" : "́",
+ "\\hat" : "̂",
+ "\\tilde" : "̃",
+ "\\bar" : "̄",
+ "\\breve" : "̆",
+ "\\dot" : "̇",
+ "\\ddot" : "̈",
+ "\\ocirc" : "̊",
+ "\\H" : "̋",
+ "\\check" : "̌",
+ "\\Elzpalh" : "̡",
+ "\\Elzrh" : "̢",
+ "\\c" : "̧",
+ "\\k" : "̨",
+ "\\Elzsbbrg" : "̪",
+ "\\Elzxl" : "̵",
+ "\\Elzbar" : "̶",
+ "\\Alpha" : "Α",
+ "\\Beta" : "Β",
+ "\\Gamma" : "Γ",
+ "\\Delta" : "Δ",
+ "\\Epsilon" : "Ε",
+ "\\Zeta" : "Ζ",
+ "\\Eta" : "Η",
+ "\\Theta" : "Θ",
+ "\\Iota" : "Ι",
+ "\\Kappa" : "Κ",
+ "\\Lambda" : "Λ",
+ "\\Xi" : "Ξ",
+ "\\Pi" : "Π",
+ "\\Rho" : "Ρ",
+ "\\Sigma" : "Σ",
+ "\\Tau" : "Τ",
+ "\\Upsilon" : "Υ",
+ "\\Phi" : "Φ",
+ "\\Chi" : "Χ",
+ "\\Psi" : "Ψ",
+ "\\Omega" : "Ω",
+ "\\alpha" : "α",
+ "\\beta" : "β",
+ "\\gamma" : "γ",
+ "\\delta" : "δ",
+ "\\zeta" : "ζ",
+ "\\eta" : "η",
+ "\\theta" : "θ",
+ "\\iota" : "ι",
+ "\\kappa" : "κ",
+ "\\lambda" : "λ",
+ "\\mu" : "μ",
+ "\\nu" : "ν",
+ "\\xi" : "ξ",
+ "\\pi" : "π",
+ "\\rho" : "ρ",
+ "\\varsigma" : "ς",
+ "\\sigma" : "σ",
+ "\\tau" : "τ",
+ "\\upsilon" : "υ",
+ "\\varphi" : "φ",
+ "\\chi" : "χ",
+ "\\psi" : "ψ",
+ "\\omega" : "ω",
+ "\\vartheta" : "ϑ",
+ "\\phi" : "ϕ",
+ "\\varpi" : "ϖ",
+ "\\Stigma" : "Ϛ",
+ "\\Digamma" : "Ϝ",
+ "\\digamma" : "ϝ",
+ "\\Koppa" : "Ϟ",
+ "\\Sampi" : "Ϡ",
+ "\\varkappa" : "ϰ",
+ "\\varrho" : "ϱ",
+ "\\textTheta" : "ϴ",
+ "\\epsilon" : "ϵ",
+ "\\dddot" : "⃛",
+ "\\ddddot" : "⃜",
+ "\\hslash" : "ℏ",
+ "\\Im" : "ℑ",
+ "\\ell" : "ℓ",
+ "\\wp" : "℘",
+ "\\Re" : "ℜ",
+ "\\aleph" : "ℵ",
+ "\\beth" : "ℶ",
+ "\\gimel" : "ℷ",
+ "\\daleth" : "ℸ",
+ "\\BbbPi" : "ℿ",
+ "\\Zbar" : "Ƶ",
+ "\\overbar" : "̅",
+ "\\ovhook" : "̉",
+ "\\candra" : "̐",
+ "\\oturnedcomma" : "̒",
+ "\\ocommatopright" : "̕",
+ "\\droang" : "̚",
+ "\\wideutilde" : "̰",
+ "\\underbar" : "̱",
+ "\\not" : "̸",
+ "\\upMu" : "Μ",
+ "\\upNu" : "Ν",
+ "\\upOmicron" : "Ο",
+ "\\upepsilon" : "ε",
+ "\\upomicron" : "ο",
+ "\\upvarbeta" : "ϐ",
+ "\\upoldKoppa" : "Ϙ",
+ "\\upoldkoppa" : "ϙ",
+ "\\upstigma" : "ϛ",
+ "\\upkoppa" : "ϟ",
+ "\\upsampi" : "ϡ",
+ "\\tieconcat" : "⁀",
+ "\\leftharpoonaccent" : "⃐",
+ "\\rightharpoonaccent" : "⃑",
+ "\\vertoverlay" : "⃒",
+ "\\overleftarrow" : "⃖",
+ "\\vec" : "⃗",
+ "\\overleftrightarrow" : "⃡",
+ "\\annuity" : "⃧",
+ "\\threeunderdot" : "⃨",
+ "\\widebridgeabove" : "⃩",
+ "\\BbbC" : "ℂ",
+ "\\Eulerconst" : "ℇ",
+ "\\mscrg" : "ℊ",
+ "\\mscrH" : "ℋ",
+ "\\mfrakH" : "ℌ",
+ "\\BbbH" : "ℍ",
+ "\\Planckconst" : "ℎ",
+ "\\mscrI" : "ℐ",
+ "\\mscrL" : "ℒ",
+ "\\BbbN" : "ℕ",
+ "\\BbbP" : "ℙ",
+ "\\BbbQ" : "ℚ",
+ "\\mscrR" : "ℛ",
+ "\\BbbR" : "ℝ",
+ "\\BbbZ" : "ℤ",
+ "\\mfrakZ" : "ℨ",
+ "\\Angstrom" : "Å",
+ "\\mscrB" : "ℬ",
+ "\\mfrakC" : "ℭ",
+ "\\mscre" : "ℯ",
+ "\\mscrE" : "ℰ",
+ "\\mscrF" : "ℱ",
+ "\\Finv" : "Ⅎ",
+ "\\mscrM" : "ℳ",
+ "\\mscro" : "ℴ",
+ "\\Bbbgamma" : "ℽ",
+ "\\BbbGamma" : "ℾ",
+ "\\mitBbbD" : "ⅅ",
+ "\\mitBbbd" : "ⅆ",
+ "\\mitBbbe" : "ⅇ",
+ "\\mitBbbi" : "ⅈ",
+ "\\mitBbbj" : "ⅉ",
+ "\\mbfA" : "𝐀",
+ "\\mbfB" : "𝐁",
+ "\\mbfC" : "𝐂",
+ "\\mbfD" : "𝐃",
+ "\\mbfE" : "𝐄",
+ "\\mbfF" : "𝐅",
+ "\\mbfG" : "𝐆",
+ "\\mbfH" : "𝐇",
+ "\\mbfI" : "𝐈",
+ "\\mbfJ" : "𝐉",
+ "\\mbfK" : "𝐊",
+ "\\mbfL" : "𝐋",
+ "\\mbfM" : "𝐌",
+ "\\mbfN" : "𝐍",
+ "\\mbfO" : "𝐎",
+ "\\mbfP" : "𝐏",
+ "\\mbfQ" : "𝐐",
+ "\\mbfR" : "𝐑",
+ "\\mbfS" : "𝐒",
+ "\\mbfT" : "𝐓",
+ "\\mbfU" : "𝐔",
+ "\\mbfV" : "𝐕",
+ "\\mbfW" : "𝐖",
+ "\\mbfX" : "𝐗",
+ "\\mbfY" : "𝐘",
+ "\\mbfZ" : "𝐙",
+ "\\mbfa" : "𝐚",
+ "\\mbfb" : "𝐛",
+ "\\mbfc" : "𝐜",
+ "\\mbfd" : "𝐝",
+ "\\mbfe" : "𝐞",
+ "\\mbff" : "𝐟",
+ "\\mbfg" : "𝐠",
+ "\\mbfh" : "𝐡",
+ "\\mbfi" : "𝐢",
+ "\\mbfj" : "𝐣",
+ "\\mbfk" : "𝐤",
+ "\\mbfl" : "𝐥",
+ "\\mbfm" : "𝐦",
+ "\\mbfn" : "𝐧",
+ "\\mbfo" : "𝐨",
+ "\\mbfp" : "𝐩",
+ "\\mbfq" : "𝐪",
+ "\\mbfr" : "𝐫",
+ "\\mbfs" : "𝐬",
+ "\\mbft" : "𝐭",
+ "\\mbfu" : "𝐮",
+ "\\mbfv" : "𝐯",
+ "\\mbfw" : "𝐰",
+ "\\mbfx" : "𝐱",
+ "\\mbfy" : "𝐲",
+ "\\mbfz" : "𝐳",
+ "\\mitA" : "𝐴",
+ "\\mitB" : "𝐵",
+ "\\mitC" : "𝐶",
+ "\\mitD" : "𝐷",
+ "\\mitE" : "𝐸",
+ "\\mitF" : "𝐹",
+ "\\mitG" : "𝐺",
+ "\\mitH" : "𝐻",
+ "\\mitI" : "𝐼",
+ "\\mitJ" : "𝐽",
+ "\\mitK" : "𝐾",
+ "\\mitL" : "𝐿",
+ "\\mitM" : "𝑀",
+ "\\mitN" : "𝑁",
+ "\\mitO" : "𝑂",
+ "\\mitP" : "𝑃",
+ "\\mitQ" : "𝑄",
+ "\\mitR" : "𝑅",
+ "\\mitS" : "𝑆",
+ "\\mitT" : "𝑇",
+ "\\mitU" : "𝑈",
+ "\\mitV" : "𝑉",
+ "\\mitW" : "𝑊",
+ "\\mitX" : "𝑋",
+ "\\mitY" : "𝑌",
+ "\\mitZ" : "𝑍",
+ "\\mita" : "𝑎",
+ "\\mitb" : "𝑏",
+ "\\mitc" : "𝑐",
+ "\\mitd" : "𝑑",
+ "\\mite" : "𝑒",
+ "\\mitf" : "𝑓",
+ "\\mitg" : "𝑔",
+ "\\miti" : "𝑖",
+ "\\mitj" : "𝑗",
+ "\\mitk" : "𝑘",
+ "\\mitl" : "𝑙",
+ "\\mitm" : "𝑚",
+ "\\mitn" : "𝑛",
+ "\\mito" : "𝑜",
+ "\\mitp" : "𝑝",
+ "\\mitq" : "𝑞",
+ "\\mitr" : "𝑟",
+ "\\mits" : "𝑠",
+ "\\mitt" : "𝑡",
+ "\\mitu" : "𝑢",
+ "\\mitv" : "𝑣",
+ "\\mitw" : "𝑤",
+ "\\mitx" : "𝑥",
+ "\\mity" : "𝑦",
+ "\\mitz" : "𝑧",
+ "\\mbfitA" : "𝑨",
+ "\\mbfitB" : "𝑩",
+ "\\mbfitC" : "𝑪",
+ "\\mbfitD" : "𝑫",
+ "\\mbfitE" : "𝑬",
+ "\\mbfitF" : "𝑭",
+ "\\mbfitG" : "𝑮",
+ "\\mbfitH" : "𝑯",
+ "\\mbfitI" : "𝑰",
+ "\\mbfitJ" : "𝑱",
+ "\\mbfitK" : "𝑲",
+ "\\mbfitL" : "𝑳",
+ "\\mbfitM" : "𝑴",
+ "\\mbfitN" : "𝑵",
+ "\\mbfitO" : "𝑶",
+ "\\mbfitP" : "𝑷",
+ "\\mbfitQ" : "𝑸",
+ "\\mbfitR" : "𝑹",
+ "\\mbfitS" : "𝑺",
+ "\\mbfitT" : "𝑻",
+ "\\mbfitU" : "𝑼",
+ "\\mbfitV" : "𝑽",
+ "\\mbfitW" : "𝑾",
+ "\\mbfitX" : "𝑿",
+ "\\mbfitY" : "𝒀",
+ "\\mbfitZ" : "𝒁",
+ "\\mbfita" : "𝒂",
+ "\\mbfitb" : "𝒃",
+ "\\mbfitc" : "𝒄",
+ "\\mbfitd" : "𝒅",
+ "\\mbfite" : "𝒆",
+ "\\mbfitf" : "𝒇",
+ "\\mbfitg" : "𝒈",
+ "\\mbfith" : "𝒉",
+ "\\mbfiti" : "𝒊",
+ "\\mbfitj" : "𝒋",
+ "\\mbfitk" : "𝒌",
+ "\\mbfitl" : "𝒍",
+ "\\mbfitm" : "𝒎",
+ "\\mbfitn" : "𝒏",
+ "\\mbfito" : "𝒐",
+ "\\mbfitp" : "𝒑",
+ "\\mbfitq" : "𝒒",
+ "\\mbfitr" : "𝒓",
+ "\\mbfits" : "𝒔",
+ "\\mbfitt" : "𝒕",
+ "\\mbfitu" : "𝒖",
+ "\\mbfitv" : "𝒗",
+ "\\mbfitw" : "𝒘",
+ "\\mbfitx" : "𝒙",
+ "\\mbfity" : "𝒚",
+ "\\mbfitz" : "𝒛",
+ "\\mscrA" : "𝒜",
+ "\\mscrC" : "𝒞",
+ "\\mscrD" : "𝒟",
+ "\\mscrG" : "𝒢",
+ "\\mscrJ" : "𝒥",
+ "\\mscrK" : "𝒦",
+ "\\mscrN" : "𝒩",
+ "\\mscrO" : "𝒪",
+ "\\mscrP" : "𝒫",
+ "\\mscrQ" : "𝒬",
+ "\\mscrS" : "𝒮",
+ "\\mscrT" : "𝒯",
+ "\\mscrU" : "𝒰",
+ "\\mscrV" : "𝒱",
+ "\\mscrW" : "𝒲",
+ "\\mscrX" : "𝒳",
+ "\\mscrY" : "𝒴",
+ "\\mscrZ" : "𝒵",
+ "\\mscra" : "𝒶",
+ "\\mscrb" : "𝒷",
+ "\\mscrc" : "𝒸",
+ "\\mscrd" : "𝒹",
+ "\\mscrf" : "𝒻",
+ "\\mscrh" : "𝒽",
+ "\\mscri" : "𝒾",
+ "\\mscrj" : "𝒿",
+ "\\mscrk" : "𝓀",
+ "\\mscrm" : "𝓂",
+ "\\mscrn" : "𝓃",
+ "\\mscrp" : "𝓅",
+ "\\mscrq" : "𝓆",
+ "\\mscrr" : "𝓇",
+ "\\mscrs" : "𝓈",
+ "\\mscrt" : "𝓉",
+ "\\mscru" : "𝓊",
+ "\\mscrv" : "𝓋",
+ "\\mscrw" : "𝓌",
+ "\\mscrx" : "𝓍",
+ "\\mscry" : "𝓎",
+ "\\mscrz" : "𝓏",
+ "\\mbfscrA" : "𝓐",
+ "\\mbfscrB" : "𝓑",
+ "\\mbfscrC" : "𝓒",
+ "\\mbfscrD" : "𝓓",
+ "\\mbfscrE" : "𝓔",
+ "\\mbfscrF" : "𝓕",
+ "\\mbfscrG" : "𝓖",
+ "\\mbfscrH" : "𝓗",
+ "\\mbfscrI" : "𝓘",
+ "\\mbfscrJ" : "𝓙",
+ "\\mbfscrK" : "𝓚",
+ "\\mbfscrL" : "𝓛",
+ "\\mbfscrM" : "𝓜",
+ "\\mbfscrN" : "𝓝",
+ "\\mbfscrO" : "𝓞",
+ "\\mbfscrP" : "𝓟",
+ "\\mbfscrQ" : "𝓠",
+ "\\mbfscrR" : "𝓡",
+ "\\mbfscrS" : "𝓢",
+ "\\mbfscrT" : "𝓣",
+ "\\mbfscrU" : "𝓤",
+ "\\mbfscrV" : "𝓥",
+ "\\mbfscrW" : "𝓦",
+ "\\mbfscrX" : "𝓧",
+ "\\mbfscrY" : "𝓨",
+ "\\mbfscrZ" : "𝓩",
+ "\\mbfscra" : "𝓪",
+ "\\mbfscrb" : "𝓫",
+ "\\mbfscrc" : "𝓬",
+ "\\mbfscrd" : "𝓭",
+ "\\mbfscre" : "𝓮",
+ "\\mbfscrf" : "𝓯",
+ "\\mbfscrg" : "𝓰",
+ "\\mbfscrh" : "𝓱",
+ "\\mbfscri" : "𝓲",
+ "\\mbfscrj" : "𝓳",
+ "\\mbfscrk" : "𝓴",
+ "\\mbfscrl" : "𝓵",
+ "\\mbfscrm" : "𝓶",
+ "\\mbfscrn" : "𝓷",
+ "\\mbfscro" : "𝓸",
+ "\\mbfscrp" : "𝓹",
+ "\\mbfscrq" : "𝓺",
+ "\\mbfscrr" : "𝓻",
+ "\\mbfscrs" : "𝓼",
+ "\\mbfscrt" : "𝓽",
+ "\\mbfscru" : "𝓾",
+ "\\mbfscrv" : "𝓿",
+ "\\mbfscrw" : "𝔀",
+ "\\mbfscrx" : "𝔁",
+ "\\mbfscry" : "𝔂",
+ "\\mbfscrz" : "𝔃",
+ "\\mfrakA" : "𝔄",
+ "\\mfrakB" : "𝔅",
+ "\\mfrakD" : "𝔇",
+ "\\mfrakE" : "𝔈",
+ "\\mfrakF" : "𝔉",
+ "\\mfrakG" : "𝔊",
+ "\\mfrakJ" : "𝔍",
+ "\\mfrakK" : "𝔎",
+ "\\mfrakL" : "𝔏",
+ "\\mfrakM" : "𝔐",
+ "\\mfrakN" : "𝔑",
+ "\\mfrakO" : "𝔒",
+ "\\mfrakP" : "𝔓",
+ "\\mfrakQ" : "𝔔",
+ "\\mfrakS" : "𝔖",
+ "\\mfrakT" : "𝔗",
+ "\\mfrakU" : "𝔘",
+ "\\mfrakV" : "𝔙",
+ "\\mfrakW" : "𝔚",
+ "\\mfrakX" : "𝔛",
+ "\\mfrakY" : "𝔜",
+ "\\mfraka" : "𝔞",
+ "\\mfrakb" : "𝔟",
+ "\\mfrakc" : "𝔠",
+ "\\mfrakd" : "𝔡",
+ "\\mfrake" : "𝔢",
+ "\\mfrakf" : "𝔣",
+ "\\mfrakg" : "𝔤",
+ "\\mfrakh" : "𝔥",
+ "\\mfraki" : "𝔦",
+ "\\mfrakj" : "𝔧",
+ "\\mfrakk" : "𝔨",
+ "\\mfrakl" : "𝔩",
+ "\\mfrakm" : "𝔪",
+ "\\mfrakn" : "𝔫",
+ "\\mfrako" : "𝔬",
+ "\\mfrakp" : "𝔭",
+ "\\mfrakq" : "𝔮",
+ "\\mfrakr" : "𝔯",
+ "\\mfraks" : "𝔰",
+ "\\mfrakt" : "𝔱",
+ "\\mfraku" : "𝔲",
+ "\\mfrakv" : "𝔳",
+ "\\mfrakw" : "𝔴",
+ "\\mfrakx" : "𝔵",
+ "\\mfraky" : "𝔶",
+ "\\mfrakz" : "𝔷",
+ "\\BbbA" : "𝔸",
+ "\\BbbB" : "𝔹",
+ "\\BbbD" : "𝔻",
+ "\\BbbE" : "𝔼",
+ "\\BbbF" : "𝔽",
+ "\\BbbG" : "𝔾",
+ "\\BbbI" : "𝕀",
+ "\\BbbJ" : "𝕁",
+ "\\BbbK" : "𝕂",
+ "\\BbbL" : "𝕃",
+ "\\BbbM" : "𝕄",
+ "\\BbbO" : "𝕆",
+ "\\BbbS" : "𝕊",
+ "\\BbbT" : "𝕋",
+ "\\BbbU" : "𝕌",
+ "\\BbbV" : "𝕍",
+ "\\BbbW" : "𝕎",
+ "\\BbbX" : "𝕏",
+ "\\BbbY" : "𝕐",
+ "\\Bbba" : "𝕒",
+ "\\Bbbb" : "𝕓",
+ "\\Bbbc" : "𝕔",
+ "\\Bbbd" : "𝕕",
+ "\\Bbbe" : "𝕖",
+ "\\Bbbf" : "𝕗",
+ "\\Bbbg" : "𝕘",
+ "\\Bbbh" : "𝕙",
+ "\\Bbbi" : "𝕚",
+ "\\Bbbj" : "𝕛",
+ "\\Bbbk" : "𝕜",
+ "\\Bbbl" : "𝕝",
+ "\\Bbbm" : "𝕞",
+ "\\Bbbn" : "𝕟",
+ "\\Bbbo" : "𝕠",
+ "\\Bbbp" : "𝕡",
+ "\\Bbbq" : "𝕢",
+ "\\Bbbr" : "𝕣",
+ "\\Bbbs" : "𝕤",
+ "\\Bbbt" : "𝕥",
+ "\\Bbbu" : "𝕦",
+ "\\Bbbv" : "𝕧",
+ "\\Bbbw" : "𝕨",
+ "\\Bbbx" : "𝕩",
+ "\\Bbby" : "𝕪",
+ "\\Bbbz" : "𝕫",
+ "\\mbffrakA" : "𝕬",
+ "\\mbffrakB" : "𝕭",
+ "\\mbffrakC" : "𝕮",
+ "\\mbffrakD" : "𝕯",
+ "\\mbffrakE" : "𝕰",
+ "\\mbffrakF" : "𝕱",
+ "\\mbffrakG" : "𝕲",
+ "\\mbffrakH" : "𝕳",
+ "\\mbffrakI" : "𝕴",
+ "\\mbffrakJ" : "𝕵",
+ "\\mbffrakK" : "𝕶",
+ "\\mbffrakL" : "𝕷",
+ "\\mbffrakM" : "𝕸",
+ "\\mbffrakN" : "𝕹",
+ "\\mbffrakO" : "𝕺",
+ "\\mbffrakP" : "𝕻",
+ "\\mbffrakQ" : "𝕼",
+ "\\mbffrakR" : "𝕽",
+ "\\mbffrakS" : "𝕾",
+ "\\mbffrakT" : "𝕿",
+ "\\mbffrakU" : "𝖀",
+ "\\mbffrakV" : "𝖁",
+ "\\mbffrakW" : "𝖂",
+ "\\mbffrakX" : "𝖃",
+ "\\mbffrakY" : "𝖄",
+ "\\mbffrakZ" : "𝖅",
+ "\\mbffraka" : "𝖆",
+ "\\mbffrakb" : "𝖇",
+ "\\mbffrakc" : "𝖈",
+ "\\mbffrakd" : "𝖉",
+ "\\mbffrake" : "𝖊",
+ "\\mbffrakf" : "𝖋",
+ "\\mbffrakg" : "𝖌",
+ "\\mbffrakh" : "𝖍",
+ "\\mbffraki" : "𝖎",
+ "\\mbffrakj" : "𝖏",
+ "\\mbffrakk" : "𝖐",
+ "\\mbffrakl" : "𝖑",
+ "\\mbffrakm" : "𝖒",
+ "\\mbffrakn" : "𝖓",
+ "\\mbffrako" : "𝖔",
+ "\\mbffrakp" : "𝖕",
+ "\\mbffrakq" : "𝖖",
+ "\\mbffrakr" : "𝖗",
+ "\\mbffraks" : "𝖘",
+ "\\mbffrakt" : "𝖙",
+ "\\mbffraku" : "𝖚",
+ "\\mbffrakv" : "𝖛",
+ "\\mbffrakw" : "𝖜",
+ "\\mbffrakx" : "𝖝",
+ "\\mbffraky" : "𝖞",
+ "\\mbffrakz" : "𝖟",
+ "\\msansA" : "𝖠",
+ "\\msansB" : "𝖡",
+ "\\msansC" : "𝖢",
+ "\\msansD" : "𝖣",
+ "\\msansE" : "𝖤",
+ "\\msansF" : "𝖥",
+ "\\msansG" : "𝖦",
+ "\\msansH" : "𝖧",
+ "\\msansI" : "𝖨",
+ "\\msansJ" : "𝖩",
+ "\\msansK" : "𝖪",
+ "\\msansL" : "𝖫",
+ "\\msansM" : "𝖬",
+ "\\msansN" : "𝖭",
+ "\\msansO" : "𝖮",
+ "\\msansP" : "𝖯",
+ "\\msansQ" : "𝖰",
+ "\\msansR" : "𝖱",
+ "\\msansS" : "𝖲",
+ "\\msansT" : "𝖳",
+ "\\msansU" : "𝖴",
+ "\\msansV" : "𝖵",
+ "\\msansW" : "𝖶",
+ "\\msansX" : "𝖷",
+ "\\msansY" : "𝖸",
+ "\\msansZ" : "𝖹",
+ "\\msansa" : "𝖺",
+ "\\msansb" : "𝖻",
+ "\\msansc" : "𝖼",
+ "\\msansd" : "𝖽",
+ "\\msanse" : "𝖾",
+ "\\msansf" : "𝖿",
+ "\\msansg" : "𝗀",
+ "\\msansh" : "𝗁",
+ "\\msansi" : "𝗂",
+ "\\msansj" : "𝗃",
+ "\\msansk" : "𝗄",
+ "\\msansl" : "𝗅",
+ "\\msansm" : "𝗆",
+ "\\msansn" : "𝗇",
+ "\\msanso" : "𝗈",
+ "\\msansp" : "𝗉",
+ "\\msansq" : "𝗊",
+ "\\msansr" : "𝗋",
+ "\\msanss" : "𝗌",
+ "\\msanst" : "𝗍",
+ "\\msansu" : "𝗎",
+ "\\msansv" : "𝗏",
+ "\\msansw" : "𝗐",
+ "\\msansx" : "𝗑",
+ "\\msansy" : "𝗒",
+ "\\msansz" : "𝗓",
+ "\\mbfsansA" : "𝗔",
+ "\\mbfsansB" : "𝗕",
+ "\\mbfsansC" : "𝗖",
+ "\\mbfsansD" : "𝗗",
+ "\\mbfsansE" : "𝗘",
+ "\\mbfsansF" : "𝗙",
+ "\\mbfsansG" : "𝗚",
+ "\\mbfsansH" : "𝗛",
+ "\\mbfsansI" : "𝗜",
+ "\\mbfsansJ" : "𝗝",
+ "\\mbfsansK" : "𝗞",
+ "\\mbfsansL" : "𝗟",
+ "\\mbfsansM" : "𝗠",
+ "\\mbfsansN" : "𝗡",
+ "\\mbfsansO" : "𝗢",
+ "\\mbfsansP" : "𝗣",
+ "\\mbfsansQ" : "𝗤",
+ "\\mbfsansR" : "𝗥",
+ "\\mbfsansS" : "𝗦",
+ "\\mbfsansT" : "𝗧",
+ "\\mbfsansU" : "𝗨",
+ "\\mbfsansV" : "𝗩",
+ "\\mbfsansW" : "𝗪",
+ "\\mbfsansX" : "𝗫",
+ "\\mbfsansY" : "𝗬",
+ "\\mbfsansZ" : "𝗭",
+ "\\mbfsansa" : "𝗮",
+ "\\mbfsansb" : "𝗯",
+ "\\mbfsansc" : "𝗰",
+ "\\mbfsansd" : "𝗱",
+ "\\mbfsanse" : "𝗲",
+ "\\mbfsansf" : "𝗳",
+ "\\mbfsansg" : "𝗴",
+ "\\mbfsansh" : "𝗵",
+ "\\mbfsansi" : "𝗶",
+ "\\mbfsansj" : "𝗷",
+ "\\mbfsansk" : "𝗸",
+ "\\mbfsansl" : "𝗹",
+ "\\mbfsansm" : "𝗺",
+ "\\mbfsansn" : "𝗻",
+ "\\mbfsanso" : "𝗼",
+ "\\mbfsansp" : "𝗽",
+ "\\mbfsansq" : "𝗾",
+ "\\mbfsansr" : "𝗿",
+ "\\mbfsanss" : "𝘀",
+ "\\mbfsanst" : "𝘁",
+ "\\mbfsansu" : "𝘂",
+ "\\mbfsansv" : "𝘃",
+ "\\mbfsansw" : "𝘄",
+ "\\mbfsansx" : "𝘅",
+ "\\mbfsansy" : "𝘆",
+ "\\mbfsansz" : "𝘇",
+ "\\mitsansA" : "𝘈",
+ "\\mitsansB" : "𝘉",
+ "\\mitsansC" : "𝘊",
+ "\\mitsansD" : "𝘋",
+ "\\mitsansE" : "𝘌",
+ "\\mitsansF" : "𝘍",
+ "\\mitsansG" : "𝘎",
+ "\\mitsansH" : "𝘏",
+ "\\mitsansI" : "𝘐",
+ "\\mitsansJ" : "𝘑",
+ "\\mitsansK" : "𝘒",
+ "\\mitsansL" : "𝘓",
+ "\\mitsansM" : "𝘔",
+ "\\mitsansN" : "𝘕",
+ "\\mitsansO" : "𝘖",
+ "\\mitsansP" : "𝘗",
+ "\\mitsansQ" : "𝘘",
+ "\\mitsansR" : "𝘙",
+ "\\mitsansS" : "𝘚",
+ "\\mitsansT" : "𝘛",
+ "\\mitsansU" : "𝘜",
+ "\\mitsansV" : "𝘝",
+ "\\mitsansW" : "𝘞",
+ "\\mitsansX" : "𝘟",
+ "\\mitsansY" : "𝘠",
+ "\\mitsansZ" : "𝘡",
+ "\\mitsansa" : "𝘢",
+ "\\mitsansb" : "𝘣",
+ "\\mitsansc" : "𝘤",
+ "\\mitsansd" : "𝘥",
+ "\\mitsanse" : "𝘦",
+ "\\mitsansf" : "𝘧",
+ "\\mitsansg" : "𝘨",
+ "\\mitsansh" : "𝘩",
+ "\\mitsansi" : "𝘪",
+ "\\mitsansj" : "𝘫",
+ "\\mitsansk" : "𝘬",
+ "\\mitsansl" : "𝘭",
+ "\\mitsansm" : "𝘮",
+ "\\mitsansn" : "𝘯",
+ "\\mitsanso" : "𝘰",
+ "\\mitsansp" : "𝘱",
+ "\\mitsansq" : "𝘲",
+ "\\mitsansr" : "𝘳",
+ "\\mitsanss" : "𝘴",
+ "\\mitsanst" : "𝘵",
+ "\\mitsansu" : "𝘶",
+ "\\mitsansv" : "𝘷",
+ "\\mitsansw" : "𝘸",
+ "\\mitsansx" : "𝘹",
+ "\\mitsansy" : "𝘺",
+ "\\mitsansz" : "𝘻",
+ "\\mbfitsansA" : "𝘼",
+ "\\mbfitsansB" : "𝘽",
+ "\\mbfitsansC" : "𝘾",
+ "\\mbfitsansD" : "𝘿",
+ "\\mbfitsansE" : "𝙀",
+ "\\mbfitsansF" : "𝙁",
+ "\\mbfitsansG" : "𝙂",
+ "\\mbfitsansH" : "𝙃",
+ "\\mbfitsansI" : "𝙄",
+ "\\mbfitsansJ" : "𝙅",
+ "\\mbfitsansK" : "𝙆",
+ "\\mbfitsansL" : "𝙇",
+ "\\mbfitsansM" : "𝙈",
+ "\\mbfitsansN" : "𝙉",
+ "\\mbfitsansO" : "𝙊",
+ "\\mbfitsansP" : "𝙋",
+ "\\mbfitsansQ" : "𝙌",
+ "\\mbfitsansR" : "𝙍",
+ "\\mbfitsansS" : "𝙎",
+ "\\mbfitsansT" : "𝙏",
+ "\\mbfitsansU" : "𝙐",
+ "\\mbfitsansV" : "𝙑",
+ "\\mbfitsansW" : "𝙒",
+ "\\mbfitsansX" : "𝙓",
+ "\\mbfitsansY" : "𝙔",
+ "\\mbfitsansZ" : "𝙕",
+ "\\mbfitsansa" : "𝙖",
+ "\\mbfitsansb" : "𝙗",
+ "\\mbfitsansc" : "𝙘",
+ "\\mbfitsansd" : "𝙙",
+ "\\mbfitsanse" : "𝙚",
+ "\\mbfitsansf" : "𝙛",
+ "\\mbfitsansg" : "𝙜",
+ "\\mbfitsansh" : "𝙝",
+ "\\mbfitsansi" : "𝙞",
+ "\\mbfitsansj" : "𝙟",
+ "\\mbfitsansk" : "𝙠",
+ "\\mbfitsansl" : "𝙡",
+ "\\mbfitsansm" : "𝙢",
+ "\\mbfitsansn" : "𝙣",
+ "\\mbfitsanso" : "𝙤",
+ "\\mbfitsansp" : "𝙥",
+ "\\mbfitsansq" : "𝙦",
+ "\\mbfitsansr" : "𝙧",
+ "\\mbfitsanss" : "𝙨",
+ "\\mbfitsanst" : "𝙩",
+ "\\mbfitsansu" : "𝙪",
+ "\\mbfitsansv" : "𝙫",
+ "\\mbfitsansw" : "𝙬",
+ "\\mbfitsansx" : "𝙭",
+ "\\mbfitsansy" : "𝙮",
+ "\\mbfitsansz" : "𝙯",
+ "\\mttA" : "𝙰",
+ "\\mttB" : "𝙱",
+ "\\mttC" : "𝙲",
+ "\\mttD" : "𝙳",
+ "\\mttE" : "𝙴",
+ "\\mttF" : "𝙵",
+ "\\mttG" : "𝙶",
+ "\\mttH" : "𝙷",
+ "\\mttI" : "𝙸",
+ "\\mttJ" : "𝙹",
+ "\\mttK" : "𝙺",
+ "\\mttL" : "𝙻",
+ "\\mttM" : "𝙼",
+ "\\mttN" : "𝙽",
+ "\\mttO" : "𝙾",
+ "\\mttP" : "𝙿",
+ "\\mttQ" : "𝚀",
+ "\\mttR" : "𝚁",
+ "\\mttS" : "𝚂",
+ "\\mttT" : "𝚃",
+ "\\mttU" : "𝚄",
+ "\\mttV" : "𝚅",
+ "\\mttW" : "𝚆",
+ "\\mttX" : "𝚇",
+ "\\mttY" : "𝚈",
+ "\\mttZ" : "𝚉",
+ "\\mtta" : "𝚊",
+ "\\mttb" : "𝚋",
+ "\\mttc" : "𝚌",
+ "\\mttd" : "𝚍",
+ "\\mtte" : "𝚎",
+ "\\mttf" : "𝚏",
+ "\\mttg" : "𝚐",
+ "\\mtth" : "𝚑",
+ "\\mtti" : "𝚒",
+ "\\mttj" : "𝚓",
+ "\\mttk" : "𝚔",
+ "\\mttl" : "𝚕",
+ "\\mttm" : "𝚖",
+ "\\mttn" : "𝚗",
+ "\\mtto" : "𝚘",
+ "\\mttp" : "𝚙",
+ "\\mttq" : "𝚚",
+ "\\mttr" : "𝚛",
+ "\\mtts" : "𝚜",
+ "\\mttt" : "𝚝",
+ "\\mttu" : "𝚞",
+ "\\mttv" : "𝚟",
+ "\\mttw" : "𝚠",
+ "\\mttx" : "𝚡",
+ "\\mtty" : "𝚢",
+ "\\mttz" : "𝚣",
+ "\\mbfAlpha" : "𝚨",
+ "\\mbfBeta" : "𝚩",
+ "\\mbfGamma" : "𝚪",
+ "\\mbfDelta" : "𝚫",
+ "\\mbfEpsilon" : "𝚬",
+ "\\mbfZeta" : "𝚭",
+ "\\mbfEta" : "𝚮",
+ "\\mbfTheta" : "𝚯",
+ "\\mbfIota" : "𝚰",
+ "\\mbfKappa" : "𝚱",
+ "\\mbfLambda" : "𝚲",
+ "\\mbfMu" : "𝚳",
+ "\\mbfNu" : "𝚴",
+ "\\mbfXi" : "𝚵",
+ "\\mbfOmicron" : "𝚶",
+ "\\mbfPi" : "𝚷",
+ "\\mbfRho" : "𝚸",
+ "\\mbfvarTheta" : "𝚹",
+ "\\mbfSigma" : "𝚺",
+ "\\mbfTau" : "𝚻",
+ "\\mbfUpsilon" : "𝚼",
+ "\\mbfPhi" : "𝚽",
+ "\\mbfChi" : "𝚾",
+ "\\mbfPsi" : "𝚿",
+ "\\mbfOmega" : "𝛀",
+ "\\mbfalpha" : "𝛂",
+ "\\mbfbeta" : "𝛃",
+ "\\mbfgamma" : "𝛄",
+ "\\mbfdelta" : "𝛅",
+ "\\mbfepsilon" : "𝛆",
+ "\\mbfzeta" : "𝛇",
+ "\\mbfeta" : "𝛈",
+ "\\mbftheta" : "𝛉",
+ "\\mbfiota" : "𝛊",
+ "\\mbfkappa" : "𝛋",
+ "\\mbflambda" : "𝛌",
+ "\\mbfmu" : "𝛍",
+ "\\mbfnu" : "𝛎",
+ "\\mbfxi" : "𝛏",
+ "\\mbfomicron" : "𝛐",
+ "\\mbfpi" : "𝛑",
+ "\\mbfrho" : "𝛒",
+ "\\mbfvarsigma" : "𝛓",
+ "\\mbfsigma" : "𝛔",
+ "\\mbftau" : "𝛕",
+ "\\mbfupsilon" : "𝛖",
+ "\\mbfvarphi" : "𝛗",
+ "\\mbfchi" : "𝛘",
+ "\\mbfpsi" : "𝛙",
+ "\\mbfomega" : "𝛚",
+ "\\mbfvarepsilon" : "𝛜",
+ "\\mbfvartheta" : "𝛝",
+ "\\mbfvarkappa" : "𝛞",
+ "\\mbfphi" : "𝛟",
+ "\\mbfvarrho" : "𝛠",
+ "\\mbfvarpi" : "𝛡",
+ "\\mitAlpha" : "𝛢",
+ "\\mitBeta" : "𝛣",
+ "\\mitGamma" : "𝛤",
+ "\\mitDelta" : "𝛥",
+ "\\mitEpsilon" : "𝛦",
+ "\\mitZeta" : "𝛧",
+ "\\mitEta" : "𝛨",
+ "\\mitTheta" : "𝛩",
+ "\\mitIota" : "𝛪",
+ "\\mitKappa" : "𝛫",
+ "\\mitLambda" : "𝛬",
+ "\\mitMu" : "𝛭",
+ "\\mitNu" : "𝛮",
+ "\\mitXi" : "𝛯",
+ "\\mitOmicron" : "𝛰",
+ "\\mitPi" : "𝛱",
+ "\\mitRho" : "𝛲",
+ "\\mitvarTheta" : "𝛳",
+ "\\mitSigma" : "𝛴",
+ "\\mitTau" : "𝛵",
+ "\\mitUpsilon" : "𝛶",
+ "\\mitPhi" : "𝛷",
+ "\\mitChi" : "𝛸",
+ "\\mitPsi" : "𝛹",
+ "\\mitOmega" : "𝛺",
+ "\\mitalpha" : "𝛼",
+ "\\mitbeta" : "𝛽",
+ "\\mitgamma" : "𝛾",
+ "\\mitdelta" : "𝛿",
+ "\\mitepsilon" : "𝜀",
+ "\\mitzeta" : "𝜁",
+ "\\miteta" : "𝜂",
+ "\\mittheta" : "𝜃",
+ "\\mitiota" : "𝜄",
+ "\\mitkappa" : "𝜅",
+ "\\mitlambda" : "𝜆",
+ "\\mitmu" : "𝜇",
+ "\\mitnu" : "𝜈",
+ "\\mitxi" : "𝜉",
+ "\\mitomicron" : "𝜊",
+ "\\mitpi" : "𝜋",
+ "\\mitrho" : "𝜌",
+ "\\mitvarsigma" : "𝜍",
+ "\\mitsigma" : "𝜎",
+ "\\mittau" : "𝜏",
+ "\\mitupsilon" : "𝜐",
+ "\\mitphi" : "𝜑",
+ "\\mitchi" : "𝜒",
+ "\\mitpsi" : "𝜓",
+ "\\mitomega" : "𝜔",
+ "\\mitvarepsilon" : "𝜖",
+ "\\mitvartheta" : "𝜗",
+ "\\mitvarkappa" : "𝜘",
+ "\\mitvarphi" : "𝜙",
+ "\\mitvarrho" : "𝜚",
+ "\\mitvarpi" : "𝜛",
+ "\\mbfitAlpha" : "𝜜",
+ "\\mbfitBeta" : "𝜝",
+ "\\mbfitGamma" : "𝜞",
+ "\\mbfitDelta" : "𝜟",
+ "\\mbfitEpsilon" : "𝜠",
+ "\\mbfitZeta" : "𝜡",
+ "\\mbfitEta" : "𝜢",
+ "\\mbfitTheta" : "𝜣",
+ "\\mbfitIota" : "𝜤",
+ "\\mbfitKappa" : "𝜥",
+ "\\mbfitLambda" : "𝜦",
+ "\\mbfitMu" : "𝜧",
+ "\\mbfitNu" : "𝜨",
+ "\\mbfitXi" : "𝜩",
+ "\\mbfitOmicron" : "𝜪",
+ "\\mbfitPi" : "𝜫",
+ "\\mbfitRho" : "𝜬",
+ "\\mbfitvarTheta" : "𝜭",
+ "\\mbfitSigma" : "𝜮",
+ "\\mbfitTau" : "𝜯",
+ "\\mbfitUpsilon" : "𝜰",
+ "\\mbfitPhi" : "𝜱",
+ "\\mbfitChi" : "𝜲",
+ "\\mbfitPsi" : "𝜳",
+ "\\mbfitOmega" : "𝜴",
+ "\\mbfitalpha" : "𝜶",
+ "\\mbfitbeta" : "𝜷",
+ "\\mbfitgamma" : "𝜸",
+ "\\mbfitdelta" : "𝜹",
+ "\\mbfitepsilon" : "𝜺",
+ "\\mbfitzeta" : "𝜻",
+ "\\mbfiteta" : "𝜼",
+ "\\mbfittheta" : "𝜽",
+ "\\mbfitiota" : "𝜾",
+ "\\mbfitkappa" : "𝜿",
+ "\\mbfitlambda" : "𝝀",
+ "\\mbfitmu" : "𝝁",
+ "\\mbfitnu" : "𝝂",
+ "\\mbfitxi" : "𝝃",
+ "\\mbfitomicron" : "𝝄",
+ "\\mbfitpi" : "𝝅",
+ "\\mbfitrho" : "𝝆",
+ "\\mbfitvarsigma" : "𝝇",
+ "\\mbfitsigma" : "𝝈",
+ "\\mbfittau" : "𝝉",
+ "\\mbfitupsilon" : "𝝊",
+ "\\mbfitphi" : "𝝋",
+ "\\mbfitchi" : "𝝌",
+ "\\mbfitpsi" : "𝝍",
+ "\\mbfitomega" : "𝝎",
+ "\\mbfitvarepsilon" : "𝝐",
+ "\\mbfitvartheta" : "𝝑",
+ "\\mbfitvarkappa" : "𝝒",
+ "\\mbfitvarphi" : "𝝓",
+ "\\mbfitvarrho" : "𝝔",
+ "\\mbfitvarpi" : "𝝕",
+ "\\mbfsansAlpha" : "𝝖",
+ "\\mbfsansBeta" : "𝝗",
+ "\\mbfsansGamma" : "𝝘",
+ "\\mbfsansDelta" : "𝝙",
+ "\\mbfsansEpsilon" : "𝝚",
+ "\\mbfsansZeta" : "𝝛",
+ "\\mbfsansEta" : "𝝜",
+ "\\mbfsansTheta" : "𝝝",
+ "\\mbfsansIota" : "𝝞",
+ "\\mbfsansKappa" : "𝝟",
+ "\\mbfsansLambda" : "𝝠",
+ "\\mbfsansMu" : "𝝡",
+ "\\mbfsansNu" : "𝝢",
+ "\\mbfsansXi" : "𝝣",
+ "\\mbfsansOmicron" : "𝝤",
+ "\\mbfsansPi" : "𝝥",
+ "\\mbfsansRho" : "𝝦",
+ "\\mbfsansvarTheta" : "𝝧",
+ "\\mbfsansSigma" : "𝝨",
+ "\\mbfsansTau" : "𝝩",
+ "\\mbfsansUpsilon" : "𝝪",
+ "\\mbfsansPhi" : "𝝫",
+ "\\mbfsansChi" : "𝝬",
+ "\\mbfsansPsi" : "𝝭",
+ "\\mbfsansOmega" : "𝝮",
+ "\\mbfsansalpha" : "𝝰",
+ "\\mbfsansbeta" : "𝝱",
+ "\\mbfsansgamma" : "𝝲",
+ "\\mbfsansdelta" : "𝝳",
+ "\\mbfsansepsilon" : "𝝴",
+ "\\mbfsanszeta" : "𝝵",
+ "\\mbfsanseta" : "𝝶",
+ "\\mbfsanstheta" : "𝝷",
+ "\\mbfsansiota" : "𝝸",
+ "\\mbfsanskappa" : "𝝹",
+ "\\mbfsanslambda" : "𝝺",
+ "\\mbfsansmu" : "𝝻",
+ "\\mbfsansnu" : "𝝼",
+ "\\mbfsansxi" : "𝝽",
+ "\\mbfsansomicron" : "𝝾",
+ "\\mbfsanspi" : "𝝿",
+ "\\mbfsansrho" : "𝞀",
+ "\\mbfsansvarsigma" : "𝞁",
+ "\\mbfsanssigma" : "𝞂",
+ "\\mbfsanstau" : "𝞃",
+ "\\mbfsansupsilon" : "𝞄",
+ "\\mbfsansphi" : "𝞅",
+ "\\mbfsanschi" : "𝞆",
+ "\\mbfsanspsi" : "𝞇",
+ "\\mbfsansomega" : "𝞈",
+ "\\mbfsansvarepsilon" : "𝞊",
+ "\\mbfsansvartheta" : "𝞋",
+ "\\mbfsansvarkappa" : "𝞌",
+ "\\mbfsansvarphi" : "𝞍",
+ "\\mbfsansvarrho" : "𝞎",
+ "\\mbfsansvarpi" : "𝞏",
+ "\\mbfitsansAlpha" : "𝞐",
+ "\\mbfitsansBeta" : "𝞑",
+ "\\mbfitsansGamma" : "𝞒",
+ "\\mbfitsansDelta" : "𝞓",
+ "\\mbfitsansEpsilon" : "𝞔",
+ "\\mbfitsansZeta" : "𝞕",
+ "\\mbfitsansEta" : "𝞖",
+ "\\mbfitsansTheta" : "𝞗",
+ "\\mbfitsansIota" : "𝞘",
+ "\\mbfitsansKappa" : "𝞙",
+ "\\mbfitsansLambda" : "𝞚",
+ "\\mbfitsansMu" : "𝞛",
+ "\\mbfitsansNu" : "𝞜",
+ "\\mbfitsansXi" : "𝞝",
+ "\\mbfitsansOmicron" : "𝞞",
+ "\\mbfitsansPi" : "𝞟",
+ "\\mbfitsansRho" : "𝞠",
+ "\\mbfitsansvarTheta" : "𝞡",
+ "\\mbfitsansSigma" : "𝞢",
+ "\\mbfitsansTau" : "𝞣",
+ "\\mbfitsansUpsilon" : "𝞤",
+ "\\mbfitsansPhi" : "𝞥",
+ "\\mbfitsansChi" : "𝞦",
+ "\\mbfitsansPsi" : "𝞧",
+ "\\mbfitsansOmega" : "𝞨",
+ "\\mbfitsansalpha" : "𝞪",
+ "\\mbfitsansbeta" : "𝞫",
+ "\\mbfitsansgamma" : "𝞬",
+ "\\mbfitsansdelta" : "𝞭",
+ "\\mbfitsansepsilon" : "𝞮",
+ "\\mbfitsanszeta" : "𝞯",
+ "\\mbfitsanseta" : "𝞰",
+ "\\mbfitsanstheta" : "𝞱",
+ "\\mbfitsansiota" : "𝞲",
+ "\\mbfitsanskappa" : "𝞳",
+ "\\mbfitsanslambda" : "𝞴",
+ "\\mbfitsansmu" : "𝞵",
+ "\\mbfitsansnu" : "𝞶",
+ "\\mbfitsansxi" : "𝞷",
+ "\\mbfitsansomicron" : "𝞸",
+ "\\mbfitsanspi" : "𝞹",
+ "\\mbfitsansrho" : "𝞺",
+ "\\mbfitsansvarsigma" : "𝞻",
+ "\\mbfitsanssigma" : "𝞼",
+ "\\mbfitsanstau" : "𝞽",
+ "\\mbfitsansupsilon" : "𝞾",
+ "\\mbfitsansphi" : "𝞿",
+ "\\mbfitsanschi" : "𝟀",
+ "\\mbfitsanspsi" : "𝟁",
+ "\\mbfitsansomega" : "𝟂",
+ "\\mbfitsansvarepsilon" : "𝟄",
+ "\\mbfitsansvartheta" : "𝟅",
+ "\\mbfitsansvarkappa" : "𝟆",
+ "\\mbfitsansvarphi" : "𝟇",
+ "\\mbfitsansvarrho" : "𝟈",
+ "\\mbfitsansvarpi" : "𝟉",
+ "\\mbfzero" : "𝟎",
+ "\\mbfone" : "𝟏",
+ "\\mbftwo" : "𝟐",
+ "\\mbfthree" : "𝟑",
+ "\\mbffour" : "𝟒",
+ "\\mbffive" : "𝟓",
+ "\\mbfsix" : "𝟔",
+ "\\mbfseven" : "𝟕",
+ "\\mbfeight" : "𝟖",
+ "\\mbfnine" : "𝟗",
+ "\\Bbbzero" : "𝟘",
+ "\\Bbbone" : "𝟙",
+ "\\Bbbtwo" : "𝟚",
+ "\\Bbbthree" : "𝟛",
+ "\\Bbbfour" : "𝟜",
+ "\\Bbbfive" : "𝟝",
+ "\\Bbbsix" : "𝟞",
+ "\\Bbbseven" : "𝟟",
+ "\\Bbbeight" : "𝟠",
+ "\\Bbbnine" : "𝟡",
+ "\\msanszero" : "𝟢",
+ "\\msansone" : "𝟣",
+ "\\msanstwo" : "𝟤",
+ "\\msansthree" : "𝟥",
+ "\\msansfour" : "𝟦",
+ "\\msansfive" : "𝟧",
+ "\\msanssix" : "𝟨",
+ "\\msansseven" : "𝟩",
+ "\\msanseight" : "𝟪",
+ "\\msansnine" : "𝟫",
+ "\\mbfsanszero" : "𝟬",
+ "\\mbfsansone" : "𝟭",
+ "\\mbfsanstwo" : "𝟮",
+ "\\mbfsansthree" : "𝟯",
+ "\\mbfsansfour" : "𝟰",
+ "\\mbfsansfive" : "𝟱",
+ "\\mbfsanssix" : "𝟲",
+ "\\mbfsansseven" : "𝟳",
+ "\\mbfsanseight" : "𝟴",
+ "\\mbfsansnine" : "𝟵",
+ "\\mttzero" : "𝟶",
+ "\\mttone" : "𝟷",
+ "\\mtttwo" : "𝟸",
+ "\\mttthree" : "𝟹",
+ "\\mttfour" : "𝟺",
+ "\\mttfive" : "𝟻",
+ "\\mttsix" : "𝟼",
+ "\\mttseven" : "𝟽",
+ "\\mtteight" : "𝟾",
+ "\\mttnine" : "𝟿",
+}
+
+
+reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
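
For context on how this table is consumed (the hunk above only restores authorship; the content is unchanged): latex_symbols drives tab-expansion of backslash escapes at the IPython prompt, and reverse_latex_symbol inverts the mapping so a Unicode character can be traced back to a LaTeX name. A minimal sketch of the two lookups, assuming the module is importable as IPython.core.latex_symbols — the helper function names below are illustrative, not part of the module:

    # Illustrative helpers over the generated tables; only the .get()
    # calls on the two dicts are real API, the function names are made up.
    from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol

    def expand_latex(name):
        # Forward lookup: a LaTeX escape such as "\\alpha" -> "α" (or None).
        return latex_symbols.get(name)

    def latex_name(char):
        # Reverse lookup: a Unicode character -> its LaTeX escape (or None).
        return reverse_latex_symbol.get(char)

    assert latex_name(expand_latex("\\mbfA")) == "\\mbfA"  # round trip

Note that the reverse table is built with a plain dict comprehension, so escapes sharing a target character (e.g. "\\hbar" and "\\Elzxh" both map to "ħ") collapse to a single, arbitrarily chosen reverse entry.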
diff --git a/contrib/python/ipython/py2/IPython/core/logger.py b/contrib/python/ipython/py2/IPython/core/logger.py
index ba5a88a73a..0e41db598f 100644
--- a/contrib/python/ipython/py2/IPython/core/logger.py
+++ b/contrib/python/ipython/py2/IPython/core/logger.py
@@ -1,221 +1,221 @@
-"""Logger class for IPython's logging facilities.
-"""
-from __future__ import print_function
-
-#*****************************************************************************
-# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
-# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-#****************************************************************************
-# Modules and globals
-
-# Python standard modules
-import glob
-import io
-import os
-import time
-
-from IPython.utils.py3compat import str_to_unicode
-
-#****************************************************************************
-# FIXME: This class isn't a mixin anymore, but it still needs attributes from
-# ipython and does input cache management. Finish cleanup later...
-
-class Logger(object):
- """A Logfile class with different policies for file creation"""
-
- def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
- logmode='over'):
-
- # this is the full ipython instance, we need some attributes from it
- # which won't exist until later. What a mess, clean up later...
- self.home_dir = home_dir
-
- self.logfname = logfname
- self.loghead = loghead
- self.logmode = logmode
- self.logfile = None
-
- # Whether to log raw or processed input
- self.log_raw_input = False
-
- # whether to also log output
- self.log_output = False
-
- # whether to put timestamps before each log entry
- self.timestamp = False
-
- # activity control flags
- self.log_active = False
-
- # logmode is a validated property
- def _set_mode(self,mode):
- if mode not in ['append','backup','global','over','rotate']:
- raise ValueError('invalid log mode %s given' % mode)
- self._logmode = mode
-
- def _get_mode(self):
- return self._logmode
-
- logmode = property(_get_mode,_set_mode)
-
- def logstart(self, logfname=None, loghead=None, logmode=None,
- log_output=False, timestamp=False, log_raw_input=False):
- """Generate a new log-file with a default header.
-
- Raises RuntimeError if the log has already been started"""
-
- if self.logfile is not None:
- raise RuntimeError('Log file is already active: %s' %
- self.logfname)
-
- # The parameters can override constructor defaults
- if logfname is not None: self.logfname = logfname
- if loghead is not None: self.loghead = loghead
- if logmode is not None: self.logmode = logmode
-
- # Parameters not part of the constructor
- self.timestamp = timestamp
- self.log_output = log_output
- self.log_raw_input = log_raw_input
-
- # init depending on the log mode requested
- isfile = os.path.isfile
- logmode = self.logmode
-
- if logmode == 'append':
- self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
-
- elif logmode == 'backup':
- if isfile(self.logfname):
- backup_logname = self.logfname+'~'
- # Manually remove any old backup, since os.rename may fail
- # under Windows.
- if isfile(backup_logname):
- os.remove(backup_logname)
- os.rename(self.logfname,backup_logname)
- self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
-
- elif logmode == 'global':
- self.logfname = os.path.join(self.home_dir,self.logfname)
- self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
-
- elif logmode == 'over':
- if isfile(self.logfname):
- os.remove(self.logfname)
- self.logfile = io.open(self.logfname,'w', encoding='utf-8')
-
- elif logmode == 'rotate':
- if isfile(self.logfname):
- if isfile(self.logfname+'.001~'):
- old = glob.glob(self.logfname+'.*~')
- old.sort()
- old.reverse()
- for f in old:
- root, ext = os.path.splitext(f)
- num = int(ext[1:-1])+1
- os.rename(f, root+'.'+repr(num).zfill(3)+'~')
- os.rename(self.logfname, self.logfname+'.001~')
- self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
-
- if logmode != 'append':
- self.logfile.write(self.loghead)
-
- self.logfile.flush()
- self.log_active = True
-
- def switch_log(self,val):
- """Switch logging on/off. val should be ONLY a boolean."""
-
- if val not in [False,True,0,1]:
- raise ValueError('Call switch_log ONLY with a boolean argument, '
- 'not with: %s' % val)
-
- label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
-
- if self.logfile is None:
- print("""
-Logging hasn't been started yet (use logstart for that).
-
-%logon/%logoff are for temporarily starting and stopping logging for a logfile
-which already exists. But you must first start the logging process with
-%logstart (optionally giving a logfile name).""")
-
- else:
- if self.log_active == val:
- print('Logging is already',label[val])
- else:
- print('Switching logging',label[val])
- self.log_active = not self.log_active
- self.log_active_out = self.log_active
-
- def logstate(self):
- """Print a status message about the logger."""
- if self.logfile is None:
- print('Logging has not been activated.')
- else:
- state = self.log_active and 'active' or 'temporarily suspended'
- print('Filename :', self.logfname)
- print('Mode :', self.logmode)
- print('Output logging :', self.log_output)
- print('Raw input log :', self.log_raw_input)
- print('Timestamping :', self.timestamp)
- print('State :', state)
-
- def log(self, line_mod, line_ori):
- """Write the sources to a log.
-
- Inputs:
-
- - line_mod: possibly modified input, such as the transformations made
- by input prefilters or input handlers of various kinds. This should
- always be valid Python.
-
- - line_ori: unmodified input line from the user. This is not
- necessarily valid Python.
- """
-
- # Write the log line, but decide which one according to the
- # log_raw_input flag, set when the log is started.
- if self.log_raw_input:
- self.log_write(line_ori)
- else:
- self.log_write(line_mod)
-
- def log_write(self, data, kind='input'):
- """Write data to the log file, if active"""
-
- #print 'data: %r' % data # dbg
- if self.log_active and data:
- write = self.logfile.write
- if kind=='input':
- if self.timestamp:
- write(str_to_unicode(time.strftime('# %a, %d %b %Y %H:%M:%S\n',
- time.localtime())))
- write(data)
- elif kind=='output' and self.log_output:
- odata = u'\n'.join([u'#[Out]# %s' % s
- for s in data.splitlines()])
- write(u'%s\n' % odata)
- self.logfile.flush()
-
- def logstop(self):
- """Fully stop logging and close log file.
-
- In order to start logging again, a new logstart() call needs to be
- made, possibly (though not necessarily) with a new filename, mode and
- other options."""
-
- if self.logfile is not None:
- self.logfile.close()
- self.logfile = None
- else:
- print("Logging hadn't been started.")
- self.log_active = False
-
- # For backwards compatibility, in case anyone was using this.
- close_log = logstop
+"""Logger class for IPython's logging facilities.
+"""
+from __future__ import print_function
+
+#*****************************************************************************
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+#****************************************************************************
+# Modules and globals
+
+# Python standard modules
+import glob
+import io
+import os
+import time
+
+from IPython.utils.py3compat import str_to_unicode
+
+#****************************************************************************
+# FIXME: This class isn't a mixin anymore, but it still needs attributes from
+# ipython and does input cache management. Finish cleanup later...
+
+class Logger(object):
+ """A Logfile class with different policies for file creation"""
+
+ def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
+ logmode='over'):
+
+ # this is the full ipython instance, we need some attributes from it
+ # which won't exist until later. What a mess, clean up later...
+ self.home_dir = home_dir
+
+ self.logfname = logfname
+ self.loghead = loghead
+ self.logmode = logmode
+ self.logfile = None
+
+ # Whether to log raw or processed input
+ self.log_raw_input = False
+
+ # whether to also log output
+ self.log_output = False
+
+ # whether to put timestamps before each log entry
+ self.timestamp = False
+
+ # activity control flags
+ self.log_active = False
+
+ # logmode is a validated property
+ def _set_mode(self,mode):
+ if mode not in ['append','backup','global','over','rotate']:
+ raise ValueError('invalid log mode %s given' % mode)
+ self._logmode = mode
+
+ def _get_mode(self):
+ return self._logmode
+
+ logmode = property(_get_mode,_set_mode)
+
+ def logstart(self, logfname=None, loghead=None, logmode=None,
+ log_output=False, timestamp=False, log_raw_input=False):
+ """Generate a new log-file with a default header.
+
+ Raises RuntimeError if the log has already been started"""
+
+ if self.logfile is not None:
+ raise RuntimeError('Log file is already active: %s' %
+ self.logfname)
+
+ # The parameters can override constructor defaults
+ if logfname is not None: self.logfname = logfname
+ if loghead is not None: self.loghead = loghead
+ if logmode is not None: self.logmode = logmode
+
+ # Parameters not part of the constructor
+ self.timestamp = timestamp
+ self.log_output = log_output
+ self.log_raw_input = log_raw_input
+
+ # init depending on the log mode requested
+ isfile = os.path.isfile
+ logmode = self.logmode
+
+ if logmode == 'append':
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'backup':
+ if isfile(self.logfname):
+ backup_logname = self.logfname+'~'
+ # Manually remove any old backup, since os.rename may fail
+ # under Windows.
+ if isfile(backup_logname):
+ os.remove(backup_logname)
+ os.rename(self.logfname,backup_logname)
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ elif logmode == 'global':
+ self.logfname = os.path.join(self.home_dir,self.logfname)
+ self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
+
+ elif logmode == 'over':
+ if isfile(self.logfname):
+ os.remove(self.logfname)
+ self.logfile = io.open(self.logfname,'w', encoding='utf-8')
+
+ elif logmode == 'rotate':
+ if isfile(self.logfname):
+ if isfile(self.logfname+'.001~'):
+ old = glob.glob(self.logfname+'.*~')
+ old.sort()
+ old.reverse()
+ for f in old:
+ root, ext = os.path.splitext(f)
+ num = int(ext[1:-1])+1
+ os.rename(f, root+'.'+repr(num).zfill(3)+'~')
+ os.rename(self.logfname, self.logfname+'.001~')
+ self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
+
+ if logmode != 'append':
+ self.logfile.write(self.loghead)
+
+ self.logfile.flush()
+ self.log_active = True
+
+ def switch_log(self,val):
+ """Switch logging on/off. val should be ONLY a boolean."""
+
+ if val not in [False,True,0,1]:
+ raise ValueError('Call switch_log ONLY with a boolean argument, '
+ 'not with: %s' % val)
+
+ label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
+
+ if self.logfile is None:
+ print("""
+Logging hasn't been started yet (use logstart for that).
+
+%logon/%logoff are for temporarily starting and stopping logging for a logfile
+which already exists. But you must first start the logging process with
+%logstart (optionally giving a logfile name).""")
+
+ else:
+ if self.log_active == val:
+ print('Logging is already',label[val])
+ else:
+ print('Switching logging',label[val])
+ self.log_active = not self.log_active
+ self.log_active_out = self.log_active
+
+ def logstate(self):
+ """Print a status message about the logger."""
+ if self.logfile is None:
+ print('Logging has not been activated.')
+ else:
+ state = self.log_active and 'active' or 'temporarily suspended'
+ print('Filename :', self.logfname)
+ print('Mode :', self.logmode)
+ print('Output logging :', self.log_output)
+ print('Raw input log :', self.log_raw_input)
+ print('Timestamping :', self.timestamp)
+ print('State :', state)
+
+ def log(self, line_mod, line_ori):
+ """Write the sources to a log.
+
+ Inputs:
+
+ - line_mod: possibly modified input, such as the transformations made
+ by input prefilters or input handlers of various kinds. This should
+ always be valid Python.
+
+ - line_ori: unmodified input line from the user. This is not
+ necessarily valid Python.
+ """
+
+ # Write the log line, but decide which one according to the
+ # log_raw_input flag, set when the log is started.
+ if self.log_raw_input:
+ self.log_write(line_ori)
+ else:
+ self.log_write(line_mod)
+
+ def log_write(self, data, kind='input'):
+ """Write data to the log file, if active"""
+
+ #print 'data: %r' % data # dbg
+ if self.log_active and data:
+ write = self.logfile.write
+ if kind=='input':
+ if self.timestamp:
+ write(str_to_unicode(time.strftime('# %a, %d %b %Y %H:%M:%S\n',
+ time.localtime())))
+ write(data)
+ elif kind=='output' and self.log_output:
+ odata = u'\n'.join([u'#[Out]# %s' % s
+ for s in data.splitlines()])
+ write(u'%s\n' % odata)
+ self.logfile.flush()
+
+ def logstop(self):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new logstart() call needs to be
+ made, possibly (though not necessarily) with a new filename, mode and
+ other options."""
+
+ if self.logfile is not None:
+ self.logfile.close()
+ self.logfile = None
+ else:
+ print("Logging hadn't been started.")
+ self.log_active = False
+
+ # For backwards compatibility, in case anyone was using this.
+ close_log = logstop
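
The Logger restored above is normally created by the shell and driven through the %logstart / %logon / %logoff / %logstop magics, but its API can also be exercised directly. A hedged usage sketch with made-up file names — nothing below is IPython's own code:

    # Example only: paths and inputs are invented for illustration.
    from IPython.core.logger import Logger

    lg = Logger(home_dir='/tmp', logfname='/tmp/session.log', logmode='rotate')
    lg.logstart(log_output=True, timestamp=True)  # opens the file, writes the header
    lg.log_write(u'x = 1\n')                      # kind='input' is the default; timestamped
    lg.log_write(u'1', kind='output')             # written as '#[Out]# 1' since log_output is on
    lg.logstate()                                 # prints filename, mode, flags and state
    lg.logstop()                                  # closes the file; a new logstart() reopens

The 'rotate' mode shown here keeps numbered backups (session.log.001~, session.log.002~, ...), while 'over' truncates, 'append' extends, 'backup' keeps a single '~' copy, and 'global' resolves the filename against home_dir before appending.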
diff --git a/contrib/python/ipython/py2/IPython/core/macro.py b/contrib/python/ipython/py2/IPython/core/macro.py
index 803236ffe5..9032706d2b 100644
--- a/contrib/python/ipython/py2/IPython/core/macro.py
+++ b/contrib/python/ipython/py2/IPython/core/macro.py
@@ -1,57 +1,57 @@
-"""Support for interactive macros in IPython"""
-
-#*****************************************************************************
-# Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-import re
-
-from IPython.utils import py3compat
-from IPython.utils.encoding import DEFAULT_ENCODING
-
-coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
-
-class Macro(object):
- """Simple class to store the value of macros as strings.
-
- Macro is just a callable that executes a string of IPython
- input when called.
- """
-
- def __init__(self,code):
- """store the macro value, as a single string which can be executed"""
- lines = []
- enc = None
- for line in code.splitlines():
- coding_match = coding_declaration.match(line)
- if coding_match:
- enc = coding_match.group(1)
- else:
- lines.append(line)
- code = "\n".join(lines)
- if isinstance(code, bytes):
- code = code.decode(enc or DEFAULT_ENCODING)
- self.value = code + '\n'
-
- def __str__(self):
- return py3compat.unicode_to_str(self.value)
-
- def __unicode__(self):
- return self.value
-
- def __repr__(self):
- return 'IPython.macro.Macro(%s)' % repr(self.value)
-
- def __getstate__(self):
- """ needed for safe pickling via %store """
- return {'value': self.value}
-
- def __add__(self, other):
- if isinstance(other, Macro):
- return Macro(self.value + other.value)
- elif isinstance(other, py3compat.string_types):
- return Macro(self.value + other)
- raise TypeError
+"""Support for interactive macros in IPython"""
+
+#*****************************************************************************
+# Copyright (C) 2001-2005 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import re
+
+from IPython.utils import py3compat
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
+
+class Macro(object):
+ """Simple class to store the value of macros as strings.
+
+ Macro is just a callable that executes a string of IPython
+ input when called.
+ """
+
+ def __init__(self,code):
+ """store the macro value, as a single string which can be executed"""
+ lines = []
+ enc = None
+ for line in code.splitlines():
+ coding_match = coding_declaration.match(line)
+ if coding_match:
+ enc = coding_match.group(1)
+ else:
+ lines.append(line)
+ code = "\n".join(lines)
+ if isinstance(code, bytes):
+ code = code.decode(enc or DEFAULT_ENCODING)
+ self.value = code + '\n'
+
+ def __str__(self):
+ return py3compat.unicode_to_str(self.value)
+
+ def __unicode__(self):
+ return self.value
+
+ def __repr__(self):
+ return 'IPython.macro.Macro(%s)' % repr(self.value)
+
+ def __getstate__(self):
+ """ needed for safe pickling via %store """
+ return {'value': self.value}
+
+ def __add__(self, other):
+ if isinstance(other, Macro):
+ return Macro(self.value + other.value)
+ elif isinstance(other, py3compat.string_types):
+ return Macro(self.value + other)
+ raise TypeError
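The Macro class above is small enough to exercise directly; a quick sketch of
the semantics it implements:

    from IPython.core.macro import Macro

    m = Macro("# coding: utf-8\na = 1\nprint(a)\n")
    # The coding declaration is stripped and the value ends with a newline:
    assert m.value == "a = 1\nprint(a)\n"

    # __add__ concatenates with another Macro or with a plain string:
    m2 = m + "b = a + 1\n"
    assert isinstance(m2, Macro)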
diff --git a/contrib/python/ipython/py2/IPython/core/magic.py b/contrib/python/ipython/py2/IPython/core/magic.py
index 97e7b4291e..61a929fd23 100644
--- a/contrib/python/ipython/py2/IPython/core/magic.py
+++ b/contrib/python/ipython/py2/IPython/core/magic.py
@@ -1,680 +1,680 @@
-# encoding: utf-8
-"""Magic functions for InteractiveShell.
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
-# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
-# Copyright (C) 2008 The IPython Development Team
-
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-import os
-import re
-import sys
-import types
-from getopt import getopt, GetoptError
-
-from traitlets.config.configurable import Configurable
-from IPython.core import oinspect
-from IPython.core.error import UsageError
-from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
-from decorator import decorator
-from IPython.utils.ipstruct import Struct
-from IPython.utils.process import arg_split
-from IPython.utils.py3compat import string_types, iteritems
-from IPython.utils.text import dedent
+# encoding: utf-8
+"""Magic functions for InteractiveShell.
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
+# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
+# Copyright (C) 2008 The IPython Development Team
+
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+import types
+from getopt import getopt, GetoptError
+
+from traitlets.config.configurable import Configurable
+from IPython.core import oinspect
+from IPython.core.error import UsageError
+from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
+from decorator import decorator
+from IPython.utils.ipstruct import Struct
+from IPython.utils.process import arg_split
+from IPython.utils.py3compat import string_types, iteritems
+from IPython.utils.text import dedent
from traitlets import Bool, Dict, Instance, observe
from logging import error
-
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-
-# A dict we'll use for each class that has magics, used as temporary storage to
-# pass information between the @line/cell_magic method decorators and the
-# @magics_class class decorator, because the method decorators have no
-# access to the class when they run. See for more details:
-# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
-
-magics = dict(line={}, cell={})
-
-magic_kinds = ('line', 'cell')
-magic_spec = ('line', 'cell', 'line_cell')
-magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
-
-#-----------------------------------------------------------------------------
-# Utility classes and functions
-#-----------------------------------------------------------------------------
-
-class Bunch: pass
-
-
-def on_off(tag):
- """Return an ON/OFF string for a 1/0 input. Simple utility function."""
- return ['OFF','ON'][tag]
-
-
-def compress_dhist(dh):
- """Compress a directory history into a new one with at most 20 entries.
-
- Return a new list made from the first and last 10 elements of dhist after
- removal of duplicates.
- """
- head, tail = dh[:-10], dh[-10:]
-
- newhead = []
- done = set()
- for h in head:
- if h in done:
- continue
- newhead.append(h)
- done.add(h)
-
- return newhead + tail
-
-
-def needs_local_scope(func):
- """Decorator to mark magic functions which need to local scope to run."""
- func.needs_local_scope = True
- return func
-
-#-----------------------------------------------------------------------------
-# Class and method decorators for registering magics
-#-----------------------------------------------------------------------------
-
-def magics_class(cls):
- """Class decorator for all subclasses of the main Magics class.
-
- Any class that subclasses Magics *must* also apply this decorator, to
- ensure that all the methods that have been decorated as line/cell magics
- get correctly registered in the class instance. This is necessary because
- when method decorators run, the class does not exist yet, so they
- temporarily store their information into a module global. Application of
- this class decorator copies that global data to the class instance and
- clears the global.
-
- Obviously, this mechanism is not thread-safe, which means that the
- *creation* of subclasses of Magic should only be done in a single-thread
- context. Instantiation of the classes has no restrictions. Given that
- these classes are typically created at IPython startup time and before user
- application code becomes active, in practice this should not pose any
- problems.
- """
- cls.registered = True
- cls.magics = dict(line = magics['line'],
- cell = magics['cell'])
- magics['line'] = {}
- magics['cell'] = {}
- return cls
-
-
-def record_magic(dct, magic_kind, magic_name, func):
- """Utility function to store a function as a magic of a specific kind.
-
- Parameters
- ----------
- dct : dict
- A dictionary with 'line' and 'cell' subdicts.
-
- magic_kind : str
- Kind of magic to be stored.
-
- magic_name : str
- Key to store the magic as.
-
- func : function
- Callable object to store.
- """
- if magic_kind == 'line_cell':
- dct['line'][magic_name] = dct['cell'][magic_name] = func
- else:
- dct[magic_kind][magic_name] = func
-
-
-def validate_type(magic_kind):
- """Ensure that the given magic_kind is valid.
-
- Check that the given magic_kind is one of the accepted spec types (stored
- in the global `magic_spec`), raise ValueError otherwise.
- """
- if magic_kind not in magic_spec:
-        raise ValueError('magic_kind must be one of %s, %s given' %
-                         (magic_spec, magic_kind))
-
-
-# The docstrings for the decorator below will be fairly similar for the two
-# types (method and function), so we generate them here once and reuse the
-# templates below.
-_docstring_template = \
-"""Decorate the given {0} as {1} magic.
-
-The decorator can be used with or without arguments, as follows.
-
-i) without arguments: it will create a {1} magic named as the {0} being
-decorated::
-
- @deco
- def foo(...)
-
-will create a {1} magic named `foo`.
-
-ii) with one string argument: which will be used as the actual name of the
-resulting magic::
-
- @deco('bar')
- def foo(...)
-
-will create a {1} magic named `bar`.
-"""
-
-# These two are decorator factories. While they are conceptually very similar,
-# there are enough differences in the details that it's simpler to have them
-# written as completely standalone functions rather than trying to share code
-# and make a single one with convoluted logic.
-
-def _method_magic_marker(magic_kind):
- """Decorator factory for methods in Magics subclasses.
- """
-
- validate_type(magic_kind)
-
- # This is a closure to capture the magic_kind. We could also use a class,
- # but it's overkill for just that one bit of state.
- def magic_deco(arg):
- call = lambda f, *a, **k: f(*a, **k)
-
- if callable(arg):
- # "Naked" decorator call (just @foo, no args)
- func = arg
- name = func.__name__
- retval = decorator(call, func)
- record_magic(magics, magic_kind, name, name)
- elif isinstance(arg, string_types):
- # Decorator called with arguments (@foo('bar'))
- name = arg
- def mark(func, *a, **kw):
- record_magic(magics, magic_kind, name, func.__name__)
- return decorator(call, func)
- retval = mark
- else:
- raise TypeError("Decorator can only be called with "
- "string or function")
- return retval
-
- # Ensure the resulting decorator has a usable docstring
- magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
- return magic_deco
-
-
-def _function_magic_marker(magic_kind):
- """Decorator factory for standalone functions.
- """
- validate_type(magic_kind)
-
- # This is a closure to capture the magic_kind. We could also use a class,
- # but it's overkill for just that one bit of state.
- def magic_deco(arg):
- call = lambda f, *a, **k: f(*a, **k)
-
- # Find get_ipython() in the caller's namespace
- caller = sys._getframe(1)
- for ns in ['f_locals', 'f_globals', 'f_builtins']:
- get_ipython = getattr(caller, ns).get('get_ipython')
- if get_ipython is not None:
- break
- else:
- raise NameError('Decorator can only run in context where '
- '`get_ipython` exists')
-
- ip = get_ipython()
-
- if callable(arg):
- # "Naked" decorator call (just @foo, no args)
- func = arg
- name = func.__name__
- ip.register_magic_function(func, magic_kind, name)
- retval = decorator(call, func)
- elif isinstance(arg, string_types):
- # Decorator called with arguments (@foo('bar'))
- name = arg
- def mark(func, *a, **kw):
- ip.register_magic_function(func, magic_kind, name)
- return decorator(call, func)
- retval = mark
- else:
- raise TypeError("Decorator can only be called with "
- "string or function")
- return retval
-
- # Ensure the resulting decorator has a usable docstring
- ds = _docstring_template.format('function', magic_kind)
-
- ds += dedent("""
- Note: this decorator can only be used in a context where IPython is already
- active, so that the `get_ipython()` call succeeds. You can therefore use
- it in your startup files loaded after IPython initializes, but *not* in the
- IPython configuration file itself, which is executed before IPython is
- fully up and running. Any file located in the `startup` subdirectory of
- your configuration profile will be OK in this sense.
- """)
-
- magic_deco.__doc__ = ds
- return magic_deco
-
-
-# Create the actual decorators for public use
-
-# These three are used to decorate methods in class definitions
-line_magic = _method_magic_marker('line')
-cell_magic = _method_magic_marker('cell')
-line_cell_magic = _method_magic_marker('line_cell')
-
-# These three decorate standalone functions and perform the decoration
-# immediately. They can only run where get_ipython() works
-register_line_magic = _function_magic_marker('line')
-register_cell_magic = _function_magic_marker('cell')
-register_line_cell_magic = _function_magic_marker('line_cell')
-
-#-----------------------------------------------------------------------------
-# Core Magic classes
-#-----------------------------------------------------------------------------
-
-class MagicsManager(Configurable):
- """Object that handles all magic-related functionality for IPython.
- """
- # Non-configurable class attributes
-
- # A two-level dict, first keyed by magic type, then by magic function, and
- # holding the actual callable object as value. This is the dict used for
- # magic function dispatch
- magics = Dict()
-
- # A registry of the original objects that we've been given holding magics.
- registry = Dict()
-
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
-
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+# A dict we'll use for each class that has magics, used as temporary storage to
+# pass information between the @line/cell_magic method decorators and the
+# @magics_class class decorator, because the method decorators have no
+# access to the class when they run. See for more details:
+# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
+
+magics = dict(line={}, cell={})
+
+magic_kinds = ('line', 'cell')
+magic_spec = ('line', 'cell', 'line_cell')
+magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
+
+#-----------------------------------------------------------------------------
+# Utility classes and functions
+#-----------------------------------------------------------------------------
+
+class Bunch: pass
+
+
+def on_off(tag):
+ """Return an ON/OFF string for a 1/0 input. Simple utility function."""
+ return ['OFF','ON'][tag]
+
+
+def compress_dhist(dh):
+ """Compress a directory history into a new one with at most 20 entries.
+
+ Return a new list made from the first and last 10 elements of dhist after
+ removal of duplicates.
+ """
+ head, tail = dh[:-10], dh[-10:]
+
+ newhead = []
+ done = set()
+ for h in head:
+ if h in done:
+ continue
+ newhead.append(h)
+ done.add(h)
+
+ return newhead + tail
+
+
+def needs_local_scope(func):
+ """Decorator to mark magic functions which need to local scope to run."""
+ func.needs_local_scope = True
+ return func
+
+#-----------------------------------------------------------------------------
+# Class and method decorators for registering magics
+#-----------------------------------------------------------------------------
+
+def magics_class(cls):
+ """Class decorator for all subclasses of the main Magics class.
+
+ Any class that subclasses Magics *must* also apply this decorator, to
+ ensure that all the methods that have been decorated as line/cell magics
+ get correctly registered in the class instance. This is necessary because
+ when method decorators run, the class does not exist yet, so they
+ temporarily store their information into a module global. Application of
+ this class decorator copies that global data to the class instance and
+ clears the global.
+
+ Obviously, this mechanism is not thread-safe, which means that the
+ *creation* of subclasses of Magic should only be done in a single-thread
+ context. Instantiation of the classes has no restrictions. Given that
+ these classes are typically created at IPython startup time and before user
+ application code becomes active, in practice this should not pose any
+ problems.
+ """
+ cls.registered = True
+ cls.magics = dict(line = magics['line'],
+ cell = magics['cell'])
+ magics['line'] = {}
+ magics['cell'] = {}
+ return cls
+
+
+def record_magic(dct, magic_kind, magic_name, func):
+ """Utility function to store a function as a magic of a specific kind.
+
+ Parameters
+ ----------
+ dct : dict
+ A dictionary with 'line' and 'cell' subdicts.
+
+ magic_kind : str
+ Kind of magic to be stored.
+
+ magic_name : str
+ Key to store the magic as.
+
+ func : function
+ Callable object to store.
+ """
+ if magic_kind == 'line_cell':
+ dct['line'][magic_name] = dct['cell'][magic_name] = func
+ else:
+ dct[magic_kind][magic_name] = func
+
+
+def validate_type(magic_kind):
+ """Ensure that the given magic_kind is valid.
+
+ Check that the given magic_kind is one of the accepted spec types (stored
+ in the global `magic_spec`), raise ValueError otherwise.
+ """
+ if magic_kind not in magic_spec:
+        raise ValueError('magic_kind must be one of %s, %s given' %
+                         (magic_spec, magic_kind))
+
+
+# The docstrings for the decorator below will be fairly similar for the two
+# types (method and function), so we generate them here once and reuse the
+# templates below.
+_docstring_template = \
+"""Decorate the given {0} as {1} magic.
+
+The decorator can be used with or without arguments, as follows.
+
+i) without arguments: it will create a {1} magic named as the {0} being
+decorated::
+
+ @deco
+ def foo(...)
+
+will create a {1} magic named `foo`.
+
+ii) with one string argument: which will be used as the actual name of the
+resulting magic::
+
+ @deco('bar')
+ def foo(...)
+
+will create a {1} magic named `bar`.
+"""
+
+# These two are decorator factories. While they are conceptually very similar,
+# there are enough differences in the details that it's simpler to have them
+# written as completely standalone functions rather than trying to share code
+# and make a single one with convoluted logic.
+
+def _method_magic_marker(magic_kind):
+ """Decorator factory for methods in Magics subclasses.
+ """
+
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ call = lambda f, *a, **k: f(*a, **k)
+
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ retval = decorator(call, func)
+ record_magic(magics, magic_kind, name, name)
+ elif isinstance(arg, string_types):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ record_magic(magics, magic_kind, name, func.__name__)
+ return decorator(call, func)
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
+ return magic_deco
+
+
+def _function_magic_marker(magic_kind):
+ """Decorator factory for standalone functions.
+ """
+ validate_type(magic_kind)
+
+ # This is a closure to capture the magic_kind. We could also use a class,
+ # but it's overkill for just that one bit of state.
+ def magic_deco(arg):
+ call = lambda f, *a, **k: f(*a, **k)
+
+ # Find get_ipython() in the caller's namespace
+ caller = sys._getframe(1)
+ for ns in ['f_locals', 'f_globals', 'f_builtins']:
+ get_ipython = getattr(caller, ns).get('get_ipython')
+ if get_ipython is not None:
+ break
+ else:
+ raise NameError('Decorator can only run in context where '
+ '`get_ipython` exists')
+
+ ip = get_ipython()
+
+ if callable(arg):
+ # "Naked" decorator call (just @foo, no args)
+ func = arg
+ name = func.__name__
+ ip.register_magic_function(func, magic_kind, name)
+ retval = decorator(call, func)
+ elif isinstance(arg, string_types):
+ # Decorator called with arguments (@foo('bar'))
+ name = arg
+ def mark(func, *a, **kw):
+ ip.register_magic_function(func, magic_kind, name)
+ return decorator(call, func)
+ retval = mark
+ else:
+ raise TypeError("Decorator can only be called with "
+ "string or function")
+ return retval
+
+ # Ensure the resulting decorator has a usable docstring
+ ds = _docstring_template.format('function', magic_kind)
+
+ ds += dedent("""
+ Note: this decorator can only be used in a context where IPython is already
+ active, so that the `get_ipython()` call succeeds. You can therefore use
+ it in your startup files loaded after IPython initializes, but *not* in the
+ IPython configuration file itself, which is executed before IPython is
+ fully up and running. Any file located in the `startup` subdirectory of
+ your configuration profile will be OK in this sense.
+ """)
+
+ magic_deco.__doc__ = ds
+ return magic_deco
+
+
+# Create the actual decorators for public use
+
+# These three are used to decorate methods in class definitions
+line_magic = _method_magic_marker('line')
+cell_magic = _method_magic_marker('cell')
+line_cell_magic = _method_magic_marker('line_cell')
+
+# These three decorate standalone functions and perform the decoration
+# immediately. They can only run where get_ipython() works
+register_line_magic = _function_magic_marker('line')
+register_cell_magic = _function_magic_marker('cell')
+register_line_cell_magic = _function_magic_marker('line_cell')
+
+#-----------------------------------------------------------------------------
+# Core Magic classes
+#-----------------------------------------------------------------------------
+
+class MagicsManager(Configurable):
+ """Object that handles all magic-related functionality for IPython.
+ """
+ # Non-configurable class attributes
+
+ # A two-level dict, first keyed by magic type, then by magic function, and
+ # holding the actual callable object as value. This is the dict used for
+ # magic function dispatch
+ magics = Dict()
+
+ # A registry of the original objects that we've been given holding magics.
+ registry = Dict()
+
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
auto_magic = Bool(True, help=
"Automatically call line magics without requiring explicit % prefix"
).tag(config=True)
@observe('auto_magic')
def _auto_magic_changed(self, change):
self.shell.automagic = change['new']
-
- _auto_status = [
- 'Automagic is OFF, % prefix IS needed for line magics.',
- 'Automagic is ON, % prefix IS NOT needed for line magics.']
-
- user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
-
- def __init__(self, shell=None, config=None, user_magics=None, **traits):
-
- super(MagicsManager, self).__init__(shell=shell, config=config,
- user_magics=user_magics, **traits)
- self.magics = dict(line={}, cell={})
- # Let's add the user_magics to the registry for uniformity, so *all*
- # registered magic containers can be found there.
- self.registry[user_magics.__class__.__name__] = user_magics
-
- def auto_status(self):
- """Return descriptive string with automagic status."""
- return self._auto_status[self.auto_magic]
-
- def lsmagic(self):
- """Return a dict of currently available magic functions.
-
- The return dict has the keys 'line' and 'cell', corresponding to the
-        two types of magics we support. Each value is a dict of magic names to callables.
- """
- return self.magics
-
- def lsmagic_docs(self, brief=False, missing=''):
- """Return dict of documentation of magic functions.
-
- The return dict has the keys 'line' and 'cell', corresponding to the
- two types of magics we support. Each value is a dict keyed by magic
- name whose value is the function docstring. If a docstring is
- unavailable, the value of `missing` is used instead.
-
- If brief is True, only the first line of each docstring will be returned.
- """
- docs = {}
- for m_type in self.magics:
- m_docs = {}
- for m_name, m_func in iteritems(self.magics[m_type]):
- if m_func.__doc__:
- if brief:
- m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
- else:
- m_docs[m_name] = m_func.__doc__.rstrip()
- else:
- m_docs[m_name] = missing
- docs[m_type] = m_docs
- return docs
-
- def register(self, *magic_objects):
- """Register one or more instances of Magics.
-
- Take one or more classes or instances of classes that subclass the main
- `core.Magic` class, and register them with IPython to use the magic
- functions they provide. The registration process will then ensure that
-    any methods that have been decorated to provide line and/or cell magics will
- be recognized with the `%x`/`%%x` syntax as a line/cell magic
- respectively.
-
- If classes are given, they will be instantiated with the default
- constructor. If your classes need a custom constructor, you should
-    instantiate them first and pass the instance.
-
- The provided arguments can be an arbitrary mix of classes and instances.
-
- Parameters
- ----------
- magic_objects : one or more classes or instances
- """
- # Start by validating them to ensure they have all had their magic
- # methods registered at the instance level
- for m in magic_objects:
- if not m.registered:
- raise ValueError("Class of magics %r was constructed without "
- "the @register_magics class decorator")
- if isinstance(m, type):
- # If we're given an uninstantiated class
- m = m(shell=self.shell)
-
- # Now that we have an instance, we can register it and update the
- # table of callables
- self.registry[m.__class__.__name__] = m
- for mtype in magic_kinds:
- self.magics[mtype].update(m.magics[mtype])
-
- def register_function(self, func, magic_kind='line', magic_name=None):
- """Expose a standalone function as magic function for IPython.
-
- This will create an IPython magic (line, cell or both) from a
- standalone function. The functions should have the following
- signatures:
-
- * For line magics: `def f(line)`
- * For cell magics: `def f(line, cell)`
- * For a function that does both: `def f(line, cell=None)`
-
- In the latter case, the function will be called with `cell==None` when
- invoked as `%f`, and with cell as a string when invoked as `%%f`.
-
- Parameters
- ----------
- func : callable
- Function to be registered as a magic.
-
- magic_kind : str
- Kind of magic, one of 'line', 'cell' or 'line_cell'
-
- magic_name : optional str
- If given, the name the magic will have in the IPython namespace. By
- default, the name of the function itself is used.
- """
-
- # Create the new method in the user_magics and register it in the
- # global table
- validate_type(magic_kind)
- magic_name = func.__name__ if magic_name is None else magic_name
- setattr(self.user_magics, magic_name, func)
- record_magic(self.magics, magic_kind, magic_name, func)
-
- def register_alias(self, alias_name, magic_name, magic_kind='line'):
- """Register an alias to a magic function.
-
- The alias is an instance of :class:`MagicAlias`, which holds the
- name and kind of the magic it should call. Binding is done at
- call time, so if the underlying magic function is changed the alias
- will call the new function.
-
- Parameters
- ----------
- alias_name : str
- The name of the magic to be registered.
-
- magic_name : str
- The name of an existing magic.
-
- magic_kind : str
- Kind of magic, one of 'line' or 'cell'
- """
-
- # `validate_type` is too permissive, as it allows 'line_cell'
- # which we do not handle.
- if magic_kind not in magic_kinds:
-            raise ValueError('magic_kind must be one of %s, %s given' %
-                             (magic_kinds, magic_kind))
-
- alias = MagicAlias(self.shell, magic_name, magic_kind)
- setattr(self.user_magics, alias_name, alias)
- record_magic(self.magics, magic_kind, alias_name, alias)
-
-# Key base class that provides the central functionality for magics.
-
-
-class Magics(Configurable):
- """Base class for implementing magic functions.
-
- Shell functions which can be reached as %function_name. All magic
- functions should accept a string, which they can parse for their own
-    needs.  This can make some functions easier to type, e.g. `%cd ../`
- vs. `%cd("../")`
-
- Classes providing magic functions need to subclass this class, and they
- MUST:
-
- - Use the method decorators `@line_magic` and `@cell_magic` to decorate
- individual methods as magic functions, AND
-
- - Use the class decorator `@magics_class` to ensure that the magic
- methods are properly registered at the instance level upon instance
- initialization.
-
- See :mod:`magic_functions` for examples of actual implementation classes.
- """
- # Dict holding all command-line options for each magic.
- options_table = None
- # Dict for the mapping of magic names to methods, set by class decorator
- magics = None
- # Flag to check that the class decorator was properly applied
- registered = False
- # Instance of IPython shell
- shell = None
-
- def __init__(self, shell=None, **kwargs):
- if not(self.__class__.registered):
- raise ValueError('Magics subclass without registration - '
- 'did you forget to apply @magics_class?')
- if shell is not None:
- if hasattr(shell, 'configurables'):
- shell.configurables.append(self)
- if hasattr(shell, 'config'):
- kwargs.setdefault('parent', shell)
-
- self.shell = shell
- self.options_table = {}
- # The method decorators are run when the instance doesn't exist yet, so
- # they can only record the names of the methods they are supposed to
- # grab. Only now, that the instance exists, can we create the proper
- # mapping to bound methods. So we read the info off the original names
- # table and replace each method name by the actual bound method.
- # But we mustn't clobber the *class* mapping, in case of multiple instances.
- class_magics = self.magics
- self.magics = {}
- for mtype in magic_kinds:
- tab = self.magics[mtype] = {}
- cls_tab = class_magics[mtype]
- for magic_name, meth_name in iteritems(cls_tab):
- if isinstance(meth_name, string_types):
- # it's a method name, grab it
- tab[magic_name] = getattr(self, meth_name)
- else:
- # it's the real thing
- tab[magic_name] = meth_name
-        # Configurable **needs** to be initialized at the end or the config
- # magics get screwed up.
- super(Magics, self).__init__(**kwargs)
-
- def arg_err(self,func):
- """Print docstring if incorrect arguments were passed"""
- print('Error in arguments:')
- print(oinspect.getdoc(func))
-
- def format_latex(self, strng):
- """Format a string for latex inclusion."""
-
- # Characters that need to be escaped for latex:
- escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
- # Magic command names as headers:
- cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
- re.MULTILINE)
- # Magic commands
- cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
- re.MULTILINE)
- # Paragraph continue
- par_re = re.compile(r'\\$',re.MULTILINE)
-
- # The "\n" symbol
- newline_re = re.compile(r'\\n')
-
- # Now build the string for output:
- #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
- strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
- strng)
- strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
- strng = par_re.sub(r'\\\\',strng)
- strng = escape_re.sub(r'\\\1',strng)
- strng = newline_re.sub(r'\\textbackslash{}n',strng)
- return strng
-
- def parse_options(self, arg_str, opt_str, *long_opts, **kw):
- """Parse options passed to an argument string.
-
- The interface is similar to that of :func:`getopt.getopt`, but it
- returns a :class:`~IPython.utils.struct.Struct` with the options as keys
- and the stripped argument string still as a string.
-
-    arg_str is split into a true sys.argv vector by using shlex.split.
- This allows us to easily expand variables, glob files, quote
- arguments, etc.
-
- Parameters
- ----------
-
- arg_str : str
- The arguments to parse.
-
- opt_str : str
- The options specification.
-
- mode : str, default 'string'
- If given as 'list', the argument string is returned as a list (split
- on whitespace) instead of a string.
-
- list_all : bool, default False
- Put all option values in lists. Normally only options
- appearing more than once are put in a list.
-
- posix : bool, default True
- Whether to split the input line in POSIX mode or not, as per the
- conventions outlined in the :mod:`shlex` module from the standard
- library.
- """
-
- # inject default options at the beginning of the input line
- caller = sys._getframe(1).f_code.co_name
- arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
-
- mode = kw.get('mode','string')
- if mode not in ['string','list']:
- raise ValueError('incorrect mode given: %s' % mode)
- # Get options
- list_all = kw.get('list_all',0)
- posix = kw.get('posix', os.name == 'posix')
- strict = kw.get('strict', True)
-
-        # Option processing is only worthwhile if we actually got arguments:
- odict = {} # Dictionary with options
- args = arg_str.split()
- if len(args) >= 1:
-            # At least one argument was supplied, so split the line properly
-            # and look for options
- argv = arg_split(arg_str, posix, strict)
- # Do regular option processing
- try:
- opts,args = getopt(argv, opt_str, long_opts)
- except GetoptError as e:
- raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
- " ".join(long_opts)))
- for o,a in opts:
- if o.startswith('--'):
- o = o[2:]
- else:
- o = o[1:]
- try:
- odict[o].append(a)
- except AttributeError:
- odict[o] = [odict[o],a]
- except KeyError:
- if list_all:
- odict[o] = [a]
- else:
- odict[o] = a
-
- # Prepare opts,args for return
- opts = Struct(odict)
- if mode == 'string':
- args = ' '.join(args)
-
- return opts,args
-
- def default_option(self, fn, optstr):
- """Make an entry in the options_table for fn, with value optstr"""
-
- if fn not in self.lsmagic():
- error("%s is not a magic function" % fn)
- self.options_table[fn] = optstr
-
-
-class MagicAlias(object):
- """An alias to another magic function.
-
- An alias is determined by its magic name and magic kind. Lookup
- is done at call time, so if the underlying magic changes the alias
- will call the new function.
-
- Use the :meth:`MagicsManager.register_alias` method or the
- `%alias_magic` magic function to create and register a new alias.
- """
- def __init__(self, shell, magic_name, magic_kind):
- self.shell = shell
- self.magic_name = magic_name
- self.magic_kind = magic_kind
-
- self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
- self.__doc__ = "Alias for `%s`." % self.pretty_target
-
- self._in_call = False
-
- def __call__(self, *args, **kwargs):
- """Call the magic alias."""
- fn = self.shell.find_magic(self.magic_name, self.magic_kind)
- if fn is None:
- raise UsageError("Magic `%s` not found." % self.pretty_target)
-
- # Protect against infinite recursion.
- if self._in_call:
- raise UsageError("Infinite recursion detected; "
- "magic aliases cannot call themselves.")
- self._in_call = True
- try:
- return fn(*args, **kwargs)
- finally:
- self._in_call = False
+
+ _auto_status = [
+ 'Automagic is OFF, % prefix IS needed for line magics.',
+ 'Automagic is ON, % prefix IS NOT needed for line magics.']
+
+ user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
+
+ def __init__(self, shell=None, config=None, user_magics=None, **traits):
+
+ super(MagicsManager, self).__init__(shell=shell, config=config,
+ user_magics=user_magics, **traits)
+ self.magics = dict(line={}, cell={})
+ # Let's add the user_magics to the registry for uniformity, so *all*
+ # registered magic containers can be found there.
+ self.registry[user_magics.__class__.__name__] = user_magics
+
+ def auto_status(self):
+ """Return descriptive string with automagic status."""
+ return self._auto_status[self.auto_magic]
+
+ def lsmagic(self):
+ """Return a dict of currently available magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+        two types of magics we support. Each value is a dict of magic names to callables.
+ """
+ return self.magics
+
+ def lsmagic_docs(self, brief=False, missing=''):
+ """Return dict of documentation of magic functions.
+
+ The return dict has the keys 'line' and 'cell', corresponding to the
+ two types of magics we support. Each value is a dict keyed by magic
+ name whose value is the function docstring. If a docstring is
+ unavailable, the value of `missing` is used instead.
+
+ If brief is True, only the first line of each docstring will be returned.
+ """
+ docs = {}
+ for m_type in self.magics:
+ m_docs = {}
+ for m_name, m_func in iteritems(self.magics[m_type]):
+ if m_func.__doc__:
+ if brief:
+ m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
+ else:
+ m_docs[m_name] = m_func.__doc__.rstrip()
+ else:
+ m_docs[m_name] = missing
+ docs[m_type] = m_docs
+ return docs
+
+ def register(self, *magic_objects):
+ """Register one or more instances of Magics.
+
+ Take one or more classes or instances of classes that subclass the main
+ `core.Magic` class, and register them with IPython to use the magic
+ functions they provide. The registration process will then ensure that
+    any methods that have been decorated to provide line and/or cell magics will
+ be recognized with the `%x`/`%%x` syntax as a line/cell magic
+ respectively.
+
+ If classes are given, they will be instantiated with the default
+ constructor. If your classes need a custom constructor, you should
+    instantiate them first and pass the instance.
+
+ The provided arguments can be an arbitrary mix of classes and instances.
+
+ Parameters
+ ----------
+ magic_objects : one or more classes or instances
+ """
+ # Start by validating them to ensure they have all had their magic
+ # methods registered at the instance level
+ for m in magic_objects:
+ if not m.registered:
+ raise ValueError("Class of magics %r was constructed without "
+ "the @register_magics class decorator")
+ if isinstance(m, type):
+ # If we're given an uninstantiated class
+ m = m(shell=self.shell)
+
+ # Now that we have an instance, we can register it and update the
+ # table of callables
+ self.registry[m.__class__.__name__] = m
+ for mtype in magic_kinds:
+ self.magics[mtype].update(m.magics[mtype])
+
+ def register_function(self, func, magic_kind='line', magic_name=None):
+ """Expose a standalone function as magic function for IPython.
+
+ This will create an IPython magic (line, cell or both) from a
+ standalone function. The functions should have the following
+ signatures:
+
+ * For line magics: `def f(line)`
+ * For cell magics: `def f(line, cell)`
+ * For a function that does both: `def f(line, cell=None)`
+
+ In the latter case, the function will be called with `cell==None` when
+ invoked as `%f`, and with cell as a string when invoked as `%%f`.
+
+ Parameters
+ ----------
+ func : callable
+ Function to be registered as a magic.
+
+ magic_kind : str
+ Kind of magic, one of 'line', 'cell' or 'line_cell'
+
+ magic_name : optional str
+ If given, the name the magic will have in the IPython namespace. By
+ default, the name of the function itself is used.
+ """
+
+ # Create the new method in the user_magics and register it in the
+ # global table
+ validate_type(magic_kind)
+ magic_name = func.__name__ if magic_name is None else magic_name
+ setattr(self.user_magics, magic_name, func)
+ record_magic(self.magics, magic_kind, magic_name, func)
+
+ def register_alias(self, alias_name, magic_name, magic_kind='line'):
+ """Register an alias to a magic function.
+
+ The alias is an instance of :class:`MagicAlias`, which holds the
+ name and kind of the magic it should call. Binding is done at
+ call time, so if the underlying magic function is changed the alias
+ will call the new function.
+
+ Parameters
+ ----------
+ alias_name : str
+ The name of the magic to be registered.
+
+ magic_name : str
+ The name of an existing magic.
+
+ magic_kind : str
+ Kind of magic, one of 'line' or 'cell'
+ """
+
+ # `validate_type` is too permissive, as it allows 'line_cell'
+ # which we do not handle.
+ if magic_kind not in magic_kinds:
+            raise ValueError('magic_kind must be one of %s, %s given' %
+                             (magic_kinds, magic_kind))
+
+ alias = MagicAlias(self.shell, magic_name, magic_kind)
+ setattr(self.user_magics, alias_name, alias)
+ record_magic(self.magics, magic_kind, alias_name, alias)
+
+# Key base class that provides the central functionality for magics.
+
+
+class Magics(Configurable):
+ """Base class for implementing magic functions.
+
+ Shell functions which can be reached as %function_name. All magic
+ functions should accept a string, which they can parse for their own
+    needs.  This can make some functions easier to type, e.g. `%cd ../`
+ vs. `%cd("../")`
+
+ Classes providing magic functions need to subclass this class, and they
+ MUST:
+
+ - Use the method decorators `@line_magic` and `@cell_magic` to decorate
+ individual methods as magic functions, AND
+
+ - Use the class decorator `@magics_class` to ensure that the magic
+ methods are properly registered at the instance level upon instance
+ initialization.
+
+ See :mod:`magic_functions` for examples of actual implementation classes.
+ """
+ # Dict holding all command-line options for each magic.
+ options_table = None
+ # Dict for the mapping of magic names to methods, set by class decorator
+ magics = None
+ # Flag to check that the class decorator was properly applied
+ registered = False
+ # Instance of IPython shell
+ shell = None
+
+ def __init__(self, shell=None, **kwargs):
+ if not(self.__class__.registered):
+ raise ValueError('Magics subclass without registration - '
+ 'did you forget to apply @magics_class?')
+ if shell is not None:
+ if hasattr(shell, 'configurables'):
+ shell.configurables.append(self)
+ if hasattr(shell, 'config'):
+ kwargs.setdefault('parent', shell)
+
+ self.shell = shell
+ self.options_table = {}
+ # The method decorators are run when the instance doesn't exist yet, so
+ # they can only record the names of the methods they are supposed to
+ # grab. Only now, that the instance exists, can we create the proper
+ # mapping to bound methods. So we read the info off the original names
+ # table and replace each method name by the actual bound method.
+ # But we mustn't clobber the *class* mapping, in case of multiple instances.
+ class_magics = self.magics
+ self.magics = {}
+ for mtype in magic_kinds:
+ tab = self.magics[mtype] = {}
+ cls_tab = class_magics[mtype]
+ for magic_name, meth_name in iteritems(cls_tab):
+ if isinstance(meth_name, string_types):
+ # it's a method name, grab it
+ tab[magic_name] = getattr(self, meth_name)
+ else:
+ # it's the real thing
+ tab[magic_name] = meth_name
+        # Configurable **needs** to be initialized at the end or the config
+ # magics get screwed up.
+ super(Magics, self).__init__(**kwargs)
+
+ def arg_err(self,func):
+ """Print docstring if incorrect arguments were passed"""
+ print('Error in arguments:')
+ print(oinspect.getdoc(func))
+
+ def format_latex(self, strng):
+ """Format a string for latex inclusion."""
+
+ # Characters that need to be escaped for latex:
+ escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
+ # Magic command names as headers:
+ cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
+ re.MULTILINE)
+ # Magic commands
+ cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
+ re.MULTILINE)
+ # Paragraph continue
+ par_re = re.compile(r'\\$',re.MULTILINE)
+
+ # The "\n" symbol
+ newline_re = re.compile(r'\\n')
+
+ # Now build the string for output:
+ #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
+ strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
+ strng)
+ strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
+ strng = par_re.sub(r'\\\\',strng)
+ strng = escape_re.sub(r'\\\1',strng)
+ strng = newline_re.sub(r'\\textbackslash{}n',strng)
+ return strng
+
+ def parse_options(self, arg_str, opt_str, *long_opts, **kw):
+ """Parse options passed to an argument string.
+
+ The interface is similar to that of :func:`getopt.getopt`, but it
+ returns a :class:`~IPython.utils.struct.Struct` with the options as keys
+ and the stripped argument string still as a string.
+
+    arg_str is split into a true sys.argv vector by using shlex.split.
+ This allows us to easily expand variables, glob files, quote
+ arguments, etc.
+
+ Parameters
+ ----------
+
+ arg_str : str
+ The arguments to parse.
+
+ opt_str : str
+ The options specification.
+
+ mode : str, default 'string'
+ If given as 'list', the argument string is returned as a list (split
+ on whitespace) instead of a string.
+
+ list_all : bool, default False
+ Put all option values in lists. Normally only options
+ appearing more than once are put in a list.
+
+ posix : bool, default True
+ Whether to split the input line in POSIX mode or not, as per the
+ conventions outlined in the :mod:`shlex` module from the standard
+ library.
+ """
+
+ # inject default options at the beginning of the input line
+ caller = sys._getframe(1).f_code.co_name
+ arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
+
+ mode = kw.get('mode','string')
+ if mode not in ['string','list']:
+ raise ValueError('incorrect mode given: %s' % mode)
+ # Get options
+ list_all = kw.get('list_all',0)
+ posix = kw.get('posix', os.name == 'posix')
+ strict = kw.get('strict', True)
+
+        # Option processing is only worthwhile if we actually got arguments:
+ odict = {} # Dictionary with options
+ args = arg_str.split()
+ if len(args) >= 1:
+            # At least one argument was supplied, so split the line properly
+            # and look for options
+ argv = arg_split(arg_str, posix, strict)
+ # Do regular option processing
+ try:
+ opts,args = getopt(argv, opt_str, long_opts)
+ except GetoptError as e:
+ raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
+ " ".join(long_opts)))
+ for o,a in opts:
+ if o.startswith('--'):
+ o = o[2:]
+ else:
+ o = o[1:]
+ try:
+ odict[o].append(a)
+ except AttributeError:
+ odict[o] = [odict[o],a]
+ except KeyError:
+ if list_all:
+ odict[o] = [a]
+ else:
+ odict[o] = a
+
+ # Prepare opts,args for return
+ opts = Struct(odict)
+ if mode == 'string':
+ args = ' '.join(args)
+
+ return opts,args
+
+ def default_option(self, fn, optstr):
+ """Make an entry in the options_table for fn, with value optstr"""
+
+ if fn not in self.lsmagic():
+ error("%s is not a magic function" % fn)
+ self.options_table[fn] = optstr
+
+
+class MagicAlias(object):
+ """An alias to another magic function.
+
+ An alias is determined by its magic name and magic kind. Lookup
+ is done at call time, so if the underlying magic changes the alias
+ will call the new function.
+
+ Use the :meth:`MagicsManager.register_alias` method or the
+ `%alias_magic` magic function to create and register a new alias.
+ """
+ def __init__(self, shell, magic_name, magic_kind):
+ self.shell = shell
+ self.magic_name = magic_name
+ self.magic_kind = magic_kind
+
+ self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
+ self.__doc__ = "Alias for `%s`." % self.pretty_target
+
+ self._in_call = False
+
+ def __call__(self, *args, **kwargs):
+ """Call the magic alias."""
+ fn = self.shell.find_magic(self.magic_name, self.magic_kind)
+ if fn is None:
+ raise UsageError("Magic `%s` not found." % self.pretty_target)
+
+ # Protect against infinite recursion.
+ if self._in_call:
+ raise UsageError("Infinite recursion detected; "
+ "magic aliases cannot call themselves.")
+ self._in_call = True
+ try:
+ return fn(*args, **kwargs)
+ finally:
+ self._in_call = False
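Tying the magic.py pieces together, a minimal sketch of a custom magics class
(the class and magic names are illustrative, not part of IPython):

    from IPython.core.magic import Magics, magics_class, line_magic, cell_magic

    @magics_class               # copies the module-global name tables onto the class
    class MyMagics(Magics):

        @line_magic             # records 'mine' in magics['line'] while the class body runs
        def mine(self, line):
            """Parse a -v flag and return (opts, args) via parse_options above."""
            opts, args = self.parse_options(line, 'v')
            return opts, args

        @cell_magic
        def mycell(self, line, cell):
            """Return the line and cell passed to %%mycell."""
            return line, cell

    # Registering the class (or an instance) makes Magics.__init__ resolve the
    # recorded method names to bound methods and exposes %mine / %%mycell:
    #   get_ipython().magics_manager.register(MyMagics)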
diff --git a/contrib/python/ipython/py2/IPython/core/magic_arguments.py b/contrib/python/ipython/py2/IPython/core/magic_arguments.py
index 38e03aa176..9231609572 100644
--- a/contrib/python/ipython/py2/IPython/core/magic_arguments.py
+++ b/contrib/python/ipython/py2/IPython/core/magic_arguments.py
@@ -1,278 +1,278 @@
-''' A decorator-based method of constructing IPython magics with `argparse`
-option handling.
-
-New magic functions can be defined like so::
-
- from IPython.core.magic_arguments import (argument, magic_arguments,
- parse_argstring)
-
- @magic_arguments()
- @argument('-o', '--option', help='An optional argument.')
- @argument('arg', type=int, help='An integer positional argument.')
- def magic_cool(self, arg):
- """ A really cool magic command.
-
- """
- args = parse_argstring(magic_cool, arg)
- ...
-
-The `@magic_arguments` decorator marks the function as having argparse arguments.
-The `@argument` decorator adds an argument using the same syntax as argparse's
-`add_argument()` method. More sophisticated uses may also require the
-`@argument_group` or `@kwds` decorator to customize the formatting and the
-parsing.
-
-Help text for the magic is automatically generated from the docstring and the
-arguments::
-
- In[1]: %cool?
- %cool [-o OPTION] arg
-
- A really cool magic command.
-
- positional arguments:
- arg An integer positional argument.
-
- optional arguments:
- -o OPTION, --option OPTION
- An optional argument.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.core.magic_arguments
- :parts: 3
-
-'''
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011, IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import argparse
-import re
-
-# Our own imports
-from IPython.core.error import UsageError
-from IPython.utils.decorators import undoc
-from IPython.utils.process import arg_split
-from IPython.utils.text import dedent
-
-NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
-
-@undoc
-class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
- """A HelpFormatter with a couple of changes to meet our needs.
- """
- # Modified to dedent text.
- def _fill_text(self, text, width, indent):
- return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
-
- # Modified to wrap argument placeholders in <> where necessary.
- def _format_action_invocation(self, action):
- if not action.option_strings:
- metavar, = self._metavar_formatter(action, action.dest)(1)
- return metavar
-
- else:
- parts = []
-
- # if the Optional doesn't take a value, format is:
- # -s, --long
- if action.nargs == 0:
- parts.extend(action.option_strings)
-
- # if the Optional takes a value, format is:
- # -s ARGS, --long ARGS
- else:
- default = action.dest.upper()
- args_string = self._format_args(action, default)
- # IPYTHON MODIFICATION: If args_string is not a plain name, wrap
- # it in <> so it's valid RST.
- if not NAME_RE.match(args_string):
- args_string = "<%s>" % args_string
- for option_string in action.option_strings:
- parts.append('%s %s' % (option_string, args_string))
-
- return ', '.join(parts)
-
- # Override the default prefix ('usage') to our % magic escape,
- # in a code block.
- def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
- super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
-
-class MagicArgumentParser(argparse.ArgumentParser):
- """ An ArgumentParser tweaked for use by IPython magics.
- """
- def __init__(self,
- prog=None,
- usage=None,
- description=None,
- epilog=None,
- parents=None,
- formatter_class=MagicHelpFormatter,
- prefix_chars='-',
- argument_default=None,
- conflict_handler='error',
- add_help=False):
- if parents is None:
- parents = []
- super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
- description=description, epilog=epilog,
- parents=parents, formatter_class=formatter_class,
- prefix_chars=prefix_chars, argument_default=argument_default,
- conflict_handler=conflict_handler, add_help=add_help)
-
- def error(self, message):
- """ Raise a catchable error instead of exiting.
- """
- raise UsageError(message)
-
- def parse_argstring(self, argstring):
- """ Split a string into an argument list and parse that argument list.
- """
- argv = arg_split(argstring)
- return self.parse_args(argv)
-
-
-def construct_parser(magic_func):
- """ Construct an argument parser using the function decorations.
- """
- kwds = getattr(magic_func, 'argcmd_kwds', {})
- if 'description' not in kwds:
- kwds['description'] = getattr(magic_func, '__doc__', None)
- arg_name = real_name(magic_func)
- parser = MagicArgumentParser(arg_name, **kwds)
- # Reverse the list of decorators in order to apply them in the
- # order in which they appear in the source.
- group = None
- for deco in magic_func.decorators[::-1]:
- result = deco.add_to_parser(parser, group)
- if result is not None:
- group = result
-
- # Replace the magic function's docstring with the full help text.
- magic_func.__doc__ = parser.format_help()
-
- return parser
-
-
-def parse_argstring(magic_func, argstring):
- """ Parse the string of arguments for the given magic function.
- """
- return magic_func.parser.parse_argstring(argstring)
-
-
-def real_name(magic_func):
- """ Find the real name of the magic.
- """
- magic_name = magic_func.__name__
- if magic_name.startswith('magic_'):
- magic_name = magic_name[len('magic_'):]
- return getattr(magic_func, 'argcmd_name', magic_name)
-
-
-class ArgDecorator(object):
- """ Base class for decorators to add ArgumentParser information to a method.
- """
-
- def __call__(self, func):
- if not getattr(func, 'has_arguments', False):
- func.has_arguments = True
- func.decorators = []
- func.decorators.append(self)
- return func
-
- def add_to_parser(self, parser, group):
- """ Add this object's information to the parser, if necessary.
- """
- pass
-
-
-class magic_arguments(ArgDecorator):
- """ Mark the magic as having argparse arguments and possibly adjust the
- name.
- """
-
- def __init__(self, name=None):
- self.name = name
-
- def __call__(self, func):
- if not getattr(func, 'has_arguments', False):
- func.has_arguments = True
- func.decorators = []
- if self.name is not None:
- func.argcmd_name = self.name
- # This should be the first decorator in the list of decorators, thus the
- # last to execute. Build the parser.
- func.parser = construct_parser(func)
- return func
-
-
-class ArgMethodWrapper(ArgDecorator):
-
- """
- Base class to define a wrapper for ArgumentParser method.
-
- Child class must define either `_method_name` or `add_to_parser`.
-
- """
-
- _method_name = None
-
- def __init__(self, *args, **kwds):
- self.args = args
- self.kwds = kwds
-
- def add_to_parser(self, parser, group):
- """ Add this object's information to the parser.
- """
- if group is not None:
- parser = group
- getattr(parser, self._method_name)(*self.args, **self.kwds)
- return None
-
-
-class argument(ArgMethodWrapper):
- """ Store arguments and keywords to pass to add_argument().
-
- Instances also serve to decorate command methods.
- """
- _method_name = 'add_argument'
-
-
-class defaults(ArgMethodWrapper):
- """ Store arguments and keywords to pass to set_defaults().
-
- Instances also serve to decorate command methods.
- """
- _method_name = 'set_defaults'
-
-
-class argument_group(ArgMethodWrapper):
- """ Store arguments and keywords to pass to add_argument_group().
-
- Instances also serve to decorate command methods.
- """
-
- def add_to_parser(self, parser, group):
- """ Add this object's information to the parser.
- """
- return parser.add_argument_group(*self.args, **self.kwds)
-
-
-class kwds(ArgDecorator):
- """ Provide other keywords to the sub-parser constructor.
- """
- def __init__(self, **kwds):
- self.kwds = kwds
-
- def __call__(self, func):
- func = super(kwds, self).__call__(func)
- func.argcmd_kwds = self.kwds
- return func
-
-
-__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
- 'parse_argstring']
+''' A decorator-based method of constructing IPython magics with `argparse`
+option handling.
+
+New magic functions can be defined like so::
+
+ from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+
+ @magic_arguments()
+ @argument('-o', '--option', help='An optional argument.')
+ @argument('arg', type=int, help='An integer positional argument.')
+ def magic_cool(self, arg):
+ """ A really cool magic command.
+
+ """
+ args = parse_argstring(magic_cool, arg)
+ ...
+
+The `@magic_arguments` decorator marks the function as having argparse arguments.
+The `@argument` decorator adds an argument using the same syntax as argparse's
+`add_argument()` method. More sophisticated uses may also require the
+`@argument_group` or `@kwds` decorator to customize the formatting and the
+parsing.
+
+Help text for the magic is automatically generated from the docstring and the
+arguments::
+
+ In[1]: %cool?
+ %cool [-o OPTION] arg
+
+ A really cool magic command.
+
+ positional arguments:
+ arg An integer positional argument.
+
+ optional arguments:
+ -o OPTION, --option OPTION
+ An optional argument.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.magic_arguments
+ :parts: 3
+
+'''
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import argparse
+import re
+
+# Our own imports
+from IPython.core.error import UsageError
+from IPython.utils.decorators import undoc
+from IPython.utils.process import arg_split
+from IPython.utils.text import dedent
+
+NAME_RE = re.compile(r"[a-zA-Z][a-zA-Z0-9_-]*$")
+
+@undoc
+class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
+ """A HelpFormatter with a couple of changes to meet our needs.
+ """
+ # Modified to dedent text.
+ def _fill_text(self, text, width, indent):
+ return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
+
+ # Modified to wrap argument placeholders in <> where necessary.
+ def _format_action_invocation(self, action):
+ if not action.option_strings:
+ metavar, = self._metavar_formatter(action, action.dest)(1)
+ return metavar
+
+ else:
+ parts = []
+
+ # if the Optional doesn't take a value, format is:
+ # -s, --long
+ if action.nargs == 0:
+ parts.extend(action.option_strings)
+
+ # if the Optional takes a value, format is:
+ # -s ARGS, --long ARGS
+ else:
+ default = action.dest.upper()
+ args_string = self._format_args(action, default)
+ # IPYTHON MODIFICATION: If args_string is not a plain name, wrap
+ # it in <> so it's valid RST.
+ if not NAME_RE.match(args_string):
+ args_string = "<%s>" % args_string
+ for option_string in action.option_strings:
+ parts.append('%s %s' % (option_string, args_string))
+
+ return ', '.join(parts)
+
+ # Override the default prefix ('usage') to our % magic escape,
+ # in a code block.
+ def add_usage(self, usage, actions, groups, prefix="::\n\n %"):
+ super(MagicHelpFormatter, self).add_usage(usage, actions, groups, prefix)
+
+class MagicArgumentParser(argparse.ArgumentParser):
+ """ An ArgumentParser tweaked for use by IPython magics.
+ """
+ def __init__(self,
+ prog=None,
+ usage=None,
+ description=None,
+ epilog=None,
+ parents=None,
+ formatter_class=MagicHelpFormatter,
+ prefix_chars='-',
+ argument_default=None,
+ conflict_handler='error',
+ add_help=False):
+ if parents is None:
+ parents = []
+ super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
+ description=description, epilog=epilog,
+ parents=parents, formatter_class=formatter_class,
+ prefix_chars=prefix_chars, argument_default=argument_default,
+ conflict_handler=conflict_handler, add_help=add_help)
+
+ def error(self, message):
+ """ Raise a catchable error instead of exiting.
+ """
+ raise UsageError(message)
+
+ def parse_argstring(self, argstring):
+ """ Split a string into an argument list and parse that argument list.
+ """
+ argv = arg_split(argstring)
+ return self.parse_args(argv)
+
+
+def construct_parser(magic_func):
+ """ Construct an argument parser using the function decorations.
+ """
+ kwds = getattr(magic_func, 'argcmd_kwds', {})
+ if 'description' not in kwds:
+ kwds['description'] = getattr(magic_func, '__doc__', None)
+ arg_name = real_name(magic_func)
+ parser = MagicArgumentParser(arg_name, **kwds)
+ # Reverse the list of decorators in order to apply them in the
+ # order in which they appear in the source.
+ group = None
+ for deco in magic_func.decorators[::-1]:
+ result = deco.add_to_parser(parser, group)
+ if result is not None:
+ group = result
+
+ # Replace the magic function's docstring with the full help text.
+ magic_func.__doc__ = parser.format_help()
+
+ return parser
+
+
+def parse_argstring(magic_func, argstring):
+ """ Parse the string of arguments for the given magic function.
+ """
+ return magic_func.parser.parse_argstring(argstring)
+
+
+def real_name(magic_func):
+ """ Find the real name of the magic.
+ """
+ magic_name = magic_func.__name__
+ if magic_name.startswith('magic_'):
+ magic_name = magic_name[len('magic_'):]
+ return getattr(magic_func, 'argcmd_name', magic_name)
+
+
+class ArgDecorator(object):
+ """ Base class for decorators to add ArgumentParser information to a method.
+ """
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ func.decorators.append(self)
+ return func
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser, if necessary.
+ """
+ pass
+
+
+class magic_arguments(ArgDecorator):
+ """ Mark the magic as having argparse arguments and possibly adjust the
+ name.
+ """
+
+ def __init__(self, name=None):
+ self.name = name
+
+ def __call__(self, func):
+ if not getattr(func, 'has_arguments', False):
+ func.has_arguments = True
+ func.decorators = []
+ if self.name is not None:
+ func.argcmd_name = self.name
+ # This should be the first decorator in the list of decorators, thus the
+ # last to execute. Build the parser.
+ func.parser = construct_parser(func)
+ return func
+
+
+class ArgMethodWrapper(ArgDecorator):
+
+ """
+ Base class to define a wrapper for ArgumentParser method.
+
+ Child class must define either `_method_name` or `add_to_parser`.
+
+ """
+
+ _method_name = None
+
+ def __init__(self, *args, **kwds):
+ self.args = args
+ self.kwds = kwds
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ if group is not None:
+ parser = group
+ getattr(parser, self._method_name)(*self.args, **self.kwds)
+ return None
+
+
+class argument(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'add_argument'
+
+
+class defaults(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to set_defaults().
+
+ Instances also serve to decorate command methods.
+ """
+ _method_name = 'set_defaults'
+
+
+class argument_group(ArgMethodWrapper):
+ """ Store arguments and keywords to pass to add_argument_group().
+
+ Instances also serve to decorate command methods.
+ """
+
+ def add_to_parser(self, parser, group):
+ """ Add this object's information to the parser.
+ """
+ return parser.add_argument_group(*self.args, **self.kwds)
+
+
+class kwds(ArgDecorator):
+ """ Provide other keywords to the sub-parser constructor.
+ """
+ def __init__(self, **kwds):
+ self.kwds = kwds
+
+ def __call__(self, func):
+ func = super(kwds, self).__call__(func)
+ func.argcmd_kwds = self.kwds
+ return func
+
+
+__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
+ 'parse_argstring']
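
The `@argument_group` and `@kwds` decorators are mentioned in the module
docstring above but not demonstrated there. A minimal sketch, assuming the
same import path and an illustrative magic named `magic_tuned`: because
`construct_parser` reverses the decorator list, decorators apply in source
order, so arguments written below an `@argument_group` are collected into
that group::

    from IPython.core.magic_arguments import (argument, argument_group,
        kwds, magic_arguments, parse_argstring)

    @magic_arguments()
    @kwds(epilog='Shown at the end of the generated help.')
    @argument_group('Tuning')
    @argument('--depth', type=int, default=1,
              help='An illustrative option, placed in the Tuning group.')
    def magic_tuned(self, line):
        # parse_argstring splits the raw line and runs the argparse parser.
        args = parse_argstring(magic_tuned, line)
        return args.depth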
diff --git a/contrib/python/ipython/py2/IPython/core/magics/__init__.py b/contrib/python/ipython/py2/IPython/core/magics/__init__.py
index 78d4a45aa0..d2fd5a6cfb 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/__init__.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/__init__.py
@@ -1,41 +1,41 @@
-"""Implementation of all the magic functions built into IPython.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from ..magic import Magics, magics_class
-from .auto import AutoMagics
-from .basic import BasicMagics
-from .code import CodeMagics, MacroToEdit
-from .config import ConfigMagics
-from .display import DisplayMagics
-from .execution import ExecutionMagics
-from .extension import ExtensionMagics
-from .history import HistoryMagics
-from .logging import LoggingMagics
-from .namespace import NamespaceMagics
-from .osm import OSMagics
-from .pylab import PylabMagics
-from .script import ScriptMagics
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-@magics_class
-class UserMagics(Magics):
- """Placeholder for user-defined magics to be added at runtime.
-
- All magics are eventually merged into a single namespace at runtime, but we
- use this class to isolate the magics defined dynamically by the user into
- their own class.
- """
+"""Implementation of all the magic functions built into IPython.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from ..magic import Magics, magics_class
+from .auto import AutoMagics
+from .basic import BasicMagics
+from .code import CodeMagics, MacroToEdit
+from .config import ConfigMagics
+from .display import DisplayMagics
+from .execution import ExecutionMagics
+from .extension import ExtensionMagics
+from .history import HistoryMagics
+from .logging import LoggingMagics
+from .namespace import NamespaceMagics
+from .osm import OSMagics
+from .pylab import PylabMagics
+from .script import ScriptMagics
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class UserMagics(Magics):
+ """Placeholder for user-defined magics to be added at runtime.
+
+ All magics are eventually merged into a single namespace at runtime, but we
+ use this class to isolate the magics defined dynamically by the user into
+ their own class.
+ """
diff --git a/contrib/python/ipython/py2/IPython/core/magics/auto.py b/contrib/python/ipython/py2/IPython/core/magics/auto.py
index be6b218854..f87bafdeb1 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/auto.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/auto.py
@@ -1,130 +1,130 @@
-"""Implementation of magic functions that control various automatic behaviors.
-"""
-from __future__ import print_function
+"""Implementation of magic functions that control various automatic behaviors.
+"""
+from __future__ import print_function
from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Our own packages
-from IPython.core.magic import Bunch, Magics, magics_class, line_magic
-from IPython.testing.skipdoctest import skip_doctest
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.core.magic import Bunch, Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
from logging import error
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-@magics_class
-class AutoMagics(Magics):
- """Magics that control various autoX behaviors."""
-
- def __init__(self, shell):
- super(AutoMagics, self).__init__(shell)
- # namespace for holding state we may need
- self._magic_state = Bunch()
-
- @line_magic
- def automagic(self, parameter_s=''):
- """Make magic functions callable without having to type the initial %.
-
- Without arguments, toggles on/off (when off, you must call it as
- %automagic, of course). With arguments it sets the value, and you can
- use any of (case insensitive):
-
- - on, 1, True: to activate
-
- - off, 0, False: to deactivate.
-
- Note that magic functions have lowest priority, so if there's a
- variable whose name collides with that of a magic fn, automagic won't
- work for that function (you get the variable instead). However, if you
- delete the variable (del var), the previously shadowed magic function
- becomes visible to automagic again."""
-
- arg = parameter_s.lower()
- mman = self.shell.magics_manager
- if arg in ('on', '1', 'true'):
- val = True
- elif arg in ('off', '0', 'false'):
- val = False
- else:
- val = not mman.auto_magic
- mman.auto_magic = val
- print('\n' + self.shell.magics_manager.auto_status())
-
- @skip_doctest
- @line_magic
- def autocall(self, parameter_s=''):
- """Make functions callable without having to type parentheses.
-
- Usage:
-
- %autocall [mode]
-
- The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
- value is toggled on and off (remembering the previous state).
-
- In more detail, these values mean:
-
- 0 -> fully disabled
-
- 1 -> active, but do not apply if there are no arguments on the line.
-
- In this mode, you get::
-
- In [1]: callable
- Out[1]: <built-in function callable>
-
- In [2]: callable 'hello'
- ------> callable('hello')
- Out[2]: False
-
- 2 -> Active always. Even if no arguments are present, the callable
- object is called::
-
- In [2]: float
- ------> float()
- Out[2]: 0.0
-
- Note that even with autocall off, you can still use '/' at the start of
- a line to treat the first argument on the command line as a function
- and add parentheses to it::
-
- In [8]: /str 43
- ------> str(43)
- Out[8]: '43'
-
- # all-random (note for auto-testing)
- """
-
- if parameter_s:
- arg = int(parameter_s)
- else:
- arg = 'toggle'
-
- if arg not in (0, 1, 2, 'toggle'):
- error('Valid modes: 0->Off, 1->Smart, 2->Full')
- return
-
- if arg in (0, 1, 2):
- self.shell.autocall = arg
- else: # toggle
- if self.shell.autocall:
- self._magic_state.autocall_save = self.shell.autocall
- self.shell.autocall = 0
- else:
- try:
- self.shell.autocall = self._magic_state.autocall_save
- except AttributeError:
- self.shell.autocall = self._magic_state.autocall_save = 1
-
- print("Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall])
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class AutoMagics(Magics):
+ """Magics that control various autoX behaviors."""
+
+ def __init__(self, shell):
+ super(AutoMagics, self).__init__(shell)
+ # namespace for holding state we may need
+ self._magic_state = Bunch()
+
+ @line_magic
+ def automagic(self, parameter_s=''):
+ """Make magic functions callable without having to type the initial %.
+
+ Without arguments, toggles on/off (when off, you must call it as
+ %automagic, of course). With arguments it sets the value, and you can
+ use any of (case insensitive):
+
+ - on, 1, True: to activate
+
+ - off, 0, False: to deactivate.
+
+ Note that magic functions have lowest priority, so if there's a
+ variable whose name collides with that of a magic fn, automagic won't
+ work for that function (you get the variable instead). However, if you
+ delete the variable (del var), the previously shadowed magic function
+ becomes visible to automagic again."""
+
+ arg = parameter_s.lower()
+ mman = self.shell.magics_manager
+ if arg in ('on', '1', 'true'):
+ val = True
+ elif arg in ('off', '0', 'false'):
+ val = False
+ else:
+ val = not mman.auto_magic
+ mman.auto_magic = val
+ print('\n' + self.shell.magics_manager.auto_status())
+
+ @skip_doctest
+ @line_magic
+ def autocall(self, parameter_s=''):
+ """Make functions callable without having to type parentheses.
+
+ Usage:
+
+ %autocall [mode]
+
+ The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
+ value is toggled on and off (remembering the previous state).
+
+ In more detail, these values mean:
+
+ 0 -> fully disabled
+
+ 1 -> active, but do not apply if there are no arguments on the line.
+
+ In this mode, you get::
+
+ In [1]: callable
+ Out[1]: <built-in function callable>
+
+ In [2]: callable 'hello'
+ ------> callable('hello')
+ Out[2]: False
+
+ 2 -> Active always. Even if no arguments are present, the callable
+ object is called::
+
+ In [2]: float
+ ------> float()
+ Out[2]: 0.0
+
+ Note that even with autocall off, you can still use '/' at the start of
+ a line to treat the first argument on the command line as a function
+ and add parentheses to it::
+
+ In [8]: /str 43
+ ------> str(43)
+ Out[8]: '43'
+
+ # all-random (note for auto-testing)
+ """
+
+ if parameter_s:
+ arg = int(parameter_s)
+ else:
+ arg = 'toggle'
+
+ if arg not in (0, 1, 2, 'toggle'):
+ error('Valid modes: 0->Off, 1->Smart, 2->Full')
+ return
+
+ if arg in (0, 1, 2):
+ self.shell.autocall = arg
+ else: # toggle
+ if self.shell.autocall:
+ self._magic_state.autocall_save = self.shell.autocall
+ self.shell.autocall = 0
+ else:
+ try:
+ self.shell.autocall = self._magic_state.autocall_save
+ except AttributeError:
+ self.shell.autocall = self._magic_state.autocall_save = 1
+
+ print("Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall])
diff --git a/contrib/python/ipython/py2/IPython/core/magics/basic.py b/contrib/python/ipython/py2/IPython/core/magics/basic.py
index 75b1275211..ca69e2e698 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/basic.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/basic.py
@@ -1,575 +1,575 @@
-"""Implementation of basic magic functions."""
-
-from __future__ import print_function
+"""Implementation of basic magic functions."""
+
+from __future__ import print_function
from __future__ import absolute_import
-
+
import argparse
-import io
-import sys
-from pprint import pformat
-
-from IPython.core import magic_arguments, page
-from IPython.core.error import UsageError
-from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
-from IPython.utils.text import format_screen, dedent, indent
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils.ipstruct import Struct
-from IPython.utils.py3compat import unicode_type
+import io
+import sys
+from pprint import pformat
+
+from IPython.core import magic_arguments, page
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
+from IPython.utils.text import format_screen, dedent, indent
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.ipstruct import Struct
+from IPython.utils.py3compat import unicode_type
from warnings import warn
from logging import error
-
-
-class MagicsDisplay(object):
- def __init__(self, magics_manager):
- self.magics_manager = magics_manager
-
- def _lsmagic(self):
- """The main implementation of the %lsmagic"""
- mesc = magic_escapes['line']
- cesc = magic_escapes['cell']
- mman = self.magics_manager
- magics = mman.lsmagic()
- out = ['Available line magics:',
- mesc + (' '+mesc).join(sorted(magics['line'])),
- '',
- 'Available cell magics:',
- cesc + (' '+cesc).join(sorted(magics['cell'])),
- '',
- mman.auto_status()]
- return '\n'.join(out)
-
- def _repr_pretty_(self, p, cycle):
- p.text(self._lsmagic())
-
- def __str__(self):
- return self._lsmagic()
-
- def _jsonable(self):
- """turn magics dict into jsonable dict of the same structure
-
- replaces object instances with their class names as strings
- """
- magic_dict = {}
- mman = self.magics_manager
- magics = mman.lsmagic()
- for key, subdict in magics.items():
- d = {}
- magic_dict[key] = d
- for name, obj in subdict.items():
- try:
- classname = obj.__self__.__class__.__name__
- except AttributeError:
- classname = 'Other'
-
- d[name] = classname
- return magic_dict
-
- def _repr_json_(self):
- return self._jsonable()
-
-
-@magics_class
-class BasicMagics(Magics):
- """Magics that provide central IPython functionality.
-
- These are various magics that don't fit into specific categories but that
- are all part of the base 'IPython experience'."""
-
- @magic_arguments.magic_arguments()
- @magic_arguments.argument(
- '-l', '--line', action='store_true',
- help="""Create a line magic alias."""
- )
- @magic_arguments.argument(
- '-c', '--cell', action='store_true',
- help="""Create a cell magic alias."""
- )
- @magic_arguments.argument(
- 'name',
- help="""Name of the magic to be created."""
- )
- @magic_arguments.argument(
- 'target',
- help="""Name of the existing line or cell magic."""
- )
- @line_magic
- def alias_magic(self, line=''):
- """Create an alias for an existing line or cell magic.
-
- Examples
- --------
- ::
-
- In [1]: %alias_magic t timeit
- Created `%t` as an alias for `%timeit`.
- Created `%%t` as an alias for `%%timeit`.
-
- In [2]: %t -n1 pass
- 1 loops, best of 3: 954 ns per loop
-
- In [3]: %%t -n1
- ...: pass
- ...:
- 1 loops, best of 3: 954 ns per loop
-
- In [4]: %alias_magic --cell whereami pwd
- UsageError: Cell magic function `%%pwd` not found.
- In [5]: %alias_magic --line whereami pwd
- Created `%whereami` as an alias for `%pwd`.
-
- In [6]: %whereami
- Out[6]: u'/home/testuser'
- """
- args = magic_arguments.parse_argstring(self.alias_magic, line)
- shell = self.shell
- mman = self.shell.magics_manager
- escs = ''.join(magic_escapes.values())
-
- target = args.target.lstrip(escs)
- name = args.name.lstrip(escs)
-
- # Find the requested magics.
- m_line = shell.find_magic(target, 'line')
- m_cell = shell.find_magic(target, 'cell')
- if args.line and m_line is None:
- raise UsageError('Line magic function `%s%s` not found.' %
- (magic_escapes['line'], target))
- if args.cell and m_cell is None:
- raise UsageError('Cell magic function `%s%s` not found.' %
- (magic_escapes['cell'], target))
-
- # If --line and --cell are not specified, default to the ones
- # that are available.
- if not args.line and not args.cell:
- if not m_line and not m_cell:
- raise UsageError(
- 'No line or cell magic with name `%s` found.' % target
- )
- args.line = bool(m_line)
- args.cell = bool(m_cell)
-
- if args.line:
- mman.register_alias(name, target, 'line')
- print('Created `%s%s` as an alias for `%s%s`.' % (
- magic_escapes['line'], name,
- magic_escapes['line'], target))
-
- if args.cell:
- mman.register_alias(name, target, 'cell')
- print('Created `%s%s` as an alias for `%s%s`.' % (
- magic_escapes['cell'], name,
- magic_escapes['cell'], target))
-
- @line_magic
- def lsmagic(self, parameter_s=''):
- """List currently available magic functions."""
- return MagicsDisplay(self.shell.magics_manager)
-
- def _magic_docs(self, brief=False, rest=False):
- """Return docstrings from magic functions."""
- mman = self.shell.magics_manager
- docs = mman.lsmagic_docs(brief, missing='No documentation')
-
- if rest:
- format_string = '**%s%s**::\n\n%s\n\n'
- else:
- format_string = '%s%s:\n%s\n'
-
- return ''.join(
- [format_string % (magic_escapes['line'], fname,
- indent(dedent(fndoc)))
- for fname, fndoc in sorted(docs['line'].items())]
- +
- [format_string % (magic_escapes['cell'], fname,
- indent(dedent(fndoc)))
- for fname, fndoc in sorted(docs['cell'].items())]
- )
-
- @line_magic
- def magic(self, parameter_s=''):
- """Print information about the magic function system.
-
- Supported formats: -latex, -brief, -rest
- """
-
- mode = ''
- try:
- mode = parameter_s.split()[0][1:]
- except IndexError:
- pass
-
- brief = (mode == 'brief')
- rest = (mode == 'rest')
- magic_docs = self._magic_docs(brief, rest)
-
- if mode == 'latex':
- print(self.format_latex(magic_docs))
- return
- else:
- magic_docs = format_screen(magic_docs)
-
- out = ["""
-IPython's 'magic' functions
-===========================
-
-The magic function system provides a series of functions which allow you to
-control the behavior of IPython itself, plus a lot of system-type
-features. There are two kinds of magics, line-oriented and cell-oriented.
-
-Line magics are prefixed with the % character and work much like OS
-command-line calls: they get as an argument the rest of the line, where
-arguments are passed without parentheses or quotes. For example, this will
-time the given statement::
-
- %timeit range(1000)
-
-Cell magics are prefixed with a double %%, and they are functions that get as
-an argument not only the rest of the line, but also the lines below it in a
-separate argument. These magics are called with two arguments: the rest of the
-call line and the body of the cell, consisting of the lines below the first.
-For example::
-
- %%timeit x = numpy.random.randn(100, 100)
- numpy.linalg.svd(x)
-
-will time the execution of the numpy svd routine, running the assignment of x
-as part of the setup phase, which is not timed.
-
-In a line-oriented client (the terminal or Qt console IPython), starting a new
-input with %% will automatically enter cell mode, and IPython will continue
-reading input until a blank line is given. In the notebook, simply type the
-whole cell as one entity, but keep in mind that the %% escape can only be at
-the very start of the cell.
-
-NOTE: If you have 'automagic' enabled (via the command line option or with the
-%automagic function), you don't need to type in the % explicitly for line
-magics; cell magics always require an explicit '%%' escape. By default,
-IPython ships with automagic on, so you should only rarely need the % escape.
-
-Example: typing '%cd mydir' (without the quotes) changes your working directory
-to 'mydir', if it exists.
-
-For a list of the available magic functions, use %lsmagic. For a description
-of any of them, type %magic_name?, e.g. '%cd?'.
-
-Currently the magic system has the following functions:""",
- magic_docs,
- "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
- str(self.lsmagic()),
- ]
- page.page('\n'.join(out))
-
-
- @line_magic
- def page(self, parameter_s=''):
- """Pretty print the object and display it through a pager.
-
- %page [options] OBJECT
-
- If no object is given, use _ (last output).
-
- Options:
-
- -r: page str(object), don't pretty-print it."""
-
- # After a function contributed by Olivier Aubert, slightly modified.
-
- # Process options/args
- opts, args = self.parse_options(parameter_s, 'r')
- raw = 'r' in opts
-
- oname = args and args or '_'
- info = self.shell._ofind(oname)
- if info['found']:
- txt = (raw and str or pformat)( info['obj'] )
- page.page(txt)
- else:
- print('Object `%s` not found' % oname)
-
- @line_magic
- def profile(self, parameter_s=''):
+
+
+class MagicsDisplay(object):
+ def __init__(self, magics_manager):
+ self.magics_manager = magics_manager
+
+ def _lsmagic(self):
+ """The main implementation of the %lsmagic"""
+ mesc = magic_escapes['line']
+ cesc = magic_escapes['cell']
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ out = ['Available line magics:',
+ mesc + (' '+mesc).join(sorted(magics['line'])),
+ '',
+ 'Available cell magics:',
+ cesc + (' '+cesc).join(sorted(magics['cell'])),
+ '',
+ mman.auto_status()]
+ return '\n'.join(out)
+
+ def _repr_pretty_(self, p, cycle):
+ p.text(self._lsmagic())
+
+ def __str__(self):
+ return self._lsmagic()
+
+ def _jsonable(self):
+ """turn magics dict into jsonable dict of the same structure
+
+ replaces object instances with their class names as strings
+ """
+ magic_dict = {}
+ mman = self.magics_manager
+ magics = mman.lsmagic()
+ for key, subdict in magics.items():
+ d = {}
+ magic_dict[key] = d
+ for name, obj in subdict.items():
+ try:
+ classname = obj.__self__.__class__.__name__
+ except AttributeError:
+ classname = 'Other'
+
+ d[name] = classname
+ return magic_dict
+
+ def _repr_json_(self):
+ return self._jsonable()
+
+
+@magics_class
+class BasicMagics(Magics):
+ """Magics that provide central IPython functionality.
+
+ These are various magics that don't fit into specific categories but that
+ are all part of the base 'IPython experience'."""
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-l', '--line', action='store_true',
+ help="""Create a line magic alias."""
+ )
+ @magic_arguments.argument(
+ '-c', '--cell', action='store_true',
+ help="""Create a cell magic alias."""
+ )
+ @magic_arguments.argument(
+ 'name',
+ help="""Name of the magic to be created."""
+ )
+ @magic_arguments.argument(
+ 'target',
+ help="""Name of the existing line or cell magic."""
+ )
+ @line_magic
+ def alias_magic(self, line=''):
+ """Create an alias for an existing line or cell magic.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %alias_magic t timeit
+ Created `%t` as an alias for `%timeit`.
+ Created `%%t` as an alias for `%%timeit`.
+
+ In [2]: %t -n1 pass
+ 1 loops, best of 3: 954 ns per loop
+
+ In [3]: %%t -n1
+ ...: pass
+ ...:
+ 1 loops, best of 3: 954 ns per loop
+
+ In [4]: %alias_magic --cell whereami pwd
+ UsageError: Cell magic function `%%pwd` not found.
+ In [5]: %alias_magic --line whereami pwd
+ Created `%whereami` as an alias for `%pwd`.
+
+ In [6]: %whereami
+ Out[6]: u'/home/testuser'
+ """
+ args = magic_arguments.parse_argstring(self.alias_magic, line)
+ shell = self.shell
+ mman = self.shell.magics_manager
+ escs = ''.join(magic_escapes.values())
+
+ target = args.target.lstrip(escs)
+ name = args.name.lstrip(escs)
+
+ # Find the requested magics.
+ m_line = shell.find_magic(target, 'line')
+ m_cell = shell.find_magic(target, 'cell')
+ if args.line and m_line is None:
+ raise UsageError('Line magic function `%s%s` not found.' %
+ (magic_escapes['line'], target))
+ if args.cell and m_cell is None:
+ raise UsageError('Cell magic function `%s%s` not found.' %
+ (magic_escapes['cell'], target))
+
+ # If --line and --cell are not specified, default to the ones
+ # that are available.
+ if not args.line and not args.cell:
+ if not m_line and not m_cell:
+ raise UsageError(
+ 'No line or cell magic with name `%s` found.' % target
+ )
+ args.line = bool(m_line)
+ args.cell = bool(m_cell)
+
+ if args.line:
+ mman.register_alias(name, target, 'line')
+ print('Created `%s%s` as an alias for `%s%s`.' % (
+ magic_escapes['line'], name,
+ magic_escapes['line'], target))
+
+ if args.cell:
+ mman.register_alias(name, target, 'cell')
+ print('Created `%s%s` as an alias for `%s%s`.' % (
+ magic_escapes['cell'], name,
+ magic_escapes['cell'], target))
+
+ @line_magic
+ def lsmagic(self, parameter_s=''):
+ """List currently available magic functions."""
+ return MagicsDisplay(self.shell.magics_manager)
+
+ def _magic_docs(self, brief=False, rest=False):
+ """Return docstrings from magic functions."""
+ mman = self.shell.magics_manager
+ docs = mman.lsmagic_docs(brief, missing='No documentation')
+
+ if rest:
+ format_string = '**%s%s**::\n\n%s\n\n'
+ else:
+ format_string = '%s%s:\n%s\n'
+
+ return ''.join(
+ [format_string % (magic_escapes['line'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['line'].items())]
+ +
+ [format_string % (magic_escapes['cell'], fname,
+ indent(dedent(fndoc)))
+ for fname, fndoc in sorted(docs['cell'].items())]
+ )
+
+ @line_magic
+ def magic(self, parameter_s=''):
+ """Print information about the magic function system.
+
+ Supported formats: -latex, -brief, -rest
+ """
+
+ mode = ''
+ try:
+ mode = parameter_s.split()[0][1:]
+ except IndexError:
+ pass
+
+ brief = (mode == 'brief')
+ rest = (mode == 'rest')
+ magic_docs = self._magic_docs(brief, rest)
+
+ if mode == 'latex':
+ print(self.format_latex(magic_docs))
+ return
+ else:
+ magic_docs = format_screen(magic_docs)
+
+ out = ["""
+IPython's 'magic' functions
+===========================
+
+The magic function system provides a series of functions which allow you to
+control the behavior of IPython itself, plus a lot of system-type
+features. There are two kinds of magics, line-oriented and cell-oriented.
+
+Line magics are prefixed with the % character and work much like OS
+command-line calls: they get as an argument the rest of the line, where
+arguments are passed without parentheses or quotes. For example, this will
+time the given statement::
+
+ %timeit range(1000)
+
+Cell magics are prefixed with a double %%, and they are functions that get as
+an argument not only the rest of the line, but also the lines below it in a
+separate argument. These magics are called with two arguments: the rest of the
+call line and the body of the cell, consisting of the lines below the first.
+For example::
+
+ %%timeit x = numpy.random.randn(100, 100)
+ numpy.linalg.svd(x)
+
+will time the execution of the numpy svd routine, running the assignment of x
+as part of the setup phase, which is not timed.
+
+In a line-oriented client (the terminal or Qt console IPython), starting a new
+input with %% will automatically enter cell mode, and IPython will continue
+reading input until a blank line is given. In the notebook, simply type the
+whole cell as one entity, but keep in mind that the %% escape can only be at
+the very start of the cell.
+
+NOTE: If you have 'automagic' enabled (via the command line option or with the
+%automagic function), you don't need to type in the % explicitly for line
+magics; cell magics always require an explicit '%%' escape. By default,
+IPython ships with automagic on, so you should only rarely need the % escape.
+
+Example: typing '%cd mydir' (without the quotes) changes your working directory
+to 'mydir', if it exists.
+
+For a list of the available magic functions, use %lsmagic. For a description
+of any of them, type %magic_name?, e.g. '%cd?'.
+
+Currently the magic system has the following functions:""",
+ magic_docs,
+ "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
+ str(self.lsmagic()),
+ ]
+ page.page('\n'.join(out))
+
+
+ @line_magic
+ def page(self, parameter_s=''):
+ """Pretty print the object and display it through a pager.
+
+ %page [options] OBJECT
+
+ If no object is given, use _ (last output).
+
+ Options:
+
+ -r: page str(object), don't pretty-print it."""
+
+ # After a function contributed by Olivier Aubert, slightly modified.
+
+ # Process options/args
+ opts, args = self.parse_options(parameter_s, 'r')
+ raw = 'r' in opts
+
+ oname = args and args or '_'
+ info = self.shell._ofind(oname)
+ if info['found']:
+ txt = (raw and str or pformat)( info['obj'] )
+ page.page(txt)
+ else:
+ print('Object `%s` not found' % oname)
+
+ @line_magic
+ def profile(self, parameter_s=''):
"""DEPRECATED since IPython 2.0.
-
+
Raise `UsageError`. To profile code use the :magic:`prun` magic.
- See Also
- --------
+ See Also
+ --------
prun : run code using the Python profiler (:magic:`prun`)
- """
- warn("%profile is now deprecated. Please use get_ipython().profile instead.")
- from IPython.core.application import BaseIPythonApplication
- if BaseIPythonApplication.initialized():
- print(BaseIPythonApplication.instance().profile)
- else:
- error("profile is an application-level value, but you don't appear to be in an IPython application")
-
- @line_magic
- def pprint(self, parameter_s=''):
- """Toggle pretty printing on/off."""
- ptformatter = self.shell.display_formatter.formatters['text/plain']
- ptformatter.pprint = bool(1 - ptformatter.pprint)
- print('Pretty printing has been turned',
- ['OFF','ON'][ptformatter.pprint])
-
- @line_magic
- def colors(self, parameter_s=''):
- """Switch color scheme for prompts, info system and exception handlers.
-
- Currently implemented schemes: NoColor, Linux, LightBG.
-
- Color scheme names are not case-sensitive.
-
- Examples
- --------
- To get a plain black and white terminal::
-
- %colors nocolor
- """
- def color_switch_err(name):
- warn('Error changing %s color schemes.\n%s' %
+ """
+ warn("%profile is now deprecated. Please use get_ipython().profile instead.")
+ from IPython.core.application import BaseIPythonApplication
+ if BaseIPythonApplication.initialized():
+ print(BaseIPythonApplication.instance().profile)
+ else:
+ error("profile is an application-level value, but you don't appear to be in an IPython application")
+
+ @line_magic
+ def pprint(self, parameter_s=''):
+ """Toggle pretty printing on/off."""
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+ ptformatter.pprint = bool(1 - ptformatter.pprint)
+ print('Pretty printing has been turned',
+ ['OFF','ON'][ptformatter.pprint])
+
+ @line_magic
+ def colors(self, parameter_s=''):
+ """Switch color scheme for prompts, info system and exception handlers.
+
+ Currently implemented schemes: NoColor, Linux, LightBG.
+
+ Color scheme names are not case-sensitive.
+
+ Examples
+ --------
+ To get a plain black and white terminal::
+
+ %colors nocolor
+ """
+ def color_switch_err(name):
+ warn('Error changing %s color schemes.\n%s' %
(name, sys.exc_info()[1]), stacklevel=2)
-
-
- new_scheme = parameter_s.strip()
- if not new_scheme:
- raise UsageError(
- "%colors: you must specify a color scheme. See '%colors?'")
- # local shortcut
- shell = self.shell
-
+
+
+ new_scheme = parameter_s.strip()
+ if not new_scheme:
+ raise UsageError(
+ "%colors: you must specify a color scheme. See '%colors?'")
+ # local shortcut
+ shell = self.shell
+
# Set shell colour scheme
- try:
+ try:
shell.colors = new_scheme
shell.refresh_style()
- except:
+ except:
color_switch_err('shell')
- # Set exception colors
- try:
- shell.InteractiveTB.set_colors(scheme = new_scheme)
- shell.SyntaxTB.set_colors(scheme = new_scheme)
- except:
- color_switch_err('exception')
-
- # Set info (for 'object?') colors
- if shell.color_info:
- try:
- shell.inspector.set_active_scheme(new_scheme)
- except:
- color_switch_err('object inspector')
- else:
- shell.inspector.set_active_scheme('NoColor')
-
- @line_magic
- def xmode(self, parameter_s=''):
- """Switch modes for the exception handlers.
-
- Valid modes: Plain, Context and Verbose.
-
- If called without arguments, acts as a toggle."""
-
- def xmode_switch_err(name):
- warn('Error changing %s exception modes.\n%s' %
- (name,sys.exc_info()[1]))
-
- shell = self.shell
- new_mode = parameter_s.strip().capitalize()
- try:
- shell.InteractiveTB.set_mode(mode=new_mode)
- print('Exception reporting mode:',shell.InteractiveTB.mode)
- except:
- xmode_switch_err('user')
-
- @line_magic
- def quickref(self,arg):
- """ Show a quick reference sheet """
- from IPython.core.usage import quick_reference
- qr = quick_reference + self._magic_docs(brief=True)
- page.page(qr)
-
- @line_magic
- def doctest_mode(self, parameter_s=''):
- """Toggle doctest mode on and off.
-
- This mode is intended to make IPython behave as much as possible like a
- plain Python shell, from the perspective of how its prompts, exceptions
- and output look. This makes it easy to copy and paste parts of a
- session into doctests. It does so by:
-
- - Changing the prompts to the classic ``>>>`` ones.
- - Changing the exception reporting mode to 'Plain'.
- - Disabling pretty-printing of output.
-
- Note that IPython also supports the pasting of code snippets that have
- leading '>>>' and '...' prompts in them. This means that you can paste
- doctests from files or docstrings (even if they have leading
- whitespace), and the code will execute correctly. You can then use
- '%history -t' to see the translated history; this will give you the
- input after removal of all the leading prompts and whitespace, which
- can be pasted back into an editor.
-
- With these features, you can switch into this mode easily whenever you
- need to do testing and changes to doctests, without having to leave
- your existing IPython session.
- """
-
- # Shorthands
- shell = self.shell
- meta = shell.meta
- disp_formatter = self.shell.display_formatter
- ptformatter = disp_formatter.formatters['text/plain']
- # dstore is a data store kept in the instance metadata bag to track any
- # changes we make, so we can undo them later.
- dstore = meta.setdefault('doctest_mode',Struct())
- save_dstore = dstore.setdefault
-
- # save a few values we'll need to recover later
- mode = save_dstore('mode',False)
- save_dstore('rc_pprint',ptformatter.pprint)
- save_dstore('xmode',shell.InteractiveTB.mode)
- save_dstore('rc_separate_out',shell.separate_out)
- save_dstore('rc_separate_out2',shell.separate_out2)
- save_dstore('rc_separate_in',shell.separate_in)
- save_dstore('rc_active_types',disp_formatter.active_types)
-
- if not mode:
- # turn on
-
- # Prompt separators like plain python
- shell.separate_in = ''
- shell.separate_out = ''
- shell.separate_out2 = ''
-
-
- ptformatter.pprint = False
- disp_formatter.active_types = ['text/plain']
-
- shell.magic('xmode Plain')
- else:
- # turn off
- shell.separate_in = dstore.rc_separate_in
-
- shell.separate_out = dstore.rc_separate_out
- shell.separate_out2 = dstore.rc_separate_out2
-
- ptformatter.pprint = dstore.rc_pprint
- disp_formatter.active_types = dstore.rc_active_types
-
- shell.magic('xmode ' + dstore.xmode)
-
+ # Set exception colors
+ try:
+ shell.InteractiveTB.set_colors(scheme = new_scheme)
+ shell.SyntaxTB.set_colors(scheme = new_scheme)
+ except:
+ color_switch_err('exception')
+
+ # Set info (for 'object?') colors
+ if shell.color_info:
+ try:
+ shell.inspector.set_active_scheme(new_scheme)
+ except:
+ color_switch_err('object inspector')
+ else:
+ shell.inspector.set_active_scheme('NoColor')
+
+ @line_magic
+ def xmode(self, parameter_s=''):
+ """Switch modes for the exception handlers.
+
+ Valid modes: Plain, Context and Verbose.
+
+ If called without arguments, acts as a toggle."""
+
+ def xmode_switch_err(name):
+ warn('Error changing %s exception modes.\n%s' %
+ (name,sys.exc_info()[1]))
+
+ shell = self.shell
+ new_mode = parameter_s.strip().capitalize()
+ try:
+ shell.InteractiveTB.set_mode(mode=new_mode)
+ print('Exception reporting mode:',shell.InteractiveTB.mode)
+ except:
+ xmode_switch_err('user')
+
+ @line_magic
+ def quickref(self,arg):
+ """ Show a quick reference sheet """
+ from IPython.core.usage import quick_reference
+ qr = quick_reference + self._magic_docs(brief=True)
+ page.page(qr)
+
+ @line_magic
+ def doctest_mode(self, parameter_s=''):
+ """Toggle doctest mode on and off.
+
+ This mode is intended to make IPython behave as much as possible like a
+ plain Python shell, from the perspective of how its prompts, exceptions
+ and output look. This makes it easy to copy and paste parts of a
+ session into doctests. It does so by:
+
+ - Changing the prompts to the classic ``>>>`` ones.
+ - Changing the exception reporting mode to 'Plain'.
+ - Disabling pretty-printing of output.
+
+ Note that IPython also supports the pasting of code snippets that have
+ leading '>>>' and '...' prompts in them. This means that you can paste
+ doctests from files or docstrings (even if they have leading
+ whitespace), and the code will execute correctly. You can then use
+ '%history -t' to see the translated history; this will give you the
+ input after removal of all the leading prompts and whitespace, which
+ can be pasted back into an editor.
+
+ With these features, you can switch into this mode easily whenever you
+ need to do testing and changes to doctests, without having to leave
+ your existing IPython session.
+ """
+
+ # Shorthands
+ shell = self.shell
+ meta = shell.meta
+ disp_formatter = self.shell.display_formatter
+ ptformatter = disp_formatter.formatters['text/plain']
+ # dstore is a data store kept in the instance metadata bag to track any
+ # changes we make, so we can undo them later.
+ dstore = meta.setdefault('doctest_mode',Struct())
+ save_dstore = dstore.setdefault
+
+ # save a few values we'll need to recover later
+ mode = save_dstore('mode',False)
+ save_dstore('rc_pprint',ptformatter.pprint)
+ save_dstore('xmode',shell.InteractiveTB.mode)
+ save_dstore('rc_separate_out',shell.separate_out)
+ save_dstore('rc_separate_out2',shell.separate_out2)
+ save_dstore('rc_separate_in',shell.separate_in)
+ save_dstore('rc_active_types',disp_formatter.active_types)
+
+ if not mode:
+ # turn on
+
+ # Prompt separators like plain python
+ shell.separate_in = ''
+ shell.separate_out = ''
+ shell.separate_out2 = ''
+
+
+ ptformatter.pprint = False
+ disp_formatter.active_types = ['text/plain']
+
+ shell.magic('xmode Plain')
+ else:
+ # turn off
+ shell.separate_in = dstore.rc_separate_in
+
+ shell.separate_out = dstore.rc_separate_out
+ shell.separate_out2 = dstore.rc_separate_out2
+
+ ptformatter.pprint = dstore.rc_pprint
+ disp_formatter.active_types = dstore.rc_active_types
+
+ shell.magic('xmode ' + dstore.xmode)
+
# mode here is the state before we switch; switch_doctest_mode takes
# the mode we're switching to.
shell.switch_doctest_mode(not mode)
- # Store new mode and inform
+ # Store new mode and inform
dstore.mode = bool(not mode)
- mode_label = ['OFF','ON'][dstore.mode]
- print('Doctest mode is:', mode_label)
-
- @line_magic
- def gui(self, parameter_s=''):
- """Enable or disable IPython GUI event loop integration.
-
- %gui [GUINAME]
-
- This magic replaces IPython's threaded shells that were activated
- using the (pylab/wthread/etc.) command line flags. GUI toolkits
- can now be enabled at runtime and keyboard
- interrupts should work without any problems. The following toolkits
- are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
-
- %gui wx # enable wxPython event loop integration
- %gui qt4|qt # enable PyQt4 event loop integration
- %gui qt5 # enable PyQt5 event loop integration
- %gui gtk # enable PyGTK event loop integration
- %gui gtk3 # enable Gtk3 event loop integration
- %gui tk # enable Tk event loop integration
- %gui osx # enable Cocoa event loop integration
- # (requires %matplotlib 1.1)
- %gui # disable all event loop integration
-
- WARNING: after any of these has been called you can simply create
- an application object, but DO NOT start the event loop yourself, as
- we have already handled that.
- """
- opts, arg = self.parse_options(parameter_s, '')
- if arg=='': arg = None
- try:
- return self.shell.enable_gui(arg)
- except Exception as e:
- # print simple error message, rather than traceback if we can't
- # hook up the GUI
- error(str(e))
-
- @skip_doctest
- @line_magic
- def precision(self, s=''):
- """Set floating point precision for pretty printing.
-
- Can set either integer precision or a format string.
-
- If numpy has been imported and precision is an int,
- numpy display precision will also be set, via ``numpy.set_printoptions``.
-
- If no argument is given, defaults will be restored.
-
- Examples
- --------
- ::
-
- In [1]: from math import pi
-
- In [2]: %precision 3
- Out[2]: u'%.3f'
-
- In [3]: pi
- Out[3]: 3.142
-
- In [4]: %precision %i
- Out[4]: u'%i'
-
- In [5]: pi
- Out[5]: 3
-
- In [6]: %precision %e
- Out[6]: u'%e'
-
- In [7]: pi**10
- Out[7]: 9.364805e+04
-
- In [8]: %precision
- Out[8]: u'%r'
-
- In [9]: pi**10
- Out[9]: 93648.047476082982
- """
- ptformatter = self.shell.display_formatter.formatters['text/plain']
- ptformatter.float_precision = s
- return ptformatter.float_format
-
- @magic_arguments.magic_arguments()
- @magic_arguments.argument(
- '-e', '--export', action='store_true', default=False,
+ mode_label = ['OFF','ON'][dstore.mode]
+ print('Doctest mode is:', mode_label)
+
+ @line_magic
+ def gui(self, parameter_s=''):
+ """Enable or disable IPython GUI event loop integration.
+
+ %gui [GUINAME]
+
+ This magic replaces IPython's threaded shells that were activated
+ using the (pylab/wthread/etc.) command line flags. GUI toolkits
+ can now be enabled at runtime and keyboard
+ interrupts should work without any problems. The following toolkits
+ are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
+
+ %gui wx # enable wxPython event loop integration
+ %gui qt4|qt # enable PyQt4 event loop integration
+ %gui qt5 # enable PyQt5 event loop integration
+ %gui gtk # enable PyGTK event loop integration
+ %gui gtk3 # enable Gtk3 event loop integration
+ %gui tk # enable Tk event loop integration
+ %gui osx # enable Cocoa event loop integration
+ # (requires %matplotlib 1.1)
+ %gui # disable all event loop integration
+
+ WARNING: after any of these has been called you can simply create
+ an application object, but DO NOT start the event loop yourself, as
+ we have already handled that.
+ """
+ opts, arg = self.parse_options(parameter_s, '')
+ if arg=='': arg = None
+ try:
+ return self.shell.enable_gui(arg)
+ except Exception as e:
+ # print simple error message, rather than traceback if we can't
+ # hook up the GUI
+ error(str(e))
+
+ @skip_doctest
+ @line_magic
+ def precision(self, s=''):
+ """Set floating point precision for pretty printing.
+
+ Can set either integer precision or a format string.
+
+ If numpy has been imported and precision is an int,
+ numpy display precision will also be set, via ``numpy.set_printoptions``.
+
+ If no argument is given, defaults will be restored.
+
+ Examples
+ --------
+ ::
+
+ In [1]: from math import pi
+
+ In [2]: %precision 3
+ Out[2]: u'%.3f'
+
+ In [3]: pi
+ Out[3]: 3.142
+
+ In [4]: %precision %i
+ Out[4]: u'%i'
+
+ In [5]: pi
+ Out[5]: 3
+
+ In [6]: %precision %e
+ Out[6]: u'%e'
+
+ In [7]: pi**10
+ Out[7]: 9.364805e+04
+
+ In [8]: %precision
+ Out[8]: u'%r'
+
+ In [9]: pi**10
+ Out[9]: 93648.047476082982
+ """
+ ptformatter = self.shell.display_formatter.formatters['text/plain']
+ ptformatter.float_precision = s
+ return ptformatter.float_format
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-e', '--export', action='store_true', default=False,
help=argparse.SUPPRESS
- )
- @magic_arguments.argument(
- 'filename', type=unicode_type,
- help='Notebook name or filename'
- )
- @line_magic
- def notebook(self, s):
- """Export and convert IPython notebooks.
-
- This function can export the current IPython history to a notebook file.
+ )
+ @magic_arguments.argument(
+ 'filename', type=unicode_type,
+ help='Notebook name or filename'
+ )
+ @line_magic
+ def notebook(self, s):
+ """Export and convert IPython notebooks.
+
+ This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
The -e or --export flag is deprecated in IPython 5.2, and will be
removed in the future.
- """
- args = magic_arguments.parse_argstring(self.notebook, s)
-
- from nbformat import write, v4
+ """
+ args = magic_arguments.parse_argstring(self.notebook, s)
+
+ from nbformat import write, v4
cells = []
hist = list(self.shell.history_manager.get_range())
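
`MagicsDisplay` above, returned by `%lsmagic`, implements both
`_repr_pretty_` and `_repr_json_`, so a single object renders as text at the
terminal and as structured data in JSON-capable frontends. A cut-down sketch
of that dual-representation pattern, with a toy payload::

    class DualRepr(object):
        def __init__(self, data):
            self.data = data  # e.g. {'line': {'cd': 'OSMagics'}}

        def _repr_pretty_(self, p, cycle):
            # IPython's pretty printer calls this for plain-text output.
            p.text(', '.join(sorted(self.data)))

        def _repr_json_(self):
            # JSON-capable frontends (e.g. the notebook) prefer this form.
            return self.data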
diff --git a/contrib/python/ipython/py2/IPython/core/magics/code.py b/contrib/python/ipython/py2/IPython/core/magics/code.py
index 4f17cda0c0..4c1a40f197 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/code.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/code.py
@@ -1,146 +1,146 @@
-"""Implementation of code management magic functions.
-"""
-from __future__ import print_function
+"""Implementation of code management magic functions.
+"""
+from __future__ import print_function
from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import inspect
-import io
-import os
-import re
-import sys
-import ast
-from itertools import chain
-
-# Our own packages
-from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
-from IPython.core.macro import Macro
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.core.oinspect import find_file, find_source_lines
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils import py3compat
-from IPython.utils.py3compat import string_types
-from IPython.utils.contexts import preserve_keys
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import inspect
+import io
+import os
+import re
+import sys
+import ast
+from itertools import chain
+
+# Our own packages
+from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.oinspect import find_file, find_source_lines
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import py3compat
+from IPython.utils.py3compat import string_types
+from IPython.utils.contexts import preserve_keys
from IPython.utils.path import get_py_filename
from warnings import warn
from logging import error
-from IPython.utils.text import get_text_list
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-# Used for exception handling in magic_edit
-class MacroToEdit(ValueError): pass
-
-ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
-
-# To match, e.g. 8-10 1:5 :10 3-
-range_re = re.compile(r"""
-(?P<start>\d+)?
-((?P<sep>[\-:])
- (?P<end>\d+)?)?
-$""", re.VERBOSE)
-
-
-def extract_code_ranges(ranges_str):
- """Turn a string of range for %%load into 2-tuples of (start, stop)
- ready to use as a slice of the content splitted by lines.
-
- Examples
- --------
- list(extract_code_ranges("5-10 2"))
- [(4, 10), (1, 2)]
- """
- for range_str in ranges_str.split():
- rmatch = range_re.match(range_str)
- if not rmatch:
- continue
- sep = rmatch.group("sep")
- start = rmatch.group("start")
- end = rmatch.group("end")
-
- if sep == '-':
- start = int(start) - 1 if start else None
- end = int(end) if end else None
- elif sep == ':':
- start = int(start) - 1 if start else None
- end = int(end) - 1 if end else None
- else:
- end = int(start)
- start = int(start) - 1
- yield (start, end)
-
-
-@skip_doctest
-def extract_symbols(code, symbols):
- """
- Return a tuple (blocks, not_found)
- where ``blocks`` is a list of code fragments
- for each symbol parsed from code, and ``not_found`` are
- symbols not found in the code.
-
- For example::
-
- >>> code = '''a = 10
-
- def b(): return 42
-
- class A: pass'''
-
- >>> extract_symbols(code, 'A,b,z')
- (["class A: pass", "def b(): return 42"], ['z'])
- """
- symbols = symbols.split(',')
-
- # this will raise SyntaxError if code isn't valid Python
- py_code = ast.parse(code)
-
- marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
- code = code.split('\n')
-
- symbols_lines = {}
-
- # we already know the start_lineno of each symbol (marks).
- # To find each end_lineno, we traverse in reverse order, skipping
- # the blank lines that trail each symbol.
- end = len(code)
- for name, start in reversed(marks):
- while not code[end - 1].strip():
- end -= 1
- if name:
- symbols_lines[name] = (start - 1, end)
- end = start - 1
-
- # Now symbols_lines is a map
- # {'symbol_name': (start_lineno, end_lineno), ...}
-
- # fill a list with chunks of code for each requested symbol
- blocks = []
- not_found = []
- for symbol in symbols:
- if symbol in symbols_lines:
- start, end = symbols_lines[symbol]
- blocks.append('\n'.join(code[start:end]) + '\n')
- else:
- not_found.append(symbol)
-
- return blocks, not_found
-
+from IPython.utils.text import get_text_list
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+# Used for exception handling in magic_edit
+class MacroToEdit(ValueError): pass
+
+ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
+
+# To match, e.g. 8-10 1:5 :10 3-
+range_re = re.compile(r"""
+(?P<start>\d+)?
+((?P<sep>[\-:])
+ (?P<end>\d+)?)?
+$""", re.VERBOSE)
+
+
+def extract_code_ranges(ranges_str):
+ """Turn a string of range for %%load into 2-tuples of (start, stop)
+ ready to use as a slice of the content splitted by lines.
+
+ Examples
+ --------
+ list(extract_code_ranges("5-10 2"))
+ [(4, 10), (1, 2)]
+ """
+ for range_str in ranges_str.split():
+ rmatch = range_re.match(range_str)
+ if not rmatch:
+ continue
+ sep = rmatch.group("sep")
+ start = rmatch.group("start")
+ end = rmatch.group("end")
+
+ if sep == '-':
+ start = int(start) - 1 if start else None
+ end = int(end) if end else None
+ elif sep == ':':
+ start = int(start) - 1 if start else None
+ end = int(end) - 1 if end else None
+ else:
+ end = int(start)
+ start = int(start) - 1
+ yield (start, end)
+
+
+@skip_doctest
+def extract_symbols(code, symbols):
+ """
+ Return a tuple (blocks, not_found)
+ where ``blocks`` is a list of code fragments
+ for each symbol parsed from code, and ``not_found`` are
+ symbols not found in the code.
+
+ For example::
+
+ >>> code = '''a = 10
+
+ def b(): return 42
+
+ class A: pass'''
+
+ >>> extract_symbols(code, 'A,b,z')
+ (["class A: pass", "def b(): return 42"], ['z'])
+ """
+ symbols = symbols.split(',')
+
+ # this will raise SyntaxError if code isn't valid Python
+ py_code = ast.parse(code)
+
+ marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
+ code = code.split('\n')
+
+ symbols_lines = {}
+
+ # we already know the start_lineno of each symbol (marks).
+ # To find each end_lineno, we traverse in reverse order, skipping
+ # blank lines, until the last non-blank line of each block.
+ end = len(code)
+ for name, start in reversed(marks):
+ while not code[end - 1].strip():
+ end -= 1
+ if name:
+ symbols_lines[name] = (start - 1, end)
+ end = start - 1
+
+ # Now symbols_lines is a map
+ # {'symbol_name': (start_lineno, end_lineno), ...}
+
+ # fill a list with chunks of code for each requested symbol
+ blocks = []
+ not_found = []
+ for symbol in symbols:
+ if symbol in symbols_lines:
+ start, end = symbols_lines[symbol]
+ blocks.append('\n'.join(code[start:end]) + '\n')
+ else:
+ not_found.append(symbol)
+
+ return blocks, not_found
+
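
For reference, a hedged sketch of how `%load -s` consumes this helper (names as defined above; illustrative only)::

    source = "a = 10\n\ndef b(): return 42\n\nclass A: pass"
    blocks, not_found = extract_symbols(source, 'A,b,z')
    # blocks    -> ['class A: pass\n', 'def b(): return 42\n']
    # not_found -> ['z']
    contents = '\n'.join(blocks)  # what %load puts into the next input
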
def strip_initial_indent(lines):
"""For %load, strip indent from lines until finding an unindented line.
-
+
https://github.com/ipython/ipython/issues/9775
"""
indent_re = re.compile(r'\s+')
@@ -169,578 +169,578 @@ def strip_initial_indent(lines):
yield line
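
The generator's body is elided by the hunk above; assuming the upstream IPython implementation, its behaviour is roughly this (illustrative)::

    lines = ["    if x:", "        y()", "z()"]
    list(strip_initial_indent(lines))
    # -> ['if x:', '    y()', 'z()']
    # The first line's indent is stripped from the leading run of lines;
    # once the unindented 'z()' appears, the rest passes through as-is.
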
-class InteractivelyDefined(Exception):
- """Exception for interactively defined variable in magic_edit"""
- def __init__(self, index):
- self.index = index
-
-
-@magics_class
-class CodeMagics(Magics):
- """Magics related to code management (loading, saving, editing, ...)."""
-
- def __init__(self, *args, **kwargs):
- self._knowntemps = set()
- super(CodeMagics, self).__init__(*args, **kwargs)
-
- @line_magic
- def save(self, parameter_s=''):
- """Save a set of lines or a macro to a given filename.
-
- Usage:\\
- %save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
-
- Options:
-
- -r: use 'raw' input. By default, the 'processed' history is used,
- so that magics are loaded in their transformed version as valid
- Python. If this option is given, the raw input as typed at the
- command line is used instead.
-
- -f: force overwrite. If file exists, %save will prompt for overwrite
- unless -f is given.
-
- -a: append to the file instead of overwriting it.
-
- This function uses the same syntax as %history for input ranges,
- then saves the lines to the filename you specify.
-
- It adds a '.py' extension to the file if you don't do so yourself, and
- it asks for confirmation before overwriting existing files.
-
- If the `-r` option is used, the default extension is `.ipy`.
- """
-
- opts,args = self.parse_options(parameter_s,'fra',mode='list')
- if not args:
- raise UsageError('Missing filename.')
- raw = 'r' in opts
- force = 'f' in opts
- append = 'a' in opts
- mode = 'a' if append else 'w'
- ext = u'.ipy' if raw else u'.py'
+class InteractivelyDefined(Exception):
+ """Exception for interactively defined variable in magic_edit"""
+ def __init__(self, index):
+ self.index = index
+
+
+@magics_class
+class CodeMagics(Magics):
+ """Magics related to code management (loading, saving, editing, ...)."""
+
+ def __init__(self, *args, **kwargs):
+ self._knowntemps = set()
+ super(CodeMagics, self).__init__(*args, **kwargs)
+
+ @line_magic
+ def save(self, parameter_s=''):
+ """Save a set of lines or a macro to a given filename.
+
+ Usage:\\
+ %save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version as valid
+ Python. If this option is given, the raw input as typed at the
+ command line is used instead.
+
+ -f: force overwrite. If file exists, %save will prompt for overwrite
+ unless -f is given.
+
+ -a: append to the file instead of overwriting it.
+
+ This function uses the same syntax as %history for input ranges,
+ then saves the lines to the filename you specify.
+
+ It adds a '.py' extension to the file if you don't do so yourself, and
+ it asks for confirmation before overwriting existing files.
+
+ If the `-r` option is used, the default extension is `.ipy`.
+ """
+
+ opts,args = self.parse_options(parameter_s,'fra',mode='list')
+ if not args:
+ raise UsageError('Missing filename.')
+ raw = 'r' in opts
+ force = 'f' in opts
+ append = 'a' in opts
+ mode = 'a' if append else 'w'
+ ext = u'.ipy' if raw else u'.py'
fname, codefrom = args[0], " ".join(args[1:])
- if not fname.endswith((u'.py',u'.ipy')):
- fname += ext
- file_exists = os.path.isfile(fname)
- if file_exists and not force and not append:
- try:
- overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
- except StdinNotImplementedError:
- print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
- return
- if not overwrite :
- print('Operation cancelled.')
- return
- try:
- cmds = self.shell.find_user_code(codefrom,raw)
- except (TypeError, ValueError) as e:
- print(e.args[0])
- return
- out = py3compat.cast_unicode(cmds)
- with io.open(fname, mode, encoding="utf-8") as f:
- if not file_exists or not append:
- f.write(u"# coding: utf-8\n")
- f.write(out)
- # make sure we end on a newline
- if not out.endswith(u'\n'):
- f.write(u'\n')
- print('The following commands were written to file `%s`:' % fname)
- print(cmds)
-
- @line_magic
- def pastebin(self, parameter_s=''):
- """Upload code to Github's Gist paste bin, returning the URL.
-
- Usage:\\
- %pastebin [-d "Custom description"] 1-7
-
- The argument can be an input history range, a filename, or the name of a
- string or macro.
-
- Options:
-
- -d: Pass a custom description for the gist. The default will say
- "Pasted from IPython".
- """
- opts, args = self.parse_options(parameter_s, 'd:')
-
- try:
- code = self.shell.find_user_code(args)
- except (ValueError, TypeError) as e:
- print(e.args[0])
- return
-
- # Deferred import
- try:
- from urllib.request import urlopen # Py 3
- except ImportError:
- from urllib2 import urlopen
- import json
- post_data = json.dumps({
- "description": opts.get('d', "Pasted from IPython"),
- "public": True,
- "files": {
- "file1.py": {
- "content": code
- }
- }
- }).encode('utf-8')
-
- response = urlopen("https://api.github.com/gists", post_data)
- response_data = json.loads(response.read().decode('utf-8'))
- return response_data['html_url']
-
- @line_magic
- def loadpy(self, arg_s):
- """Alias of `%load`
-
- `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
- extension, so it has been renamed simply to `%load`. See
- `%load`'s docstring for more info.
- """
- self.load(arg_s)
-
- @line_magic
- def load(self, arg_s):
- """Load code into the current frontend.
-
- Usage:\\
- %load [options] source
-
- where source can be a filename, URL, input history range, macro, or
- element in the user namespace
-
- Options:
-
- -r <lines>: Specify lines or ranges of lines to load from the source.
- Ranges can be specified as x-y (x..y) or in python-style x:y
- (x..(y-1)). Both limits x and y can be left blank (meaning the
- beginning and end of the file, respectively).
-
- -s <symbols>: Specify functions or classes to load from python source.
-
- -y : Don't ask for confirmation when loading source above 200,000 characters.
-
- -n : Include the user's namespace when searching for source code.
-
- This magic command can take a local filename, a URL, a history
- range (see %history) or a macro as argument; it will prompt for
- confirmation before loading source with more than 200,000 characters, unless
- the -y flag is passed or the frontend does not support raw_input::
-
- %load myscript.py
- %load 7-27
- %load myMacro
- %load http://www.example.com/myscript.py
- %load -r 5-10 myscript.py
- %load -r 10-20,30,40: foo.py
- %load -s MyClass,wonder_function myscript.py
- %load -n MyClass
- %load -n my_module.wonder_function
- """
- opts,args = self.parse_options(arg_s,'yns:r:')
-
- if not args:
- raise UsageError('Missing filename, URL, input history range, '
- 'macro, or element in the user namespace.')
-
- search_ns = 'n' in opts
-
- contents = self.shell.find_user_code(args, search_ns=search_ns)
-
- if 's' in opts:
- try:
- blocks, not_found = extract_symbols(contents, opts['s'])
- except SyntaxError:
- # non python code
- error("Unable to parse the input as valid Python code")
- return
-
- if len(not_found) == 1:
- warn('The symbol `%s` was not found' % not_found[0])
- elif len(not_found) > 1:
- warn('The symbols %s were not found' % get_text_list(not_found,
- wrap_item_with='`')
- )
-
- contents = '\n'.join(blocks)
-
- if 'r' in opts:
- ranges = opts['r'].replace(',', ' ')
- lines = contents.split('\n')
- slices = extract_code_ranges(ranges)
- contents = [lines[slice(*slc)] for slc in slices]
+ if not fname.endswith((u'.py',u'.ipy')):
+ fname += ext
+ file_exists = os.path.isfile(fname)
+ if file_exists and not force and not append:
+ try:
+ overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
+ except StdinNotImplementedError:
+ print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
+ return
+ if not overwrite :
+ print('Operation cancelled.')
+ return
+ try:
+ cmds = self.shell.find_user_code(codefrom,raw)
+ except (TypeError, ValueError) as e:
+ print(e.args[0])
+ return
+ out = py3compat.cast_unicode(cmds)
+ with io.open(fname, mode, encoding="utf-8") as f:
+ if not file_exists or not append:
+ f.write(u"# coding: utf-8\n")
+ f.write(out)
+ # make sure we end on a newline
+ if not out.endswith(u'\n'):
+ f.write(u'\n')
+ print('The following commands were written to file `%s`:' % fname)
+ print(cmds)
+
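
An abridged, illustrative session for the magic above (output paraphrased; `-f` skips the overwrite prompt)::

    In [1]: x = 1
    In [2]: print(x)
    In [3]: %save -f my_session 1-2
    The following commands were written to file `my_session.py`:
    x = 1
    print(x)
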
+ @line_magic
+ def pastebin(self, parameter_s=''):
+ """Upload code to Github's Gist paste bin, returning the URL.
+
+ Usage:\\
+ %pastebin [-d "Custom description"] 1-7
+
+ The argument can be an input history range, a filename, or the name of a
+ string or macro.
+
+ Options:
+
+ -d: Pass a custom description for the gist. The default will say
+ "Pasted from IPython".
+ """
+ opts, args = self.parse_options(parameter_s, 'd:')
+
+ try:
+ code = self.shell.find_user_code(args)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+
+ # Deferred import
+ try:
+ from urllib.request import urlopen # Py 3
+ except ImportError:
+ from urllib2 import urlopen
+ import json
+ post_data = json.dumps({
+ "description": opts.get('d', "Pasted from IPython"),
+ "public": True,
+ "files": {
+ "file1.py": {
+ "content": code
+ }
+ }
+ }).encode('utf-8')
+
+ response = urlopen("https://api.github.com/gists", post_data)
+ response_data = json.loads(response.read().decode('utf-8'))
+ return response_data['html_url']
+
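
Illustrative usage of the magic above (the returned URL is made up; note that GitHub has since restricted anonymous gist creation, so the endpoint used here may require authentication today)::

    In [1]: %pastebin -d "my snippet" 1-3
    Out[1]: u'https://gist.github.com/anonymous/...'
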
+ @line_magic
+ def loadpy(self, arg_s):
+ """Alias of `%load`
+
+ `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
+ extension, so it has been renamed simply to `%load`. See
+ `%load`'s docstring for more info.
+ """
+ self.load(arg_s)
+
+ @line_magic
+ def load(self, arg_s):
+ """Load code into the current frontend.
+
+ Usage:\\
+ %load [options] source
+
+ where source can be a filename, URL, input history range, macro, or
+ element in the user namespace
+
+ Options:
+
+ -r <lines>: Specify lines or ranges of lines to load from the source.
+ Ranges can be specified as x-y (x..y) or in python-style x:y
+ (x..(y-1)). Both limits x and y can be left blank (meaning the
+ beginning and end of the file, respectively).
+
+ -s <symbols>: Specify functions or classes to load from python source.
+
+ -y : Don't ask for confirmation when loading source above 200,000 characters.
+
+ -n : Include the user's namespace when searching for source code.
+
+ This magic command can take a local filename, a URL, a history
+ range (see %history) or a macro as argument; it will prompt for
+ confirmation before loading source with more than 200,000 characters, unless
+ the -y flag is passed or the frontend does not support raw_input::
+
+ %load myscript.py
+ %load 7-27
+ %load myMacro
+ %load http://www.example.com/myscript.py
+ %load -r 5-10 myscript.py
+ %load -r 10-20,30,40: foo.py
+ %load -s MyClass,wonder_function myscript.py
+ %load -n MyClass
+ %load -n my_module.wonder_function
+ """
+ opts,args = self.parse_options(arg_s,'yns:r:')
+
+ if not args:
+ raise UsageError('Missing filename, URL, input history range, '
+ 'macro, or element in the user namespace.')
+
+ search_ns = 'n' in opts
+
+ contents = self.shell.find_user_code(args, search_ns=search_ns)
+
+ if 's' in opts:
+ try:
+ blocks, not_found = extract_symbols(contents, opts['s'])
+ except SyntaxError:
+ # non python code
+ error("Unable to parse the input as valid Python code")
+ return
+
+ if len(not_found) == 1:
+ warn('The symbol `%s` was not found' % not_found[0])
+ elif len(not_found) > 1:
+ warn('The symbols %s were not found' % get_text_list(not_found,
+ wrap_item_with='`')
+ )
+
+ contents = '\n'.join(blocks)
+
+ if 'r' in opts:
+ ranges = opts['r'].replace(',', ' ')
+ lines = contents.split('\n')
+ slices = extract_code_ranges(ranges)
+ contents = [lines[slice(*slc)] for slc in slices]
contents = '\n'.join(strip_initial_indent(chain.from_iterable(contents)))
-
- l = len(contents)
-
- # 200,000 characters is ~ 2500 full 80-character lines,
- # so on average more than 5000 lines
- if l > 200000 and 'y' not in opts:
- try:
- ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
- " (%d characters). Continue (y/[N]) ?" % l), default='n' )
- except StdinNotImplementedError:
- # assume yes if raw input is not implemented
- ans = True
-
- if ans is False :
- print('Operation cancelled.')
- return
-
- contents = "# %load {}\n".format(arg_s) + contents
-
- self.shell.set_next_input(contents, replace=True)
-
- @staticmethod
- def _find_edit_target(shell, args, opts, last_call):
- """Utility method used by magic_edit to find what to edit."""
-
- def make_filename(arg):
- "Make a filename from the given args"
- try:
- filename = get_py_filename(arg)
- except IOError:
- # If it ends with .py but doesn't already exist, assume we want
- # a new file.
- if arg.endswith('.py'):
- filename = arg
- else:
- filename = None
- return filename
-
- # Set a few locals from the options for convenience:
- opts_prev = 'p' in opts
- opts_raw = 'r' in opts
-
- # custom exceptions
- class DataIsObject(Exception): pass
-
- # Default line number value
- lineno = opts.get('n',None)
-
- if opts_prev:
- args = '_%s' % last_call[0]
- if args not in shell.user_ns:
- args = last_call[1]
-
- # by default this is done with temp files, except when the given
- # arg is a filename
- use_temp = True
-
- data = ''
-
- # First, see if the arguments should be a filename.
- filename = make_filename(args)
- if filename:
- use_temp = False
- elif args:
- # Mode where user specifies ranges of lines, like in %macro.
- data = shell.extract_input_lines(args, opts_raw)
- if not data:
- try:
- # Load the parameter given as a variable. If not a string,
- # process it as an object instead (below)
-
- #print '*** args',args,'type',type(args) # dbg
- data = eval(args, shell.user_ns)
- if not isinstance(data, string_types):
- raise DataIsObject
-
- except (NameError,SyntaxError):
- # given argument is not a variable, try as a filename
- filename = make_filename(args)
- if filename is None:
- warn("Argument given (%s) can't be found as a variable "
- "or as a filename." % args)
- return (None, None, None)
- use_temp = False
-
- except DataIsObject:
- # macros have a special edit function
- if isinstance(data, Macro):
- raise MacroToEdit(data)
-
- # For objects, try to edit the file where they are defined
- filename = find_file(data)
- if filename:
- if 'fakemodule' in filename.lower() and \
- inspect.isclass(data):
- # class created by %edit? Try to find source
- # by looking for method definitions instead, the
- # __module__ in those classes is FakeModule.
- attrs = [getattr(data, aname) for aname in dir(data)]
- for attr in attrs:
- if not inspect.ismethod(attr):
- continue
- filename = find_file(attr)
- if filename and \
- 'fakemodule' not in filename.lower():
- # change the attribute to be the edit
- # target instead
- data = attr
- break
-
- m = ipython_input_pat.match(os.path.basename(filename))
- if m:
- raise InteractivelyDefined(int(m.groups()[0]))
-
- datafile = 1
- if filename is None:
- filename = make_filename(args)
- datafile = 1
- if filename is not None:
- # only warn about this if we get a real name
- warn('Could not find file where `%s` is defined.\n'
- 'Opening a file named `%s`' % (args, filename))
- # Now, make sure we can actually read the source (if it was
- # in a temp file it's gone by now).
- if datafile:
- if lineno is None:
- lineno = find_source_lines(data)
- if lineno is None:
- filename = make_filename(args)
- if filename is None:
- warn('The file where `%s` was defined '
- 'cannot be read or found.' % data)
- return (None, None, None)
- use_temp = False
-
- if use_temp:
- filename = shell.mktempfile(data)
- print('IPython will make a temporary file named:',filename)
-
- # use last_call to remember the state of the previous call, but don't
- # let it be clobbered by successive '-p' calls.
- try:
- last_call[0] = shell.displayhook.prompt_count
- if not opts_prev:
- last_call[1] = args
- except:
- pass
-
-
- return filename, lineno, use_temp
-
- def _edit_macro(self,mname,macro):
- """open an editor with the macro data in a file"""
- filename = self.shell.mktempfile(macro.value)
- self.shell.hooks.editor(filename)
-
- # and make a new macro object, to replace the old one
- with open(filename) as mfile:
- mvalue = mfile.read()
- self.shell.user_ns[mname] = Macro(mvalue)
-
- @skip_doctest
- @line_magic
- def edit(self, parameter_s='',last_call=['','']):
- """Bring up an editor and execute the resulting code.
-
- Usage:
- %edit [options] [args]
-
- %edit runs IPython's editor hook. The default version of this hook is
- set to call the editor specified by your $EDITOR environment variable.
- If this isn't found, it will default to vi under Linux/Unix and to
- notepad under Windows. See the end of this docstring for how to change
- the editor hook.
-
- You can also set the value of this editor via the
- ``TerminalInteractiveShell.editor`` option in your configuration file.
- This is useful if you wish to use a different editor from your typical
- default with IPython (and for Windows users who typically don't set
- environment variables).
-
- This command allows you to conveniently edit multi-line code right in
- your IPython session.
-
- If called without arguments, %edit opens up an empty editor with a
- temporary file and will execute the contents of this file when you
- close it (don't forget to save it!).
-
-
- Options:
-
- -n <number>: open the editor at a specified line number. By default,
- the IPython editor hook uses the unix syntax 'editor +N filename', but
- you can configure this by providing your own modified hook if your
- favorite editor supports line-number specifications with a different
- syntax.
-
- -p: this will call the editor with the same data as the previous time
- it was used, regardless of how long ago (in your current session) it
- was.
-
- -r: use 'raw' input. This option only applies to input taken from the
- user's history. By default, the 'processed' history is used, so that
- magics are loaded in their transformed version as valid Python. If
- this option is given, the raw input as typed at the command line is
- used instead. When you exit the editor, it will be executed by
- IPython's own processor.
-
- -x: do not execute the edited code immediately upon exit. This is
- mainly useful if you are editing programs which need to be called with
- command line arguments, which you can then do using %run.
-
-
- Arguments:
-
- If arguments are given, the following possibilities exist:
-
- - If the argument is a filename, IPython will load that into the
- editor. It will execute its contents with execfile() when you exit,
- loading any code in the file into your interactive namespace.
-
- - The arguments are ranges of input history, e.g. "7 ~1/4-6".
- The syntax is the same as in the %history magic.
-
- - If the argument is a string variable, its contents are loaded
- into the editor. You can thus edit any string which contains
- python code (including the result of previous edits).
-
- - If the argument is the name of an object (other than a string),
- IPython will try to locate the file where it was defined and open the
- editor at the point where it is defined. You can use `%edit function`
- to load an editor exactly at the point where 'function' is defined,
- edit it and have the file be executed automatically.
-
- - If the object is a macro (see %macro for details), this opens up your
- specified editor with a temporary file containing the macro's data.
- Upon exit, the macro is reloaded with the contents of the file.
-
- Note: opening at an exact line is only supported under Unix, and some
- editors (like kedit and gedit up to Gnome 2.8) do not understand the
- '+NUMBER' parameter necessary for this feature. Good editors like
- (X)Emacs, vi, jed, pico and joe all do.
-
- After executing your code, %edit will return as output the code you
- typed in the editor (except when it was an existing file). This way
- you can reload the code in further invocations of %edit as a variable,
- via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
- the output.
-
- Note that %edit is also available through the alias %ed.
-
- This is an example of creating a simple function inside the editor and
- then modifying it. First, start up the editor::
-
- In [1]: edit
- Editing... done. Executing edited code...
- Out[1]: 'def foo():\\n print "foo() was defined in an editing
- session"\\n'
-
- We can then call the function foo()::
-
- In [2]: foo()
- foo() was defined in an editing session
-
- Now we edit foo. IPython automatically loads the editor with the
- (temporary) file where foo() was previously defined::
-
- In [3]: edit foo
- Editing... done. Executing edited code...
-
- And if we call foo() again we get the modified version::
-
- In [4]: foo()
- foo() has now been changed!
-
- Here is an example of how to edit a code snippet several
- times in succession. First we call the editor::
-
- In [5]: edit
- Editing... done. Executing edited code...
- hello
- Out[5]: "print 'hello'\\n"
-
- Now we call it again with the previous output (stored in _)::
-
- In [6]: edit _
- Editing... done. Executing edited code...
- hello world
- Out[6]: "print 'hello world'\\n"
-
- Now we call it with the output #8 (stored in _8, also as Out[8])::
-
- In [7]: edit _8
- Editing... done. Executing edited code...
- hello again
- Out[7]: "print 'hello again'\\n"
-
-
- Changing the default editor hook:
-
- If you wish to write your own editor hook, you can put it in a
- configuration file which you load at startup time. The default hook
- is defined in the IPython.core.hooks module, and you can use that as a
- starting example for further modifications. That file also has
- general instructions on how to set a new hook for use once you've
- defined it."""
- opts,args = self.parse_options(parameter_s,'prxn:')
-
- try:
- filename, lineno, is_temp = self._find_edit_target(self.shell,
- args, opts, last_call)
- except MacroToEdit as e:
- self._edit_macro(args, e.args[0])
- return
- except InteractivelyDefined as e:
- print("Editing In[%i]" % e.index)
- args = str(e.index)
- filename, lineno, is_temp = self._find_edit_target(self.shell,
- args, opts, last_call)
- if filename is None:
- # nothing was found, warnings have already been issued,
- # just give up.
- return
-
- if is_temp:
- self._knowntemps.add(filename)
- elif (filename in self._knowntemps):
- is_temp = True
-
-
- # do actual editing here
- print('Editing...', end=' ')
- sys.stdout.flush()
- try:
- # Quote filenames that may have spaces in them
- if ' ' in filename:
- filename = "'%s'" % filename
- self.shell.hooks.editor(filename,lineno)
- except TryNext:
- warn('Could not open editor')
- return
-
- # XXX TODO: should this be generalized for all string vars?
- # For now, this is special-cased to blocks created by cpaste
- if args.strip() == 'pasted_block':
- with open(filename, 'r') as f:
- self.shell.user_ns['pasted_block'] = f.read()
-
- if 'x' in opts: # -x prevents actual execution
- print()
- else:
- print('done. Executing edited code...')
- with preserve_keys(self.shell.user_ns, '__file__'):
- if not is_temp:
- self.shell.user_ns['__file__'] = filename
- if 'r' in opts: # Untranslated IPython code
- with open(filename, 'r') as f:
- source = f.read()
- self.shell.run_cell(source, store_history=False)
- else:
- self.shell.safe_execfile(filename, self.shell.user_ns,
- self.shell.user_ns)
-
- if is_temp:
- try:
- return open(filename).read()
- except IOError as msg:
- if msg.filename == filename:
- warn('File not found. Did you forget to save?')
- return
- else:
- self.shell.showtraceback()
+
+ l = len(contents)
+
+ # 200,000 characters is ~ 2500 full 80-character lines,
+ # so on average more than 5000 lines
+ if l > 200000 and 'y' not in opts:
+ try:
+ ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
+ " (%d characters). Continue (y/[N]) ?" % l), default='n' )
+ except StdinNotImplementedError:
+ # assume yes if raw input is not implemented
+ ans = True
+
+ if ans is False :
+ print('Operation cancelled.')
+ return
+
+ contents = "# %load {}\n".format(arg_s) + contents
+
+ self.shell.set_next_input(contents, replace=True)
+
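
To make the `-r` handling above concrete, a minimal sketch of how ranges become slices (names as defined earlier in this file; illustrative)::

    contents = "line1\nline2\nline3\nline4\nline5"
    lines = contents.split('\n')
    slices = extract_code_ranges("2-4")            # yields (1, 4)
    picked = [lines[slice(*slc)] for slc in slices]
    # picked == [['line2', 'line3', 'line4']]
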
+ @staticmethod
+ def _find_edit_target(shell, args, opts, last_call):
+ """Utility method used by magic_edit to find what to edit."""
+
+ def make_filename(arg):
+ "Make a filename from the given args"
+ try:
+ filename = get_py_filename(arg)
+ except IOError:
+ # If it ends with .py but doesn't already exist, assume we want
+ # a new file.
+ if arg.endswith('.py'):
+ filename = arg
+ else:
+ filename = None
+ return filename
+
+ # Set a few locals from the options for convenience:
+ opts_prev = 'p' in opts
+ opts_raw = 'r' in opts
+
+ # custom exceptions
+ class DataIsObject(Exception): pass
+
+ # Default line number value
+ lineno = opts.get('n',None)
+
+ if opts_prev:
+ args = '_%s' % last_call[0]
+ if args not in shell.user_ns:
+ args = last_call[1]
+
+ # by default this is done with temp files, except when the given
+ # arg is a filename
+ use_temp = True
+
+ data = ''
+
+ # First, see if the arguments should be a filename.
+ filename = make_filename(args)
+ if filename:
+ use_temp = False
+ elif args:
+ # Mode where user specifies ranges of lines, like in %macro.
+ data = shell.extract_input_lines(args, opts_raw)
+ if not data:
+ try:
+ # Load the parameter given as a variable. If not a string,
+ # process it as an object instead (below)
+
+ #print '*** args',args,'type',type(args) # dbg
+ data = eval(args, shell.user_ns)
+ if not isinstance(data, string_types):
+ raise DataIsObject
+
+ except (NameError,SyntaxError):
+ # given argument is not a variable, try as a filename
+ filename = make_filename(args)
+ if filename is None:
+ warn("Argument given (%s) can't be found as a variable "
+ "or as a filename." % args)
+ return (None, None, None)
+ use_temp = False
+
+ except DataIsObject:
+ # macros have a special edit function
+ if isinstance(data, Macro):
+ raise MacroToEdit(data)
+
+ # For objects, try to edit the file where they are defined
+ filename = find_file(data)
+ if filename:
+ if 'fakemodule' in filename.lower() and \
+ inspect.isclass(data):
+ # class created by %edit? Try to find source
+ # by looking for method definitions instead, the
+ # __module__ in those classes is FakeModule.
+ attrs = [getattr(data, aname) for aname in dir(data)]
+ for attr in attrs:
+ if not inspect.ismethod(attr):
+ continue
+ filename = find_file(attr)
+ if filename and \
+ 'fakemodule' not in filename.lower():
+ # change the attribute to be the edit
+ # target instead
+ data = attr
+ break
+
+ m = ipython_input_pat.match(os.path.basename(filename))
+ if m:
+ raise InteractivelyDefined(int(m.groups()[0]))
+
+ datafile = 1
+ if filename is None:
+ filename = make_filename(args)
+ datafile = 1
+ if filename is not None:
+ # only warn about this if we get a real name
+ warn('Could not find file where `%s` is defined.\n'
+ 'Opening a file named `%s`' % (args, filename))
+ # Now, make sure we can actually read the source (if it was
+ # in a temp file it's gone by now).
+ if datafile:
+ if lineno is None:
+ lineno = find_source_lines(data)
+ if lineno is None:
+ filename = make_filename(args)
+ if filename is None:
+ warn('The file where `%s` was defined '
+ 'cannot be read or found.' % data)
+ return (None, None, None)
+ use_temp = False
+
+ if use_temp:
+ filename = shell.mktempfile(data)
+ print('IPython will make a temporary file named:',filename)
+
+ # use last_call to remember the state of the previous call, but don't
+ # let it be clobbered by successive '-p' calls.
+ try:
+ last_call[0] = shell.displayhook.prompt_count
+ if not opts_prev:
+ last_call[1] = args
+ except:
+ pass
+
+
+ return filename, lineno, use_temp
+
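
The helper above always resolves to a 3-tuple; a sketch of the contract (illustrative, and only meaningful inside a running IPython session where `shell` exists)::

    filename, lineno, use_temp = CodeMagics._find_edit_target(
        shell, 'myscript.py', {}, ['', ''])
    # filename: path to hand to the editor, or None if nothing usable was found
    # lineno:   line to jump to, or None
    # use_temp: True when the editor is pointed at a throwaway temp file
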
+ def _edit_macro(self,mname,macro):
+ """open an editor with the macro data in a file"""
+ filename = self.shell.mktempfile(macro.value)
+ self.shell.hooks.editor(filename)
+
+ # and make a new macro object, to replace the old one
+ with open(filename) as mfile:
+ mvalue = mfile.read()
+ self.shell.user_ns[mname] = Macro(mvalue)
+
+ @skip_doctest
+ @line_magic
+ def edit(self, parameter_s='',last_call=['','']):
+ """Bring up an editor and execute the resulting code.
+
+ Usage:
+ %edit [options] [args]
+
+ %edit runs IPython's editor hook. The default version of this hook is
+ set to call the editor specified by your $EDITOR environment variable.
+ If this isn't found, it will default to vi under Linux/Unix and to
+ notepad under Windows. See the end of this docstring for how to change
+ the editor hook.
+
+ You can also set the value of this editor via the
+ ``TerminalInteractiveShell.editor`` option in your configuration file.
+ This is useful if you wish to use a different editor from your typical
+ default with IPython (and for Windows users who typically don't set
+ environment variables).
+
+ This command allows you to conveniently edit multi-line code right in
+ your IPython session.
+
+ If called without arguments, %edit opens up an empty editor with a
+ temporary file and will execute the contents of this file when you
+ close it (don't forget to save it!).
+
+
+ Options:
+
+ -n <number>: open the editor at a specified line number. By default,
+ the IPython editor hook uses the unix syntax 'editor +N filename', but
+ you can configure this by providing your own modified hook if your
+ favorite editor supports line-number specifications with a different
+ syntax.
+
+ -p: this will call the editor with the same data as the previous time
+ it was used, regardless of how long ago (in your current session) it
+ was.
+
+ -r: use 'raw' input. This option only applies to input taken from the
+ user's history. By default, the 'processed' history is used, so that
+ magics are loaded in their transformed version as valid Python. If
+ this option is given, the raw input as typed at the command line is
+ used instead. When you exit the editor, it will be executed by
+ IPython's own processor.
+
+ -x: do not execute the edited code immediately upon exit. This is
+ mainly useful if you are editing programs which need to be called with
+ command line arguments, which you can then do using %run.
+
+
+ Arguments:
+
+ If arguments are given, the following possibilities exist:
+
+ - If the argument is a filename, IPython will load that into the
+ editor. It will execute its contents with execfile() when you exit,
+ loading any code in the file into your interactive namespace.
+
+ - The arguments are ranges of input history, e.g. "7 ~1/4-6".
+ The syntax is the same as in the %history magic.
+
+ - If the argument is a string variable, its contents are loaded
+ into the editor. You can thus edit any string which contains
+ python code (including the result of previous edits).
+
+ - If the argument is the name of an object (other than a string),
+ IPython will try to locate the file where it was defined and open the
+ editor at the point where it is defined. You can use `%edit function`
+ to load an editor exactly at the point where 'function' is defined,
+ edit it and have the file be executed automatically.
+
+ - If the object is a macro (see %macro for details), this opens up your
+ specified editor with a temporary file containing the macro's data.
+ Upon exit, the macro is reloaded with the contents of the file.
+
+ Note: opening at an exact line is only supported under Unix, and some
+ editors (like kedit and gedit up to Gnome 2.8) do not understand the
+ '+NUMBER' parameter necessary for this feature. Good editors like
+ (X)Emacs, vi, jed, pico and joe all do.
+
+ After executing your code, %edit will return as output the code you
+ typed in the editor (except when it was an existing file). This way
+ you can reload the code in further invocations of %edit as a variable,
+ via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
+ the output.
+
+ Note that %edit is also available through the alias %ed.
+
+ This is an example of creating a simple function inside the editor and
+ then modifying it. First, start up the editor::
+
+ In [1]: edit
+ Editing... done. Executing edited code...
+ Out[1]: 'def foo():\\n print "foo() was defined in an editing
+ session"\\n'
+
+ We can then call the function foo()::
+
+ In [2]: foo()
+ foo() was defined in an editing session
+
+ Now we edit foo. IPython automatically loads the editor with the
+ (temporary) file where foo() was previously defined::
+
+ In [3]: edit foo
+ Editing... done. Executing edited code...
+
+ And if we call foo() again we get the modified version::
+
+ In [4]: foo()
+ foo() has now been changed!
+
+ Here is an example of how to edit a code snippet several
+ times in succession. First we call the editor::
+
+ In [5]: edit
+ Editing... done. Executing edited code...
+ hello
+ Out[5]: "print 'hello'\\n"
+
+ Now we call it again with the previous output (stored in _)::
+
+ In [6]: edit _
+ Editing... done. Executing edited code...
+ hello world
+ Out[6]: "print 'hello world'\\n"
+
+ Now we call it with the output #8 (stored in _8, also as Out[8])::
+
+ In [7]: edit _8
+ Editing... done. Executing edited code...
+ hello again
+ Out[7]: "print 'hello again'\\n"
+
+
+ Changing the default editor hook:
+
+ If you wish to write your own editor hook, you can put it in a
+ configuration file which you load at startup time. The default hook
+ is defined in the IPython.core.hooks module, and you can use that as a
+ starting example for further modifications. That file also has
+ general instructions on how to set a new hook for use once you've
+ defined it."""
+ opts,args = self.parse_options(parameter_s,'prxn:')
+
+ try:
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ except MacroToEdit as e:
+ self._edit_macro(args, e.args[0])
+ return
+ except InteractivelyDefined as e:
+ print("Editing In[%i]" % e.index)
+ args = str(e.index)
+ filename, lineno, is_temp = self._find_edit_target(self.shell,
+ args, opts, last_call)
+ if filename is None:
+ # nothing was found, warnings have already been issued,
+ # just give up.
+ return
+
+ if is_temp:
+ self._knowntemps.add(filename)
+ elif (filename in self._knowntemps):
+ is_temp = True
+
+
+ # do actual editing here
+ print('Editing...', end=' ')
+ sys.stdout.flush()
+ try:
+ # Quote filenames that may have spaces in them
+ if ' ' in filename:
+ filename = "'%s'" % filename
+ self.shell.hooks.editor(filename,lineno)
+ except TryNext:
+ warn('Could not open editor')
+ return
+
+ # XXX TODO: should this be generalized for all string vars?
+ # For now, this is special-cased to blocks created by cpaste
+ if args.strip() == 'pasted_block':
+ with open(filename, 'r') as f:
+ self.shell.user_ns['pasted_block'] = f.read()
+
+ if 'x' in opts: # -x prevents actual execution
+ print()
+ else:
+ print('done. Executing edited code...')
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ if not is_temp:
+ self.shell.user_ns['__file__'] = filename
+ if 'r' in opts: # Untranslated IPython code
+ with open(filename, 'r') as f:
+ source = f.read()
+ self.shell.run_cell(source, store_history=False)
+ else:
+ self.shell.safe_execfile(filename, self.shell.user_ns,
+ self.shell.user_ns)
+
+ if is_temp:
+ try:
+ return open(filename).read()
+ except IOError as msg:
+ if msg.filename == filename:
+ warn('File not found. Did you forget to save?')
+ return
+ else:
+ self.shell.showtraceback()
diff --git a/contrib/python/ipython/py2/IPython/core/magics/config.py b/contrib/python/ipython/py2/IPython/core/magics/config.py
index f023cf9267..9505697791 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/config.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/config.py
@@ -1,159 +1,159 @@
-"""Implementation of configuration-related magic functions.
-"""
-from __future__ import print_function
+"""Implementation of configuration-related magic functions.
+"""
+from __future__ import print_function
from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import re
-
-# Our own packages
-from IPython.core.error import UsageError
-from IPython.core.magic import Magics, magics_class, line_magic
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import re
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
from logging import error
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-reg = re.compile(r'^\w+\.\w+$')
-@magics_class
-class ConfigMagics(Magics):
-
- def __init__(self, shell):
- super(ConfigMagics, self).__init__(shell)
- self.configurables = []
-
- @line_magic
- def config(self, s):
- """configure IPython
-
- %config Class[.trait=value]
-
- This magic exposes most of the IPython config system. Any
- Configurable class should be able to be configured with the simple
- line::
-
- %config Class.trait=value
-
- Where `value` will be resolved in the user's namespace, if it is an
- expression or variable name.
-
- Examples
- --------
-
- To see what classes are available for config, pass no arguments::
-
- In [1]: %config
- Available objects for config:
- TerminalInteractiveShell
- HistoryManager
- PrefilterManager
- AliasManager
- IPCompleter
- DisplayFormatter
-
- To view what is configurable on a given class, just pass the class
- name::
-
- In [2]: %config IPCompleter
- IPCompleter options
- -----------------
- IPCompleter.omit__names=<Enum>
- Current: 2
- Choices: (0, 1, 2)
- Instruct the completer to omit private method names
- Specifically, when completing on ``object.<tab>``.
- When 2 [default]: all names that start with '_' will be excluded.
- When 1: all 'magic' names (``__foo__``) will be excluded.
- When 0: nothing will be excluded.
- IPCompleter.merge_completions=<CBool>
- Current: True
- Whether to merge completion results into a single list
- If False, only the completion results from the first non-empty
- completer will be returned.
- IPCompleter.limit_to__all__=<CBool>
- Current: False
- Instruct the completer to use __all__ for the completion
- Specifically, when completing on ``object.<tab>``.
- When True: only those names in obj.__all__ will be included.
- When False [default]: the __all__ attribute is ignored
- IPCompleter.greedy=<CBool>
- Current: False
- Activate greedy completion
- This will enable completion on elements of lists, results of
- function calls, etc., but can be unsafe because the code is
- actually evaluated on TAB.
-
- but the real use is in setting values::
-
- In [3]: %config IPCompleter.greedy = True
-
- and these values are read from the user_ns if they are variables::
-
- In [4]: feeling_greedy=False
-
- In [5]: %config IPCompleter.greedy = feeling_greedy
-
- """
- from traitlets.config.loader import Config
- # some IPython objects are Configurable, but do not yet have
- # any configurable traits. Exclude them from the effects of
- # this magic, as their presence is just noise:
- configurables = [ c for c in self.shell.configurables
- if c.__class__.class_traits(config=True) ]
- classnames = [ c.__class__.__name__ for c in configurables ]
-
- line = s.strip()
- if not line:
- # print available configurable names
- print("Available objects for config:")
- for name in classnames:
- print(" ", name)
- return
- elif line in classnames:
- # `%config TerminalInteractiveShell` will print trait info for
- # TerminalInteractiveShell
- c = configurables[classnames.index(line)]
- cls = c.__class__
- help = cls.class_get_help(c)
- # strip leading '--' from cl-args:
- help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
- print(help)
- return
- elif reg.match(line):
- cls, attr = line.split('.')
- return getattr(configurables[classnames.index(cls)],attr)
- elif '=' not in line:
- msg = "Invalid config statement: %r, "\
- "should be `Class.trait = value`."
-
- ll = line.lower()
- for classname in classnames:
- if ll == classname.lower():
- msg = msg + '\nDid you mean %s (note the case)?' % classname
- break
-
- raise UsageError( msg % line)
-
- # otherwise, assume we are setting configurables.
- # leave quotes on args when splitting, because we want
- # unquoted args to eval in user_ns
- cfg = Config()
- exec("cfg."+line, locals(), self.shell.user_ns)
-
- for configurable in configurables:
- try:
- configurable.update_config(cfg)
- except Exception as e:
- error(e)
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+reg = re.compile(r'^\w+\.\w+$')
+@magics_class
+class ConfigMagics(Magics):
+
+ def __init__(self, shell):
+ super(ConfigMagics, self).__init__(shell)
+ self.configurables = []
+
+ @line_magic
+ def config(self, s):
+ """configure IPython
+
+ %config Class[.trait=value]
+
+ This magic exposes most of the IPython config system. Any
+ Configurable class should be able to be configured with the simple
+ line::
+
+ %config Class.trait=value
+
+ Where `value` will be resolved in the user's namespace, if it is an
+ expression or variable name.
+
+ Examples
+ --------
+
+ To see what classes are available for config, pass no arguments::
+
+ In [1]: %config
+ Available objects for config:
+ TerminalInteractiveShell
+ HistoryManager
+ PrefilterManager
+ AliasManager
+ IPCompleter
+ DisplayFormatter
+
+ To view what is configurable on a given class, just pass the class
+ name::
+
+ In [2]: %config IPCompleter
+ IPCompleter options
+ -----------------
+ IPCompleter.omit__names=<Enum>
+ Current: 2
+ Choices: (0, 1, 2)
+ Instruct the completer to omit private method names
+ Specifically, when completing on ``object.<tab>``.
+ When 2 [default]: all names that start with '_' will be excluded.
+ When 1: all 'magic' names (``__foo__``) will be excluded.
+ When 0: nothing will be excluded.
+ IPCompleter.merge_completions=<CBool>
+ Current: True
+ Whether to merge completion results into a single list
+ If False, only the completion results from the first non-empty
+ completer will be returned.
+ IPCompleter.limit_to__all__=<CBool>
+ Current: False
+ Instruct the completer to use __all__ for the completion
+ Specifically, when completing on ``object.<tab>``.
+ When True: only those names in obj.__all__ will be included.
+ When False [default]: the __all__ attribute is ignored
+ IPCompleter.greedy=<CBool>
+ Current: False
+ Activate greedy completion
+ This will enable completion on elements of lists, results of
+ function calls, etc., but can be unsafe because the code is
+ actually evaluated on TAB.
+
+ but the real use is in setting values::
+
+ In [3]: %config IPCompleter.greedy = True
+
+ and these values are read from the user_ns if they are variables::
+
+ In [4]: feeling_greedy=False
+
+ In [5]: %config IPCompleter.greedy = feeling_greedy
+
+ """
+ from traitlets.config.loader import Config
+ # some IPython objects are Configurable, but do not yet have
+ # any configurable traits. Exclude them from the effects of
+ # this magic, as their presence is just noise:
+ configurables = [ c for c in self.shell.configurables
+ if c.__class__.class_traits(config=True) ]
+ classnames = [ c.__class__.__name__ for c in configurables ]
+
+ line = s.strip()
+ if not line:
+ # print available configurable names
+ print("Available objects for config:")
+ for name in classnames:
+ print(" ", name)
+ return
+ elif line in classnames:
+ # `%config TerminalInteractiveShell` will print trait info for
+ # TerminalInteractiveShell
+ c = configurables[classnames.index(line)]
+ cls = c.__class__
+ help = cls.class_get_help(c)
+ # strip leading '--' from cl-args:
+ help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
+ print(help)
+ return
+ elif reg.match(line):
+ cls, attr = line.split('.')
+ return getattr(configurables[classnames.index(cls)],attr)
+ elif '=' not in line:
+ msg = "Invalid config statement: %r, "\
+ "should be `Class.trait = value`."
+
+ ll = line.lower()
+ for classname in classnames:
+ if ll == classname.lower():
+ msg = msg + '\nDid you mean %s (note the case)?' % classname
+ break
+
+ raise UsageError( msg % line)
+
+ # otherwise, assume we are setting configurables.
+ # leave quotes on args when splitting, because we want
+ # unquoted args to eval in user_ns
+ cfg = Config()
+ exec("cfg."+line, locals(), self.shell.user_ns)
+
+ for configurable in configurables:
+ try:
+ configurable.update_config(cfg)
+ except Exception as e:
+ error(e)
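
The assignment branch above boils down to building a `Config` object and pushing it at every live configurable; a minimal sketch (assuming traitlets is installed; illustrative, not part of the diff)::

    from traitlets.config.loader import Config

    line = "IPCompleter.greedy = True"   # as typed after %config
    cfg = Config()
    exec("cfg." + line)                  # sets cfg.IPCompleter.greedy = True
    # each matching configurable then picks up the new trait value via
    # configurable.update_config(cfg)
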
diff --git a/contrib/python/ipython/py2/IPython/core/magics/display.py b/contrib/python/ipython/py2/IPython/core/magics/display.py
index 156a86b10a..c4a8f44d9a 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/display.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/display.py
@@ -1,36 +1,36 @@
-"""Simple magics for display formats"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Our own packages
-from IPython.core.display import display, Javascript, Latex, SVG, HTML
-from IPython.core.magic import (
- Magics, magics_class, cell_magic
-)
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-
-@magics_class
-class DisplayMagics(Magics):
- """Magics for displaying various output types with literals
+"""Simple magics for display formats"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from IPython.core.display import display, Javascript, Latex, SVG, HTML
+from IPython.core.magic import (
+ Magics, magics_class, cell_magic
+)
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+@magics_class
+class DisplayMagics(Magics):
+ """Magics for displaying various output types with literals
Defines javascript/latex/svg/html cell magics for writing
- blocks in those languages, to be rendered in the frontend.
- """
+ blocks in those languages, to be rendered in the frontend.
+ """
- @cell_magic
+ @cell_magic
def js(self, line, cell):
"""Run the cell block of Javascript code
@@ -39,27 +39,27 @@ class DisplayMagics(Magics):
self.javascript(line, cell)
@cell_magic
- def javascript(self, line, cell):
- """Run the cell block of Javascript code"""
- display(Javascript(cell))
+ def javascript(self, line, cell):
+ """Run the cell block of Javascript code"""
+ display(Javascript(cell))
- @cell_magic
- def latex(self, line, cell):
- """Render the cell as a block of latex
+ @cell_magic
+ def latex(self, line, cell):
+ """Render the cell as a block of latex
- The subset of latex which is supported depends on the implementation in
+ The subset of latex which is supported depends on the implementation in
the client. In the Jupyter Notebook, this magic only renders the subset
of latex defined by MathJax
- [here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
- display(Latex(cell))
-
- @cell_magic
- def svg(self, line, cell):
- """Render the cell as an SVG literal"""
- display(SVG(cell))
-
- @cell_magic
- def html(self, line, cell):
- """Render the cell as a block of HTML"""
- display(HTML(cell))
+ [here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
+ display(Latex(cell))
+
+ @cell_magic
+ def svg(self, line, cell):
+ """Render the cell as an SVG literal"""
+ display(SVG(cell))
+
+ @cell_magic
+ def html(self, line, cell):
+ """Render the cell as a block of HTML"""
+ display(HTML(cell))
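
The pattern above generalizes to any display-style cell magic; a hedged sketch with a hypothetical `pre` magic (class and method names invented for illustration)::

    from IPython.core.display import display, HTML
    from IPython.core.magic import Magics, magics_class, cell_magic

    @magics_class
    class PreMagics(Magics):
        """Render a cell verbatim inside a <pre> block."""

        @cell_magic
        def pre(self, line, cell):
            # cell text is not HTML-escaped here; fine for a sketch
            display(HTML(u'<pre>%s</pre>' % cell))

    # registration, inside a running shell:
    # get_ipython().register_magics(PreMagics)
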
diff --git a/contrib/python/ipython/py2/IPython/core/magics/execution.py b/contrib/python/ipython/py2/IPython/core/magics/execution.py
index a10c3409bc..3734b0cdae 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/execution.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/execution.py
@@ -1,710 +1,710 @@
-# -*- coding: utf-8 -*-
-"""Implementation of execution-related magic functions."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
+# -*- coding: utf-8 -*-
+"""Implementation of execution-related magic functions."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
from __future__ import absolute_import
-
-import ast
-import bdb
-import gc
-import itertools
-import os
-import sys
-import time
-import timeit
-from pdb import Restart
-
-# cProfile was added in Python 2.5
-try:
- import cProfile as profile
- import pstats
-except ImportError:
- # profile isn't bundled by default in Debian for license reasons
- try:
- import profile, pstats
- except ImportError:
- profile = pstats = None
-
+
+import ast
+import bdb
+import gc
+import itertools
+import os
+import sys
+import time
+import timeit
+from pdb import Restart
+
+# cProfile was added in Python 2.5
+try:
+ import cProfile as profile
+ import pstats
+except ImportError:
+ # profile isn't bundled by default in Debian for license reasons
+ try:
+ import profile, pstats
+ except ImportError:
+ profile = pstats = None
+
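
Since `profile`/`pstats` may be absent (the fallback above anticipates this), here is a guarded sketch of the kind of call `%prun` ultimately drives (illustrative)::

    try:
        import cProfile as profile
        import pstats
    except ImportError:
        profile = pstats = None

    if profile is not None:
        prof = profile.Profile()
        prof.runctx("sum(range(1000))", globals(), locals())
        pstats.Stats(prof).sort_stats('time').print_stats(5)
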
from IPython.core import oinspect
-from IPython.core import magic_arguments
-from IPython.core import page
-from IPython.core.error import UsageError
-from IPython.core.macro import Macro
-from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
- line_cell_magic, on_off, needs_local_scope)
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils import py3compat
-from IPython.utils.py3compat import builtin_mod, iteritems, PY3
-from IPython.utils.contexts import preserve_keys
-from IPython.utils.capture import capture_output
-from IPython.utils.ipstruct import Struct
-from IPython.utils.module_paths import find_mod
+from IPython.core import magic_arguments
+from IPython.core import page
+from IPython.core.error import UsageError
+from IPython.core.macro import Macro
+from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
+ line_cell_magic, on_off, needs_local_scope)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import py3compat
+from IPython.utils.py3compat import builtin_mod, iteritems, PY3
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.capture import capture_output
+from IPython.utils.ipstruct import Struct
+from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, shellglob
-from IPython.utils.timing import clock, clock2
+from IPython.utils.timing import clock, clock2
from warnings import warn
from logging import error
-
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-
-class TimeitResult(object):
- """
- Object returned by the timeit magic with info about the run.
-
+
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+
+class TimeitResult(object):
+ """
+ Object returned by the timeit magic with info about the run.
+
Contains the following attributes:
-
+
loops: (int) number of loops done per measurement
repeat: (int) number of times the measurement has been repeated
best: (float) best execution time / number
all_runs: (list of float) execution time of each run (in s)
- compile_time: (float) time of statement compilation (s)
-
- """
-
- def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
- self.loops = loops
- self.repeat = repeat
- self.best = best
- self.worst = worst
- self.all_runs = all_runs
- self.compile_time = compile_time
- self._precision = precision
-
- def _repr_pretty_(self, p , cycle):
- if self.loops == 1: # No s at "loops" if only one loop
- unic = u"%d loop, best of %d: %s per loop" % (self.loops, self.repeat,
- _format_time(self.best, self._precision))
- else:
- unic = u"%d loops, best of %d: %s per loop" % (self.loops, self.repeat,
- _format_time(self.best, self._precision))
- p.text(u'<TimeitResult : '+unic+u'>')
-
-
-class TimeitTemplateFiller(ast.NodeTransformer):
- """Fill in the AST template for timing execution.
-
- This is quite closely tied to the template definition, which is in
- :meth:`ExecutionMagics.timeit`.
- """
- def __init__(self, ast_setup, ast_stmt):
- self.ast_setup = ast_setup
- self.ast_stmt = ast_stmt
-
- def visit_FunctionDef(self, node):
- "Fill in the setup statement"
- self.generic_visit(node)
- if node.name == "inner":
- node.body[:1] = self.ast_setup.body
-
- return node
-
- def visit_For(self, node):
- "Fill in the statement to be timed"
- if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
- node.body = self.ast_stmt.body
- return node
-
-
-class Timer(timeit.Timer):
- """Timer class that explicitly uses self.inner
-
- which is an undocumented implementation detail of CPython,
- not shared by PyPy.
- """
- # Timer.timeit copied from CPython 3.4.2
- def timeit(self, number=timeit.default_number):
- """Time 'number' executions of the main statement.
-
- To be precise, this executes the setup statement once, and
- then returns the time it takes to execute the main statement
- a number of times, as a float measured in seconds. The
- argument is the number of times through the loop, defaulting
- to one million. The main statement, the setup statement and
- the timer function to be used are passed to the constructor.
- """
- it = itertools.repeat(None, number)
- gcold = gc.isenabled()
- gc.disable()
- try:
- timing = self.inner(it, self.timer)
- finally:
- if gcold:
- gc.enable()
- return timing
-
-
-@magics_class
-class ExecutionMagics(Magics):
- """Magics related to code execution, debugging, profiling, etc.
-
- """
-
- def __init__(self, shell):
- super(ExecutionMagics, self).__init__(shell)
- if profile is None:
- self.prun = self.profile_missing_notice
- # Default execution function used to actually run user code.
- self.default_runner = None
-
- def profile_missing_notice(self, *args, **kwargs):
- error("""\
-The profile module could not be found. It has been removed from the standard
-python packages because of its non-free license. To use profiling, install the
-python-profiler package from non-free.""")
-
- @skip_doctest
- @line_cell_magic
- def prun(self, parameter_s='', cell=None):
-
- """Run a statement through the python code profiler.
-
- Usage, in line mode:
- %prun [options] statement
-
- Usage, in cell mode:
- %%prun [options] [statement]
- code...
- code...
-
- In cell mode, the additional code lines are appended to the (possibly
- empty) statement in the first line. Cell mode allows you to easily
- profile multiline blocks without having to put them in a separate
- function.
-
- The given statement (which doesn't require quote marks) is run via the
- python profiler in a manner similar to the profile.run() function.
- Namespaces are internally managed to work correctly; profile.run
- cannot be used in IPython because it makes certain assumptions about
- namespaces which do not hold under IPython.
-
- Options:
-
- -l <limit>
- you can place restrictions on what or how much of the
- profile gets printed. The limit value can be:
-
- * A string: only information for function names containing this string
- is printed.
-
- * An integer: only this many lines are printed.
-
- * A float (between 0 and 1): this fraction of the report is printed
- (for example, use a limit of 0.4 to see the topmost 40% only).
-
- You can combine several limits with repeated use of the option. For
- example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
- information about class constructors.
-
- -r
- return the pstats.Stats object generated by the profiling. This
- object has all the information about the profile in it, and you can
- later use it for further analysis or in other functions.
-
- -s <key>
- sort profile by given key. You can provide more than one key
- by using the option several times: '-s key1 -s key2 -s key3...'. The
- default sorting key is 'time'.
-
- The following is copied verbatim from the profile documentation
- referenced below:
-
- When more than one key is provided, additional keys are used as
- secondary criteria when there is equality in all keys selected
- before them.
-
- Abbreviations can be used for any key names, as long as the
- abbreviation is unambiguous. The following are the keys currently
- defined:
-
- ============ =====================
- Valid Arg Meaning
- ============ =====================
- "calls" call count
- "cumulative" cumulative time
- "file" file name
- "module" file name
- "pcalls" primitive call count
- "line" line number
- "name" function name
- "nfl" name/file/line
- "stdname" standard name
- "time" internal time
- ============ =====================
-
- Note that all sorts on statistics are in descending order (placing
- most time-consuming items first), whereas name, file, and line number
- searches are in ascending order (i.e., alphabetical). The subtle
- distinction between "nfl" and "stdname" is that the standard name is a
- sort of the name as printed, which means that the embedded line
- numbers get compared in an odd way. For example, lines 3, 20, and 40
- would (if the file names were the same) appear in the string order
- "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
- line numbers. In fact, sort_stats("nfl") is the same as
- sort_stats("name", "file", "line").
-
- -T <filename>
- save profile results as shown on screen to a text
- file. The profile is still shown on screen.
-
- -D <filename>
- save (via dump_stats) profile statistics to given
- filename. This data is in a format understood by the pstats module, and
- is generated by a call to the dump_stats() method of profile
- objects. The profile is still shown on screen.
-
- -q
- suppress output to the pager. Best used with -T and/or -D above.
-
- If you want to run complete programs under the profiler's control, use
- ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
- contains profiler specific options as described here.
-
- You can read the complete documentation for the profile module with::
-
- In [1]: import profile; profile.help()
- """
- opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
- list_all=True, posix=False)
- if cell is not None:
- arg_str += '\n' + cell
- arg_str = self.shell.input_splitter.transform_cell(arg_str)
- return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
-
- def _run_with_profiler(self, code, opts, namespace):
- """
- Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
-
- Parameters
- ----------
- code : str
- Code to be executed.
- opts : Struct
- Options parsed by `self.parse_options`.
- namespace : dict
- A dictionary for Python namespace (e.g., `self.shell.user_ns`).
-
- """
-
- # Fill default values for unspecified options:
- opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
-
- prof = profile.Profile()
- try:
- prof = prof.runctx(code, namespace, namespace)
- sys_exit = ''
- except SystemExit:
- sys_exit = """*** SystemExit exception caught in code being profiled."""
-
- stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
-
- lims = opts.l
- if lims:
- lims = [] # rebuild lims with ints/floats/strings
- for lim in opts.l:
- try:
- lims.append(int(lim))
- except ValueError:
- try:
- lims.append(float(lim))
- except ValueError:
- lims.append(lim)
-
- # Trap output.
- stdout_trap = StringIO()
- stats_stream = stats.stream
- try:
- stats.stream = stdout_trap
- stats.print_stats(*lims)
- finally:
- stats.stream = stats_stream
-
- output = stdout_trap.getvalue()
- output = output.rstrip()
-
- if 'q' not in opts:
- page.page(output)
- print(sys_exit, end=' ')
-
- dump_file = opts.D[0]
- text_file = opts.T[0]
- if dump_file:
- prof.dump_stats(dump_file)
- print('\n*** Profile stats marshalled to file',\
- repr(dump_file)+'.',sys_exit)
- if text_file:
- pfile = open(text_file,'w')
- pfile.write(output)
- pfile.close()
- print('\n*** Profile printout saved to text file',\
- repr(text_file)+'.',sys_exit)
-
- if 'r' in opts:
- return stats
- else:
- return None
-
- @line_magic
- def pdb(self, parameter_s=''):
- """Control the automatic calling of the pdb interactive debugger.
-
- Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
- argument it works as a toggle.
-
- When an exception is triggered, IPython can optionally call the
- interactive pdb debugger after the traceback printout. %pdb toggles
- this feature on and off.
-
- The initial state of this feature is set in your configuration
- file (the option is ``InteractiveShell.pdb``).
-
- If you want to just activate the debugger AFTER an exception has fired,
- without having to type '%pdb on' and rerunning your code, you can use
- the %debug magic."""
-
- par = parameter_s.strip().lower()
-
- if par:
- try:
- new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
- except KeyError:
- print('Incorrect argument. Use on/1, off/0, '
- 'or nothing for a toggle.')
- return
- else:
- # toggle
- new_pdb = not self.shell.call_pdb
-
- # set on the shell
- self.shell.call_pdb = new_pdb
- print('Automatic pdb calling has been turned',on_off(new_pdb))
-
- @skip_doctest
- @magic_arguments.magic_arguments()
- @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
- help="""
- Set a breakpoint at LINE in FILE.
- """
- )
- @magic_arguments.argument('statement', nargs='*',
- help="""
- Code to run in debugger.
- You can omit this in cell magic mode.
- """
- )
- @line_cell_magic
- def debug(self, line='', cell=None):
- """Activate the interactive debugger.
-
- This magic command supports two ways of activating the debugger.
- One is to activate the debugger before executing code. This way, you
- can set a breakpoint and step through the code from that point.
- You can use this mode by giving statements to execute and optionally
- a breakpoint.
-
- The other is to activate the debugger in post-mortem mode. You can
- activate this mode by simply running %debug without any argument.
- If an exception has just occurred, this lets you inspect its stack
- frames interactively. Note that this will always work only on the last
- traceback that occurred, so you must call this quickly after an
- exception that you wish to inspect has fired, because if another one
- occurs, it clobbers the previous one.
-
- If you want IPython to automatically do this on every exception, see
- the %pdb magic for more details.
- """
- args = magic_arguments.parse_argstring(self.debug, line)
-
- if not (args.breakpoint or args.statement or cell):
- self._debug_post_mortem()
- else:
- code = "\n".join(args.statement)
- if cell:
- code += "\n" + cell
- self._debug_exec(code, args.breakpoint)
-
- def _debug_post_mortem(self):
- self.shell.debugger(force=True)
-
- def _debug_exec(self, code, breakpoint):
- if breakpoint:
+ compile_time: (float) time of statement compilation (s)
+
+ """
+
+ def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
+ self.loops = loops
+ self.repeat = repeat
+ self.best = best
+ self.worst = worst
+ self.all_runs = all_runs
+ self.compile_time = compile_time
+ self._precision = precision
+
+ def _repr_pretty_(self, p, cycle):
+ if self.loops == 1: # No s at "loops" if only one loop
+ unic = u"%d loop, best of %d: %s per loop" % (self.loops, self.repeat,
+ _format_time(self.best, self._precision))
+ else:
+ unic = u"%d loops, best of %d: %s per loop" % (self.loops, self.repeat,
+ _format_time(self.best, self._precision))
+ p.text(u'<TimeitResult : '+unic+u'>')
+
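+# For illustration (assuming an interactive session; `res` is just a
+# local name): `res = %timeit -o pass` stores one of these objects, and
+# res.loops, res.best and res.all_runs then expose the raw measurements
+# that _repr_pretty_ formats above.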
+
+class TimeitTemplateFiller(ast.NodeTransformer):
+ """Fill in the AST template for timing execution.
+
+ This is quite closely tied to the template definition, which is in
+ :meth:`ExecutionMagics.timeit`.
+ """
+ def __init__(self, ast_setup, ast_stmt):
+ self.ast_setup = ast_setup
+ self.ast_stmt = ast_stmt
+
+ def visit_FunctionDef(self, node):
+ "Fill in the setup statement"
+ self.generic_visit(node)
+ if node.name == "inner":
+ node.body[:1] = self.ast_setup.body
+
+ return node
+
+ def visit_For(self, node):
+ "Fill in the statement to be timed"
+ if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
+ node.body = self.ast_stmt.body
+ return node
+
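+# A minimal sketch of the transformation, using the same placeholder
+# template that %timeit builds below (see ExecutionMagics.timeit):
+#
+#     import ast
+#     template = ast.parse('def inner(_it, _timer):\n'
+#                          '    setup\n'
+#                          '    _t0 = _timer()\n'
+#                          '    for _i in _it:\n'
+#                          '        stmt\n'
+#                          '    _t1 = _timer()\n'
+#                          '    return _t1 - _t0\n')
+#     filled = TimeitTemplateFiller(ast.parse('x = 1'),
+#                                   ast.parse('x + 1')).visit(template)
+#     ast.fix_missing_locations(filled)
+#
+# The bare `setup` expression becomes the setup body, and the body of the
+# for loop (the `stmt` placeholder) becomes the timed statements.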
+
+class Timer(timeit.Timer):
+ """Timer class that explicitly uses self.inner
+
+ which is an undocumented implementation detail of CPython,
+ not shared by PyPy.
+ """
+ # Timer.timeit copied from CPython 3.4.2
+ def timeit(self, number=timeit.default_number):
+ """Time 'number' executions of the main statement.
+
+ To be precise, this executes the setup statement once, and
+ then returns the time it takes to execute the main statement
+ a number of times, as a float measured in seconds. The
+ argument is the number of times through the loop, defaulting
+ to one million. The main statement, the setup statement and
+ the timer function to be used are passed to the constructor.
+ """
+ it = itertools.repeat(None, number)
+ gcold = gc.isenabled()
+ gc.disable()
+ try:
+ timing = self.inner(it, self.timer)
+ finally:
+ if gcold:
+ gc.enable()
+ return timing
+
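+# How this Timer gets wired up in timeit() below (a sketch; `code` stands
+# for the compiled inner() template and `shell` for the active shell):
+#
+#     timer = Timer(timer=timeit.default_timer)
+#     ns = {}
+#     exec(code, shell.user_ns, ns)    # defines inner(_it, _timer)
+#     timer.inner = ns['inner']
+#     total = timer.timeit(number)     # seconds for `number` iterations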
+
+@magics_class
+class ExecutionMagics(Magics):
+ """Magics related to code execution, debugging, profiling, etc.
+
+ """
+
+ def __init__(self, shell):
+ super(ExecutionMagics, self).__init__(shell)
+ if profile is None:
+ self.prun = self.profile_missing_notice
+ # Default execution function used to actually run user code.
+ self.default_runner = None
+
+ def profile_missing_notice(self, *args, **kwargs):
+ error("""\
+The profile module could not be found. It has been removed from the standard
+python packages because of its non-free license. To use profiling, install the
+python-profiler package from non-free.""")
+
+ @skip_doctest
+ @line_cell_magic
+ def prun(self, parameter_s='', cell=None):
+
+ """Run a statement through the python code profiler.
+
+ Usage, in line mode:
+ %prun [options] statement
+
+ Usage, in cell mode:
+ %%prun [options] [statement]
+ code...
+ code...
+
+ In cell mode, the additional code lines are appended to the (possibly
+ empty) statement in the first line. Cell mode allows you to easily
+ profile multiline blocks without having to put them in a separate
+ function.
+
+ The given statement (which doesn't require quote marks) is run via the
+ python profiler in a manner similar to the profile.run() function.
+ Namespaces are internally managed to work correctly; profile.run
+ cannot be used in IPython because it makes certain assumptions about
+ namespaces which do not hold under IPython.
+
+ Options:
+
+ -l <limit>
+ you can place restrictions on what or how much of the
+ profile gets printed. The limit value can be:
+
+ * A string: only information for function names containing this string
+ is printed.
+
+ * An integer: only this many lines are printed.
+
+ * A float (between 0 and 1): this fraction of the report is printed
+ (for example, use a limit of 0.4 to see the topmost 40% only).
+
+ You can combine several limits with repeated use of the option. For
+ example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
+ information about class constructors.
+
+ -r
+ return the pstats.Stats object generated by the profiling. This
+ object has all the information about the profile in it, and you can
+ later use it for further analysis or in other functions.
+
+ -s <key>
+ sort profile by given key. You can provide more than one key
+ by using the option several times: '-s key1 -s key2 -s key3...'. The
+ default sorting key is 'time'.
+
+ The following is copied verbatim from the profile documentation
+ referenced below:
+
+ When more than one key is provided, additional keys are used as
+ secondary criteria when there is equality in all keys selected
+ before them.
+
+ Abbreviations can be used for any key names, as long as the
+ abbreviation is unambiguous. The following are the keys currently
+ defined:
+
+ ============ =====================
+ Valid Arg Meaning
+ ============ =====================
+ "calls" call count
+ "cumulative" cumulative time
+ "file" file name
+ "module" file name
+ "pcalls" primitive call count
+ "line" line number
+ "name" function name
+ "nfl" name/file/line
+ "stdname" standard name
+ "time" internal time
+ ============ =====================
+
+ Note that all sorts on statistics are in descending order (placing
+ most time-consuming items first), whereas name, file, and line number
+ searches are in ascending order (i.e., alphabetical). The subtle
+ distinction between "nfl" and "stdname" is that the standard name is a
+ sort of the name as printed, which means that the embedded line
+ numbers get compared in an odd way. For example, lines 3, 20, and 40
+ would (if the file names were the same) appear in the string order
+ "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
+ line numbers. In fact, sort_stats("nfl") is the same as
+ sort_stats("name", "file", "line").
+
+ -T <filename>
+ save profile results as shown on screen to a text
+ file. The profile is still shown on screen.
+
+ -D <filename>
+ save (via dump_stats) profile statistics to given
+ filename. This data is in a format understood by the pstats module, and
+ is generated by a call to the dump_stats() method of profile
+ objects. The profile is still shown on screen.
+
+ -q
+ suppress output to the pager. Best used with -T and/or -D above.
+
+ If you want to run complete programs under the profiler's control, use
+ ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
+ contains profiler specific options as described here.
+
+ You can read the complete documentation for the profile module with::
+
+ In [1]: import profile; profile.help()
+ """
+ opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
+ list_all=True, posix=False)
+ if cell is not None:
+ arg_str += '\n' + cell
+ arg_str = self.shell.input_splitter.transform_cell(arg_str)
+ return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
+
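+ # For example, `%prun -l 0.4 -s cumulative heavy()` (where `heavy` stands
+ # for any callable in the user namespace) profiles the call via the
+ # helper below, sorts by cumulative time and pages the top 40% of the
+ # report.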
+ def _run_with_profiler(self, code, opts, namespace):
+ """
+ Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
+
+ Parameters
+ ----------
+ code : str
+ Code to be executed.
+ opts : Struct
+ Options parsed by `self.parse_options`.
+ namespace : dict
+ A dictionary for Python namespace (e.g., `self.shell.user_ns`).
+
+ """
+
+ # Fill default values for unspecified options:
+ opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
+
+ prof = profile.Profile()
+ try:
+ prof = prof.runctx(code, namespace, namespace)
+ sys_exit = ''
+ except SystemExit:
+ sys_exit = """*** SystemExit exception caught in code being profiled."""
+
+ stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
+
+ lims = opts.l
+ if lims:
+ lims = [] # rebuild lims with ints/floats/strings
+ for lim in opts.l:
+ try:
+ lims.append(int(lim))
+ except ValueError:
+ try:
+ lims.append(float(lim))
+ except ValueError:
+ lims.append(lim)
+
+ # Trap output.
+ stdout_trap = StringIO()
+ stats_stream = stats.stream
+ try:
+ stats.stream = stdout_trap
+ stats.print_stats(*lims)
+ finally:
+ stats.stream = stats_stream
+
+ output = stdout_trap.getvalue()
+ output = output.rstrip()
+
+ if 'q' not in opts:
+ page.page(output)
+ print(sys_exit, end=' ')
+
+ dump_file = opts.D[0]
+ text_file = opts.T[0]
+ if dump_file:
+ prof.dump_stats(dump_file)
+ print('\n*** Profile stats marshalled to file',\
+ repr(dump_file)+'.',sys_exit)
+ if text_file:
+ pfile = open(text_file,'w')
+ pfile.write(output)
+ pfile.close()
+ print('\n*** Profile printout saved to text file',\
+ repr(text_file)+'.',sys_exit)
+
+ if 'r' in opts:
+ return stats
+ else:
+ return None
+
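+ # Illustration of the `-l` coercion above: `-l __init__ -l 5` arrives as
+ # opts.l == ['__init__', '5'] and is rebuilt to ['__init__', 5]; pstats
+ # then applies the restrictions in order (the name filter first, then a
+ # five-line cap).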
+ @line_magic
+ def pdb(self, parameter_s=''):
+ """Control the automatic calling of the pdb interactive debugger.
+
+ Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
+ argument it works as a toggle.
+
+ When an exception is triggered, IPython can optionally call the
+ interactive pdb debugger after the traceback printout. %pdb toggles
+ this feature on and off.
+
+ The initial state of this feature is set in your configuration
+ file (the option is ``InteractiveShell.pdb``).
+
+ If you want to just activate the debugger AFTER an exception has fired,
+ without having to type '%pdb on' and rerunning your code, you can use
+ the %debug magic."""
+
+ par = parameter_s.strip().lower()
+
+ if par:
+ try:
+ new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
+ except KeyError:
+ print('Incorrect argument. Use on/1, off/0, '
+ 'or nothing for a toggle.')
+ return
+ else:
+ # toggle
+ new_pdb = not self.shell.call_pdb
+
+ # set on the shell
+ self.shell.call_pdb = new_pdb
+ print('Automatic pdb calling has been turned',on_off(new_pdb))
+
+ @skip_doctest
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
+ help="""
+ Set a breakpoint at LINE in FILE.
+ """
+ )
+ @magic_arguments.argument('statement', nargs='*',
+ help="""
+ Code to run in debugger.
+ You can omit this in cell magic mode.
+ """
+ )
+ @line_cell_magic
+ def debug(self, line='', cell=None):
+ """Activate the interactive debugger.
+
+ This magic command supports two ways of activating the debugger.
+ One is to activate the debugger before executing code. This way, you
+ can set a breakpoint and step through the code from that point.
+ You can use this mode by giving statements to execute and optionally
+ a breakpoint.
+
+ The other is to activate the debugger in post-mortem mode. You can
+ activate this mode by simply running %debug without any argument.
+ If an exception has just occurred, this lets you inspect its stack
+ frames interactively. Note that this will always work only on the last
+ traceback that occurred, so you must call this quickly after an
+ exception that you wish to inspect has fired, because if another one
+ occurs, it clobbers the previous one.
+
+ If you want IPython to automatically do this on every exception, see
+ the %pdb magic for more details.
+ """
+ args = magic_arguments.parse_argstring(self.debug, line)
+
+ if not (args.breakpoint or args.statement or cell):
+ self._debug_post_mortem()
+ else:
+ code = "\n".join(args.statement)
+ if cell:
+ code += "\n" + cell
+ self._debug_exec(code, args.breakpoint)
+
+ def _debug_post_mortem(self):
+ self.shell.debugger(force=True)
+
+ def _debug_exec(self, code, breakpoint):
+ if breakpoint:
(filename, bp_line) = breakpoint.rsplit(':', 1)
- bp_line = int(bp_line)
- else:
- (filename, bp_line) = (None, None)
- self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
-
- @line_magic
- def tb(self, s):
- """Print the last traceback with the currently active exception mode.
-
- See %xmode for changing exception reporting modes."""
- self.shell.showtraceback()
-
- @skip_doctest
- @line_magic
- def run(self, parameter_s='', runner=None,
- file_finder=get_py_filename):
- """Run the named file inside IPython as a program.
-
- Usage::
-
- %run [-n -i -e -G]
- [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
- ( -m mod | file ) [args]
-
- Parameters after the filename are passed as command-line arguments to
- the program (put in sys.argv). Then, control returns to IPython's
- prompt.
-
- This is similar to running at a system prompt ``python file args``,
- but with the advantage of giving you IPython's tracebacks, and of
- loading all variables into your interactive namespace for further use
- (unless -p is used, see below).
-
- The file is executed in a namespace initially consisting only of
- ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
- sees its environment as if it were being run as a stand-alone program
- (except for sharing global objects such as previously imported
- modules). But after execution, the IPython interactive namespace gets
- updated with all variables defined in the program (except for __name__
- and sys.argv). This allows for very convenient loading of code for
- interactive work, while giving each program a 'clean sheet' to run in.
-
- Arguments are expanded using shell-like glob match. Patterns
- '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
- tilde '~' will be expanded into user's home directory. Unlike
- real shells, quotation does not suppress expansions. Use
- *two* backslashes (e.g. ``\\\\*``) to suppress expansions.
- To completely disable these expansions, you can use the -G flag.
-
- Options:
-
- -n
- __name__ is NOT set to '__main__', but to the running file's name
- without extension (as python does under import). This allows running
- scripts and reloading the definitions in them without calling code
- protected by an ``if __name__ == "__main__"`` clause.
-
- -i
- run the file in IPython's namespace instead of an empty one. This
- is useful if you are experimenting with code written in a text editor
- which depends on variables defined interactively.
-
- -e
- ignore sys.exit() calls or SystemExit exceptions in the script
- being run. This is particularly useful if IPython is being used to
- run unittests, which always exit with a sys.exit() call. In such
- cases you are interested in the output of the test results, not in
- seeing a traceback of the unittest module.
-
- -t
- print timing information at the end of the run. IPython will give
- you an estimated CPU time consumption for your script, which under
- Unix uses the resource module to avoid the wraparound problems of
- time.clock(). Under Unix, an estimate of time spent on system tasks
- is also given (for Windows platforms this is reported as 0.0).
-
- If -t is given, an additional ``-N<N>`` option can be given, where <N>
- must be an integer indicating how many times you want the script to
- run. The final timing report will include total and per run results.
-
- For example (testing the script uniq_stable.py)::
-
- In [1]: run -t uniq_stable
-
- IPython CPU timings (estimated):
- User : 0.19597 s.
- System: 0.0 s.
-
- In [2]: run -t -N5 uniq_stable
-
- IPython CPU timings (estimated):
- Total runs performed: 5
- Times : Total Per run
- User : 0.910862 s, 0.1821724 s.
- System: 0.0 s, 0.0 s.
-
- -d
- run your program under the control of pdb, the Python debugger.
- This allows you to execute your program step by step, watch variables,
- etc. Internally, what IPython does is similar to calling::
-
- pdb.run('execfile("YOURFILENAME")')
-
- with a breakpoint set on line 1 of your file. You can change the line
- number for this automatic breakpoint to be <N> by using the -bN option
- (where N must be an integer). For example::
-
- %run -d -b40 myscript
-
- will set the first breakpoint at line 40 in myscript.py. Note that
- the first breakpoint must be set on a line which actually does
- something (not a comment or docstring) for it to stop execution.
-
- Or you can specify a breakpoint in a different file::
-
- %run -d -b myotherfile.py:20 myscript
-
- When the pdb debugger starts, you will see a (Pdb) prompt. You must
- first enter 'c' (without quotes) to start execution up to the first
- breakpoint.
-
- Entering 'help' gives information about the use of the debugger. You
- can easily see pdb's full documentation with "import pdb;pdb.help()"
- at a prompt.
-
- -p
- run program under the control of the Python profiler module (which
- prints a detailed report of execution times, function calls, etc).
-
- You can pass other options after -p which affect the behavior of the
- profiler itself. See the docs for %prun for details.
-
- In this mode, the program's variables do NOT propagate back to the
- IPython interactive namespace (because they remain in the namespace
- where the profiler executes them).
-
- Internally this triggers a call to %prun, see its documentation for
- details on the options available specifically for profiling.
-
- There is one special usage for which the text above doesn't apply:
- if the filename ends with .ipy[nb], the file is run as an IPython
- script, just as if the commands were written at the IPython prompt.
-
- -m
- specify module name to load instead of script path. Similar to
- the -m option for the python interpreter. Use this option last if you
- want to combine with other %run options. Unlike the python interpreter,
- only source modules are allowed; no .pyc or .pyo files.
- For example::
-
- %run -m example
-
- will run the example module.
-
- -G
- disable shell-like glob expansion of arguments.
-
- """
-
- # get arguments and set sys.argv for program to be run.
- opts, arg_lst = self.parse_options(parameter_s,
- 'nidtN:b:pD:l:rs:T:em:G',
- mode='list', list_all=1)
- if "m" in opts:
- modulename = opts["m"][0]
- modpath = find_mod(modulename)
- if modpath is None:
- warn('%r is not a valid modulename on sys.path'%modulename)
- return
- arg_lst = [modpath] + arg_lst
- try:
- filename = file_finder(arg_lst[0])
- except IndexError:
- warn('you must provide at least a filename.')
- print('\n%run:\n', oinspect.getdoc(self.run))
- return
- except IOError as e:
- try:
- msg = str(e)
- except UnicodeError:
- msg = e.message
- error(msg)
- return
-
- if filename.lower().endswith(('.ipy', '.ipynb')):
- with preserve_keys(self.shell.user_ns, '__file__'):
- self.shell.user_ns['__file__'] = filename
- self.shell.safe_execfile_ipy(filename)
- return
-
- # Control the response to exit() calls made by the script being run
- exit_ignore = 'e' in opts
-
- # Make sure that the running script gets a proper sys.argv as if it
- # were run from a system shell.
- save_argv = sys.argv # save it for later restoring
-
- if 'G' in opts:
- args = arg_lst[1:]
- else:
- # tilde and glob expansion
- args = shellglob(map(os.path.expanduser, arg_lst[1:]))
-
- sys.argv = [filename] + args # put in the proper filename
- # protect sys.argv from potential unicode strings on Python 2:
- if not py3compat.PY3:
- sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
-
- if 'i' in opts:
- # Run in user's interactive namespace
- prog_ns = self.shell.user_ns
- __name__save = self.shell.user_ns['__name__']
- prog_ns['__name__'] = '__main__'
- main_mod = self.shell.user_module
-
- # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
- # set the __file__ global in the script's namespace
- # TK: Is this necessary in interactive mode?
- prog_ns['__file__'] = filename
- else:
- # Run in a fresh, empty namespace
- if 'n' in opts:
- name = os.path.splitext(os.path.basename(filename))[0]
- else:
- name = '__main__'
-
- # The shell MUST hold a reference to prog_ns so after %run
- # exits, the python deletion mechanism doesn't zero it out
- # (leaving dangling references). See interactiveshell for details
- main_mod = self.shell.new_main_mod(filename, name)
- prog_ns = main_mod.__dict__
-
- # pickle fix. See interactiveshell for an explanation. But we need to
- # make sure that, if we overwrite __main__, we replace it at the end
- main_mod_name = prog_ns['__name__']
-
- if main_mod_name == '__main__':
- restore_main = sys.modules['__main__']
- else:
- restore_main = False
-
- # This needs to be undone at the end to prevent holding references to
- # every single object ever created.
- sys.modules[main_mod_name] = main_mod
-
- if 'p' in opts or 'd' in opts:
- if 'm' in opts:
- code = 'run_module(modulename, prog_ns)'
- code_ns = {
- 'run_module': self.shell.safe_run_module,
- 'prog_ns': prog_ns,
- 'modulename': modulename,
- }
- else:
- if 'd' in opts:
- # allow exceptions to raise in debug mode
- code = 'execfile(filename, prog_ns, raise_exceptions=True)'
- else:
- code = 'execfile(filename, prog_ns)'
- code_ns = {
- 'execfile': self.shell.safe_execfile,
- 'prog_ns': prog_ns,
- 'filename': get_py_filename(filename),
- }
-
- try:
- stats = None
+ bp_line = int(bp_line)
+ else:
+ (filename, bp_line) = (None, None)
+ self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
+
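+ # Note on the breakpoint spec parsed in _debug_exec above: rsplit(':', 1)
+ # splits on the last colon only, so e.g. 'C:\\proj\\script.py:42' yields
+ # ('C:\\proj\\script.py', '42') and drive-letter colons survive intact.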
+ @line_magic
+ def tb(self, s):
+ """Print the last traceback with the currently active exception mode.
+
+ See %xmode for changing exception reporting modes."""
+ self.shell.showtraceback()
+
+ @skip_doctest
+ @line_magic
+ def run(self, parameter_s='', runner=None,
+ file_finder=get_py_filename):
+ """Run the named file inside IPython as a program.
+
+ Usage::
+
+ %run [-n -i -e -G]
+ [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
+ ( -m mod | file ) [args]
+
+ Parameters after the filename are passed as command-line arguments to
+ the program (put in sys.argv). Then, control returns to IPython's
+ prompt.
+
+ This is similar to running at a system prompt ``python file args``,
+ but with the advantage of giving you IPython's tracebacks, and of
+ loading all variables into your interactive namespace for further use
+ (unless -p is used, see below).
+
+ The file is executed in a namespace initially consisting only of
+ ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
+ sees its environment as if it were being run as a stand-alone program
+ (except for sharing global objects such as previously imported
+ modules). But after execution, the IPython interactive namespace gets
+ updated with all variables defined in the program (except for __name__
+ and sys.argv). This allows for very convenient loading of code for
+ interactive work, while giving each program a 'clean sheet' to run in.
+
+ Arguments are expanded using shell-like glob match. Patterns
+ '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
+ tilde '~' will be expanded into user's home directory. Unlike
+ real shells, quotation does not suppress expansions. Use
+ *two* backslashes (e.g. ``\\\\*``) to suppress expansions.
+ To completely disable these expansions, you can use the -G flag.
+
+ Options:
+
+ -n
+ __name__ is NOT set to '__main__', but to the running file's name
+ without extension (as python does under import). This allows running
+ scripts and reloading the definitions in them without calling code
+ protected by an ``if __name__ == "__main__"`` clause.
+
+ -i
+ run the file in IPython's namespace instead of an empty one. This
+ is useful if you are experimenting with code written in a text editor
+ which depends on variables defined interactively.
+
+ -e
+ ignore sys.exit() calls or SystemExit exceptions in the script
+ being run. This is particularly useful if IPython is being used to
+ run unittests, which always exit with a sys.exit() call. In such
+ cases you are interested in the output of the test results, not in
+ seeing a traceback of the unittest module.
+
+ -t
+ print timing information at the end of the run. IPython will give
+ you an estimated CPU time consumption for your script, which under
+ Unix uses the resource module to avoid the wraparound problems of
+ time.clock(). Under Unix, an estimate of time spent on system tasks
+ is also given (for Windows platforms this is reported as 0.0).
+
+ If -t is given, an additional ``-N<N>`` option can be given, where <N>
+ must be an integer indicating how many times you want the script to
+ run. The final timing report will include total and per run results.
+
+ For example (testing the script uniq_stable.py)::
+
+ In [1]: run -t uniq_stable
+
+ IPython CPU timings (estimated):
+ User : 0.19597 s.
+ System: 0.0 s.
+
+ In [2]: run -t -N5 uniq_stable
+
+ IPython CPU timings (estimated):
+ Total runs performed: 5
+ Times : Total Per run
+ User : 0.910862 s, 0.1821724 s.
+ System: 0.0 s, 0.0 s.
+
+ -d
+ run your program under the control of pdb, the Python debugger.
+ This allows you to execute your program step by step, watch variables,
+ etc. Internally, what IPython does is similar to calling::
+
+ pdb.run('execfile("YOURFILENAME")')
+
+ with a breakpoint set on line 1 of your file. You can change the line
+ number for this automatic breakpoint to be <N> by using the -bN option
+ (where N must be an integer). For example::
+
+ %run -d -b40 myscript
+
+ will set the first breakpoint at line 40 in myscript.py. Note that
+ the first breakpoint must be set on a line which actually does
+ something (not a comment or docstring) for it to stop execution.
+
+ Or you can specify a breakpoint in a different file::
+
+ %run -d -b myotherfile.py:20 myscript
+
+ When the pdb debugger starts, you will see a (Pdb) prompt. You must
+ first enter 'c' (without quotes) to start execution up to the first
+ breakpoint.
+
+ Entering 'help' gives information about the use of the debugger. You
+ can easily see pdb's full documentation with "import pdb;pdb.help()"
+ at a prompt.
+
+ -p
+ run program under the control of the Python profiler module (which
+ prints a detailed report of execution times, function calls, etc).
+
+ You can pass other options after -p which affect the behavior of the
+ profiler itself. See the docs for %prun for details.
+
+ In this mode, the program's variables do NOT propagate back to the
+ IPython interactive namespace (because they remain in the namespace
+ where the profiler executes them).
+
+ Internally this triggers a call to %prun, see its documentation for
+ details on the options available specifically for profiling.
+
+ There is one special usage for which the text above doesn't apply:
+ if the filename ends with .ipy[nb], the file is run as an IPython
+ script, just as if the commands were written at the IPython prompt.
+
+ -m
+ specify module name to load instead of script path. Similar to
+ the -m option for the python interpreter. Use this option last if you
+ want to combine with other %run options. Unlike the python interpreter,
+ only source modules are allowed; no .pyc or .pyo files.
+ For example::
+
+ %run -m example
+
+ will run the example module.
+
+ -G
+ disable shell-like glob expansion of arguments.
+
+ """
+
+ # get arguments and set sys.argv for program to be run.
+ opts, arg_lst = self.parse_options(parameter_s,
+ 'nidtN:b:pD:l:rs:T:em:G',
+ mode='list', list_all=1)
+ if "m" in opts:
+ modulename = opts["m"][0]
+ modpath = find_mod(modulename)
+ if modpath is None:
+ warn('%r is not a valid modulename on sys.path'%modulename)
+ return
+ arg_lst = [modpath] + arg_lst
+ try:
+ filename = file_finder(arg_lst[0])
+ except IndexError:
+ warn('you must provide at least a filename.')
+ print('\n%run:\n', oinspect.getdoc(self.run))
+ return
+ except IOError as e:
+ try:
+ msg = str(e)
+ except UnicodeError:
+ msg = e.message
+ error(msg)
+ return
+
+ if filename.lower().endswith(('.ipy', '.ipynb')):
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = filename
+ self.shell.safe_execfile_ipy(filename)
+ return
+
+ # Control the response to exit() calls made by the script being run
+ exit_ignore = 'e' in opts
+
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv # save it for later restoring
+
+ if 'G' in opts:
+ args = arg_lst[1:]
+ else:
+ # tilde and glob expansion
+ args = shellglob(map(os.path.expanduser, arg_lst[1:]))
+
+ sys.argv = [filename] + args # put in the proper filename
+ # protect sys.argv from potential unicode strings on Python 2:
+ if not py3compat.PY3:
+ sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
+
+ if 'i' in opts:
+ # Run in user's interactive namespace
+ prog_ns = self.shell.user_ns
+ __name__save = self.shell.user_ns['__name__']
+ prog_ns['__name__'] = '__main__'
+ main_mod = self.shell.user_module
+
+ # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
+ # set the __file__ global in the script's namespace
+ # TK: Is this necessary in interactive mode?
+ prog_ns['__file__'] = filename
+ else:
+ # Run in a fresh, empty namespace
+ if 'n' in opts:
+ name = os.path.splitext(os.path.basename(filename))[0]
+ else:
+ name = '__main__'
+
+ # The shell MUST hold a reference to prog_ns so after %run
+ # exits, the python deletion mechanism doesn't zero it out
+ # (leaving dangling references). See interactiveshell for details
+ main_mod = self.shell.new_main_mod(filename, name)
+ prog_ns = main_mod.__dict__
+
+ # pickle fix. See interactiveshell for an explanation. But we need to
+ # make sure that, if we overwrite __main__, we replace it at the end
+ main_mod_name = prog_ns['__name__']
+
+ if main_mod_name == '__main__':
+ restore_main = sys.modules['__main__']
+ else:
+ restore_main = False
+
+ # This needs to be undone at the end to prevent holding references to
+ # every single object ever created.
+ sys.modules[main_mod_name] = main_mod
+
+ if 'p' in opts or 'd' in opts:
+ if 'm' in opts:
+ code = 'run_module(modulename, prog_ns)'
+ code_ns = {
+ 'run_module': self.shell.safe_run_module,
+ 'prog_ns': prog_ns,
+ 'modulename': modulename,
+ }
+ else:
+ if 'd' in opts:
+ # allow exceptions to raise in debug mode
+ code = 'execfile(filename, prog_ns, raise_exceptions=True)'
+ else:
+ code = 'execfile(filename, prog_ns)'
+ code_ns = {
+ 'execfile': self.shell.safe_execfile,
+ 'prog_ns': prog_ns,
+ 'filename': get_py_filename(filename),
+ }
+
+ try:
+ stats = None
if 'p' in opts:
stats = self._run_with_profiler(code, opts, code_ns)
else:
@@ -713,20 +713,20 @@ python-profiler package from non-free.""")
opts.get('b', ['1'])[0], filename)
self._run_with_debugger(
code, code_ns, filename, bp_line, bp_file)
- else:
+ else:
if 'm' in opts:
def run():
self.shell.safe_run_module(modulename, prog_ns)
- else:
+ else:
if runner is None:
runner = self.default_runner
if runner is None:
runner = self.shell.safe_execfile
-
+
def run():
runner(filename, prog_ns, prog_ns,
exit_ignore=exit_ignore)
-
+
if 't' in opts:
# timed execution
try:
@@ -740,67 +740,67 @@ python-profiler package from non-free.""")
else:
# regular execution
run()
-
+
if 'i' in opts:
self.shell.user_ns['__name__'] = __name__save
else:
# update IPython interactive namespace
-
+
# Some forms of read errors on the file may mean the
# __name__ key was never set; using pop we don't have to
# worry about a possible KeyError.
prog_ns.pop('__name__', None)
-
+
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns.update(prog_ns)
- finally:
- # It's a bit of a mystery why, but __builtins__ can change from
- # being a module to becoming a dict missing some key data after
- # %run. As best I can see, this is NOT something IPython is doing
- # at all, and similar problems have been reported before:
- # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
- # Since this seems to be done by the interpreter itself, the best
- # we can do is to at least restore __builtins__ for the user on
- # exit.
- self.shell.user_ns['__builtins__'] = builtin_mod
-
- # Ensure key global structures are restored
- sys.argv = save_argv
- if restore_main:
- sys.modules['__main__'] = restore_main
- else:
- # Remove from sys.modules the reference to main_mod we'd
- # added. Otherwise it will trap references to objects
- # contained therein.
- del sys.modules[main_mod_name]
-
- return stats
-
- def _run_with_debugger(self, code, code_ns, filename=None,
- bp_line=None, bp_file=None):
- """
- Run `code` in the debugger with a breakpoint.
-
- Parameters
- ----------
- code : str
- Code to execute.
- code_ns : dict
- A namespace in which `code` is executed.
- filename : str
- `code` is run as if it were in `filename`.
- bp_line : int, optional
- Line number of the break point.
- bp_file : str, optional
- Path to the file in which the breakpoint is specified.
- `filename` is used if not given.
-
- Raises
- ------
- UsageError
- If the breakpoint given by `bp_line` is not valid.
-
- """
+ finally:
+ # It's a bit of a mystery why, but __builtins__ can change from
+ # being a module to becoming a dict missing some key data after
+ # %run. As best I can see, this is NOT something IPython is doing
+ # at all, and similar problems have been reported before:
+ # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
+ # Since this seems to be done by the interpreter itself, the best
+ # we can do is to at least restore __builtins__ for the user on
+ # exit.
+ self.shell.user_ns['__builtins__'] = builtin_mod
+
+ # Ensure key global structures are restored
+ sys.argv = save_argv
+ if restore_main:
+ sys.modules['__main__'] = restore_main
+ else:
+ # Remove from sys.modules the reference to main_mod we'd
+ # added. Otherwise it will trap references to objects
+ # contained therein.
+ del sys.modules[main_mod_name]
+
+ return stats
+
+ def _run_with_debugger(self, code, code_ns, filename=None,
+ bp_line=None, bp_file=None):
+ """
+ Run `code` in the debugger with a breakpoint.
+
+ Parameters
+ ----------
+ code : str
+ Code to execute.
+ code_ns : dict
+ A namespace in which `code` is executed.
+ filename : str
+ `code` is run as if it were in `filename`.
+ bp_line : int, optional
+ Line number of the break point.
+ bp_file : str, optional
+ Path to the file in which the breakpoint is specified.
+ `filename` is used if not given.
+
+ Raises
+ ------
+ UsageError
+ If the breakpoint given by `bp_line` is not valid.
+
+ """
deb = self.shell.InteractiveTB.pdb
if not deb:
self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
@@ -811,208 +811,208 @@ python-profiler package from non-free.""")
if hasattr(deb, 'curframe'):
del deb.curframe
- # reset Breakpoint state, which is moronically kept
- # in a class
- bdb.Breakpoint.next = 1
- bdb.Breakpoint.bplist = {}
- bdb.Breakpoint.bpbynumber = [None]
+ # reset Breakpoint state, which is moronically kept
+ # in a class
+ bdb.Breakpoint.next = 1
+ bdb.Breakpoint.bplist = {}
+ bdb.Breakpoint.bpbynumber = [None]
deb.clear_all_breaks()
- if bp_line is not None:
- # Set an initial breakpoint to stop execution
- maxtries = 10
- bp_file = bp_file or filename
- checkline = deb.checkline(bp_file, bp_line)
- if not checkline:
- for bp in range(bp_line + 1, bp_line + maxtries + 1):
- if deb.checkline(bp_file, bp):
- break
- else:
- msg = ("\nI failed to find a valid line to set "
- "a breakpoint\n"
- "after trying up to line: %s.\n"
- "Please set a valid breakpoint manually "
- "with the -b option." % bp)
- raise UsageError(msg)
- # if we find a good linenumber, set the breakpoint
- deb.do_break('%s:%s' % (bp_file, bp_line))
-
- if filename:
- # Mimic Pdb._runscript(...)
- deb._wait_for_mainpyfile = True
- deb.mainpyfile = deb.canonic(filename)
-
- # Start file run
- print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
- try:
- if filename:
- # save filename so it can be used by methods on the deb object
- deb._exec_filename = filename
- while True:
- try:
- deb.run(code, code_ns)
- except Restart:
- print("Restarting")
- if filename:
- deb._wait_for_mainpyfile = True
- deb.mainpyfile = deb.canonic(filename)
- continue
- else:
- break
-
-
- except:
- etype, value, tb = sys.exc_info()
- # Skip three frames in the traceback: the %run one,
- # one inside bdb.py, and the command-line typed by the
- # user (run by exec in pdb itself).
- self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
-
- @staticmethod
- def _run_with_timing(run, nruns):
- """
- Run function `run` and print timing information.
-
- Parameters
- ----------
- run : callable
- Any callable object which takes no argument.
- nruns : int
- Number of times to execute `run`.
-
- """
- twall0 = time.time()
- if nruns == 1:
- t0 = clock2()
- run()
- t1 = clock2()
- t_usr = t1[0] - t0[0]
- t_sys = t1[1] - t0[1]
- print("\nIPython CPU timings (estimated):")
- print(" User : %10.2f s." % t_usr)
- print(" System : %10.2f s." % t_sys)
- else:
- runs = range(nruns)
- t0 = clock2()
- for nr in runs:
- run()
- t1 = clock2()
- t_usr = t1[0] - t0[0]
- t_sys = t1[1] - t0[1]
- print("\nIPython CPU timings (estimated):")
- print("Total runs performed:", nruns)
- print(" Times : %10s %10s" % ('Total', 'Per run'))
- print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
- print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
- twall1 = time.time()
- print("Wall time: %10.2f s." % (twall1 - twall0))
-
- @skip_doctest
- @line_cell_magic
- def timeit(self, line='', cell=None):
- """Time execution of a Python statement or expression
-
- Usage, in line mode:
- %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
- or in cell mode:
- %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
- code
- code...
-
- Time execution of a Python statement or expression using the timeit
- module. This function can be used both as a line and cell magic:
-
- - In line mode you can time a single-line statement (though multiple
- ones can be chained using semicolons).
-
- - In cell mode, the statement in the first line is used as setup code
- (executed but not timed) and the body of the cell is timed. The cell
- body has access to any variables created in the setup code.
-
- Options:
- -n<N>: execute the given statement <N> times in a loop. If this value
- is not given, a fitting value is chosen.
-
- -r<R>: repeat the loop iteration <R> times and take the best result.
- Default: 3
-
- -t: use time.time to measure the time, which is the default on Unix.
- This function measures wall time.
-
- -c: use time.clock to measure the time, which is the default on
- Windows and measures wall time. On Unix, resource.getrusage is used
- instead and returns the CPU user time.
-
- -p<P>: use a precision of <P> digits to display the timing result.
- Default: 3
-
- -q: Quiet, do not print result.
-
- -o: return a TimeitResult that can be stored in a variable to inspect
- the result in more detail.
-
-
- Examples
- --------
- ::
-
- In [1]: %timeit pass
- 10000000 loops, best of 3: 53.3 ns per loop
-
- In [2]: u = None
-
- In [3]: %timeit u is None
- 10000000 loops, best of 3: 184 ns per loop
-
- In [4]: %timeit -r 4 u == None
- 1000000 loops, best of 4: 242 ns per loop
-
- In [5]: import time
-
- In [6]: %timeit -n1 time.sleep(2)
- 1 loop, best of 3: 2 s per loop
-
-
- The times reported by %timeit will be slightly higher than those
- reported by the timeit.py script when variables are accessed. This is
- due to the fact that %timeit executes the statement in the namespace
- of the shell, compared with timeit.py, which uses a single setup
- statement to import function or create variables. Generally, the bias
- does not matter as long as results from timeit.py are not mixed with
- those from %timeit."""
-
- opts, stmt = self.parse_options(line,'n:r:tcp:qo',
- posix=False, strict=False)
- if stmt == "" and cell is None:
- return
-
- timefunc = timeit.default_timer
- number = int(getattr(opts, "n", 0))
- repeat = int(getattr(opts, "r", timeit.default_repeat))
- precision = int(getattr(opts, "p", 3))
- quiet = 'q' in opts
- return_result = 'o' in opts
- if hasattr(opts, "t"):
- timefunc = time.time
- if hasattr(opts, "c"):
- timefunc = clock
-
- timer = Timer(timer=timefunc)
- # this code has tight coupling to the inner workings of timeit.Timer,
- # but is there a better way to give the code stmt access
- # to the shell namespace?
- transform = self.shell.input_splitter.transform_cell
-
- if cell is None:
- # called as line magic
- ast_setup = self.shell.compile.ast_parse("pass")
- ast_stmt = self.shell.compile.ast_parse(transform(stmt))
- else:
- ast_setup = self.shell.compile.ast_parse(transform(stmt))
- ast_stmt = self.shell.compile.ast_parse(transform(cell))
-
- ast_setup = self.shell.transform_ast(ast_setup)
- ast_stmt = self.shell.transform_ast(ast_stmt)
-
+ if bp_line is not None:
+ # Set an initial breakpoint to stop execution
+ maxtries = 10
+ bp_file = bp_file or filename
+ checkline = deb.checkline(bp_file, bp_line)
+ if not checkline:
+ for bp in range(bp_line + 1, bp_line + maxtries + 1):
+ if deb.checkline(bp_file, bp):
+ break
+ else:
+ msg = ("\nI failed to find a valid line to set "
+ "a breakpoint\n"
+ "after trying up to line: %s.\n"
+ "Please set a valid breakpoint manually "
+ "with the -b option." % bp)
+ raise UsageError(msg)
+ # if we find a good linenumber, set the breakpoint
+ deb.do_break('%s:%s' % (bp_file, bp_line))
+
+ if filename:
+ # Mimic Pdb._runscript(...)
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+
+ # Start file run
+ print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
+ try:
+ if filename:
+ # save filename so it can be used by methods on the deb object
+ deb._exec_filename = filename
+ while True:
+ try:
+ deb.run(code, code_ns)
+ except Restart:
+ print("Restarting")
+ if filename:
+ deb._wait_for_mainpyfile = True
+ deb.mainpyfile = deb.canonic(filename)
+ continue
+ else:
+ break
+
+
+ except:
+ etype, value, tb = sys.exc_info()
+ # Skip three frames in the traceback: the %run one,
+ # one inside bdb.py, and the command-line typed by the
+ # user (run by exec in pdb itself).
+ self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
+
+ @staticmethod
+ def _run_with_timing(run, nruns):
+ """
+ Run function `run` and print timing information.
+
+ Parameters
+ ----------
+ run : callable
+ Any callable object which takes no argument.
+ nruns : int
+ Number of times to execute `run`.
+
+ """
+ twall0 = time.time()
+ if nruns == 1:
+ t0 = clock2()
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print(" User : %10.2f s." % t_usr)
+ print(" System : %10.2f s." % t_sys)
+ else:
+ runs = range(nruns)
+ t0 = clock2()
+ for nr in runs:
+ run()
+ t1 = clock2()
+ t_usr = t1[0] - t0[0]
+ t_sys = t1[1] - t0[1]
+ print("\nIPython CPU timings (estimated):")
+ print("Total runs performed:", nruns)
+ print(" Times : %10s %10s" % ('Total', 'Per run'))
+ print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
+ print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
+ twall1 = time.time()
+ print("Wall time: %10.2f s." % (twall1 - twall0))
+
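+ # clock2() returns a (user_time, system_time) pair, which is what the
+ # index arithmetic above unpacks; wall time is measured separately with
+ # time.time() around the whole batch of runs.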
+ @skip_doctest
+ @line_cell_magic
+ def timeit(self, line='', cell=None):
+ """Time execution of a Python statement or expression
+
+ Usage, in line mode:
+ %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
+ or in cell mode:
+ %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
+ code
+ code...
+
+ Time execution of a Python statement or expression using the timeit
+ module. This function can be used both as a line and cell magic:
+
+ - In line mode you can time a single-line statement (though multiple
+ ones can be chained using semicolons).
+
+ - In cell mode, the statement in the first line is used as setup code
+ (executed but not timed) and the body of the cell is timed. The cell
+ body has access to any variables created in the setup code.
+
+ Options:
+ -n<N>: execute the given statement <N> times in a loop. If this value
+ is not given, a fitting value is chosen.
+
+ -r<R>: repeat the loop iteration <R> times and take the best result.
+ Default: 3
+
+ -t: use time.time to measure the time, which is the default on Unix.
+ This function measures wall time.
+
+ -c: use time.clock to measure the time, which is the default on
+ Windows and measures wall time. On Unix, resource.getrusage is used
+ instead and returns the CPU user time.
+
+ -p<P>: use a precision of <P> digits to display the timing result.
+ Default: 3
+
+ -q: Quiet, do not print result.
+
+ -o: return a TimeitResult that can be stored in a variable to inspect
+ the result in more detail.
+
+
+ Examples
+ --------
+ ::
+
+ In [1]: %timeit pass
+ 10000000 loops, best of 3: 53.3 ns per loop
+
+ In [2]: u = None
+
+ In [3]: %timeit u is None
+ 10000000 loops, best of 3: 184 ns per loop
+
+ In [4]: %timeit -r 4 u == None
+ 1000000 loops, best of 4: 242 ns per loop
+
+ In [5]: import time
+
+ In [6]: %timeit -n1 time.sleep(2)
+ 1 loop, best of 3: 2 s per loop
+
+
+ The times reported by %timeit will be slightly higher than those
+ reported by the timeit.py script when variables are accessed. This is
+ due to the fact that %timeit executes the statement in the namespace
+ of the shell, compared with timeit.py, which uses a single setup
+ statement to import function or create variables. Generally, the bias
+ does not matter as long as results from timeit.py are not mixed with
+ those from %timeit."""
+
+ opts, stmt = self.parse_options(line,'n:r:tcp:qo',
+ posix=False, strict=False)
+ if stmt == "" and cell is None:
+ return
+
+ timefunc = timeit.default_timer
+ number = int(getattr(opts, "n", 0))
+ repeat = int(getattr(opts, "r", timeit.default_repeat))
+ precision = int(getattr(opts, "p", 3))
+ quiet = 'q' in opts
+ return_result = 'o' in opts
+ if hasattr(opts, "t"):
+ timefunc = time.time
+ if hasattr(opts, "c"):
+ timefunc = clock
+
+ timer = Timer(timer=timefunc)
+ # this code has tight coupling to the inner workings of timeit.Timer,
+ # but is there a better way to give the code stmt access
+ # to the shell namespace?
+ transform = self.shell.input_splitter.transform_cell
+
+ if cell is None:
+ # called as line magic
+ ast_setup = self.shell.compile.ast_parse("pass")
+ ast_stmt = self.shell.compile.ast_parse(transform(stmt))
+ else:
+ ast_setup = self.shell.compile.ast_parse(transform(stmt))
+ ast_stmt = self.shell.compile.ast_parse(transform(cell))
+
+ ast_setup = self.shell.transform_ast(ast_setup)
+ ast_stmt = self.shell.transform_ast(ast_stmt)
+
# Check that these compile to valid Python code *outside* the timer func
# Invalid code may become valid when put inside the function & loop,
# which messes up error messages.
@@ -1020,359 +1020,359 @@ python-profiler package from non-free.""")
self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")
- # This codestring is taken from timeit.template - we fill it in as an
- # AST, so that we can apply our AST transformations to the user code
- # without affecting the timing code.
- timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
- ' setup\n'
- ' _t0 = _timer()\n'
- ' for _i in _it:\n'
- ' stmt\n'
- ' _t1 = _timer()\n'
- ' return _t1 - _t0\n')
-
- timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
- timeit_ast = ast.fix_missing_locations(timeit_ast)
-
- # Track compilation time so it can be reported if too long
- # Minimum time above which compilation time will be reported
- tc_min = 0.1
-
- t0 = clock()
- code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
- tc = clock()-t0
-
- ns = {}
- exec(code, self.shell.user_ns, ns)
- timer.inner = ns["inner"]
-
- # This is used to check if there is a huge difference between the
- # best and worst timings.
- # Issue: https://github.com/ipython/ipython/issues/6471
- worst_tuning = 0
- if number == 0:
- # determine number so that 0.2 <= total time < 2.0
- number = 1
- for _ in range(1, 10):
- time_number = timer.timeit(number)
- worst_tuning = max(worst_tuning, time_number / number)
- if time_number >= 0.2:
- break
- number *= 10
- all_runs = timer.repeat(repeat, number)
- best = min(all_runs) / number
-
- worst = max(all_runs) / number
- if worst_tuning:
- worst = max(worst, worst_tuning)
-
-        if not quiet:
-            # Check best timing is greater than zero to avoid a
-            # ZeroDivisionError.
-            # In cases where the slowest timing is less than a microsecond
-            # we assume that it does not really matter if the fastest
-            # timing is 4 times faster than the slowest timing or not.
- if worst > 4 * best and best > 0 and worst > 1e-6:
- print("The slowest run took %0.2f times longer than the "
- "fastest. This could mean that an intermediate result "
- "is being cached." % (worst / best))
- if number == 1: # No s at "loops" if only one loop
- print(u"%d loop, best of %d: %s per loop" % (number, repeat,
- _format_time(best, precision)))
- else:
- print(u"%d loops, best of %d: %s per loop" % (number, repeat,
- _format_time(best, precision)))
- if tc > tc_min:
- print("Compiler time: %.2f s" % tc)
- if return_result:
- return TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
-
- @skip_doctest
- @needs_local_scope
- @line_cell_magic
- def time(self,line='', cell=None, local_ns=None):
- """Time execution of a Python statement or expression.
-
-        The CPU and wall clock times are printed, and the value of the
-        expression (if any) is returned. Note that under Win32, system time
-        is always reported as 0, since it cannot be measured.
-
-        This function can be used both as a line and cell magic:
-
-        - In line mode you can time a single-line statement (though multiple
-          ones can be chained using semicolons).
-
- - In cell mode, you can time the cell body (a directly
- following statement raises an error).
-
- This function provides very basic timing functionality. Use the timeit
- magic for more control over the measurement.
-
- Examples
- --------
- ::
-
- In [1]: %time 2**128
- CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
- Wall time: 0.00
- Out[1]: 340282366920938463463374607431768211456L
-
- In [2]: n = 1000000
-
- In [3]: %time sum(range(n))
- CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
- Wall time: 1.37
- Out[3]: 499999500000L
-
- In [4]: %time print 'hello world'
- hello world
- CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
- Wall time: 0.00
-
- Note that the time needed by Python to compile the given expression
- will be reported if it is more than 0.1s. In this example, the
- actual exponentiation is done by Python at compilation time, so while
- the expression can take a noticeable amount of time to compute, that
- time is purely due to the compilation:
-
- In [5]: %time 3**9999;
- CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
- Wall time: 0.00 s
-
- In [6]: %time 3**999999;
- CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
- Wall time: 0.00 s
- Compiler : 0.78 s
- """
-
- # fail immediately if the given expression can't be compiled
-
- if line and cell:
- raise UsageError("Can't use statement directly after '%%time'!")
-
- if cell:
- expr = self.shell.input_transformer_manager.transform_cell(cell)
- else:
- expr = self.shell.input_transformer_manager.transform_cell(line)
-
- # Minimum time above which parse time will be reported
- tp_min = 0.1
-
- t0 = clock()
- expr_ast = self.shell.compile.ast_parse(expr)
- tp = clock()-t0
-
- # Apply AST transformations
- expr_ast = self.shell.transform_ast(expr_ast)
-
- # Minimum time above which compilation time will be reported
- tc_min = 0.1
-
- if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
- mode = 'eval'
- source = '<timed eval>'
- expr_ast = ast.Expression(expr_ast.body[0].value)
- else:
- mode = 'exec'
- source = '<timed exec>'
- t0 = clock()
- code = self.shell.compile(expr_ast, source, mode)
- tc = clock()-t0
-
- # skew measurement as little as possible
- glob = self.shell.user_ns
- wtime = time.time
- # time execution
- wall_st = wtime()
- if mode=='eval':
- st = clock2()
- out = eval(code, glob, local_ns)
- end = clock2()
- else:
- st = clock2()
- exec(code, glob, local_ns)
- end = clock2()
- out = None
- wall_end = wtime()
- # Compute actual times and report
- wall_time = wall_end-wall_st
- cpu_user = end[0]-st[0]
- cpu_sys = end[1]-st[1]
- cpu_tot = cpu_user+cpu_sys
-        # On Windows cpu_sys is always zero, so it adds no new information to the print below
- if sys.platform != 'win32':
- print("CPU times: user %s, sys: %s, total: %s" % \
- (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
- print("Wall time: %s" % _format_time(wall_time))
- if tc > tc_min:
- print("Compiler : %s" % _format_time(tc))
- if tp > tp_min:
- print("Parser : %s" % _format_time(tp))
- return out
-
- @skip_doctest
- @line_magic
- def macro(self, parameter_s=''):
- """Define a macro for future re-execution. It accepts ranges of history,
- filenames or string objects.
-
- Usage:\\
- %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
-
- Options:
-
- -r: use 'raw' input. By default, the 'processed' history is used,
- so that magics are loaded in their transformed version to valid
- Python. If this option is given, the raw input as typed at the
- command line is used instead.
-
- -q: quiet macro definition. By default, a tag line is printed
- to indicate the macro has been created, and then the contents of
- the macro are printed. If this option is given, then no printout
- is produced once the macro is created.
-
- This will define a global variable called `name` which is a string
-        made by joining the slices and lines you specify (n1,n2,... numbers
- above) from your input history into a single string. This variable
- acts like an automatic function which re-executes those lines as if
- you had typed them. You just type 'name' at the prompt and the code
- executes.
-
- The syntax for indicating input ranges is described in %history.
-
- Note: as a 'hidden' feature, you can also use traditional python slice
- notation, where N:M means numbers N through M-1.
-
- For example, if your history contains (print using %hist -n )::
-
- 44: x=1
- 45: y=3
- 46: z=x+y
- 47: print x
- 48: a=5
- 49: print 'x',x,'y',y
-
- you can create a macro with lines 44 through 47 (included) and line 49
- called my_macro with::
-
- In [55]: %macro my_macro 44-47 49
-
- Now, typing `my_macro` (without quotes) will re-execute all this code
- in one pass.
-
- You don't need to give the line-numbers in order, and any given line
- number can appear multiple times. You can assemble macros with any
- lines from your input history in any order.
-
- The macro is a simple object which holds its value in an attribute,
- but IPython's display system checks for macros and executes them as
- code instead of printing them when you type their name.
-
- You can view a macro's contents by explicitly printing it with::
-
- print macro_name
-
- """
- opts,args = self.parse_options(parameter_s,'rq',mode='list')
- if not args: # List existing macros
- return sorted(k for k,v in iteritems(self.shell.user_ns) if\
- isinstance(v, Macro))
- if len(args) == 1:
-            raise UsageError(
-                "%macro insufficient args; usage '%macro name n1-n2 n3-n4 ...'")
- name, codefrom = args[0], " ".join(args[1:])
-
- #print 'rng',ranges # dbg
- try:
- lines = self.shell.find_user_code(codefrom, 'r' in opts)
- except (ValueError, TypeError) as e:
- print(e.args[0])
- return
- macro = Macro(lines)
- self.shell.define_macro(name, macro)
-        if 'q' not in opts:
- print('Macro `%s` created. To execute, type its name (without quotes).' % name)
- print('=== Macro contents: ===')
- print(macro, end=' ')
-
- @magic_arguments.magic_arguments()
- @magic_arguments.argument('output', type=str, default='', nargs='?',
- help="""The name of the variable in which to store output.
- This is a utils.io.CapturedIO object with stdout/err attributes
- for the text of the captured output.
-
- CapturedOutput also has a show() method for displaying the output,
- and __call__ as well, so you can use that to quickly display the
- output.
-
- If unspecified, captured output is discarded.
- """
- )
- @magic_arguments.argument('--no-stderr', action="store_true",
- help="""Don't capture stderr."""
- )
- @magic_arguments.argument('--no-stdout', action="store_true",
- help="""Don't capture stdout."""
- )
- @magic_arguments.argument('--no-display', action="store_true",
- help="""Don't capture IPython's rich display."""
- )
- @cell_magic
- def capture(self, line, cell):
- """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
- args = magic_arguments.parse_argstring(self.capture, line)
- out = not args.no_stdout
- err = not args.no_stderr
- disp = not args.no_display
- with capture_output(out, err, disp) as io:
- self.shell.run_cell(cell)
- if args.output:
- self.shell.user_ns[args.output] = io
-
-def parse_breakpoint(text, current_file):
- '''Returns (file, line) for file:line and (current_file, line) for line'''
- colon = text.find(':')
- if colon == -1:
- return current_file, int(text)
- else:
- return text[:colon], int(text[colon+1:])
-
-def _format_time(timespan, precision=3):
- """Formats the timespan in a human readable form"""
- import math
-
- if timespan >= 60.0:
- # we have more than a minute, format that in a human readable form
- # Idea from http://snipplr.com/view/5713/
- parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
- time = []
- leftover = timespan
- for suffix, length in parts:
- value = int(leftover / length)
- if value > 0:
- leftover = leftover % length
- time.append(u'%s%s' % (str(value), suffix))
- if leftover < 1:
- break
- return " ".join(time)
-
-
-    # Unfortunately the unicode 'micro' symbol can cause problems in
-    # certain terminals.
-    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
-    # Try to prevent crashes by being more conservative than necessary:
-    # e.g. Eclipse is able to print a µ, but has no sys.stdout.encoding set.
-    units = [u"s", u"ms", u'us', u"ns"]  # the safe value
- if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
- try:
- u'\xb5'.encode(sys.stdout.encoding)
- units = [u"s", u"ms",u'\xb5s',"ns"]
- except:
- pass
- scaling = [1, 1e3, 1e6, 1e9]
-
- if timespan > 0.0:
- order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
- else:
- order = 3
- return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
+ # This codestring is taken from timeit.template - we fill it in as an
+ # AST, so that we can apply our AST transformations to the user code
+ # without affecting the timing code.
+ timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
+ ' setup\n'
+ ' _t0 = _timer()\n'
+ ' for _i in _it:\n'
+ ' stmt\n'
+ ' _t1 = _timer()\n'
+ ' return _t1 - _t0\n')
+
+ timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
+ timeit_ast = ast.fix_missing_locations(timeit_ast)
+
+ # Track compilation time so it can be reported if too long
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ t0 = clock()
+ code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
+ tc = clock()-t0
+
+ ns = {}
+ exec(code, self.shell.user_ns, ns)
+ timer.inner = ns["inner"]
+
+ # This is used to check if there is a huge difference between the
+ # best and worst timings.
+ # Issue: https://github.com/ipython/ipython/issues/6471
+ worst_tuning = 0
+ if number == 0:
+ # determine number so that 0.2 <= total time < 2.0
+ number = 1
+ for _ in range(1, 10):
+ time_number = timer.timeit(number)
+ worst_tuning = max(worst_tuning, time_number / number)
+ if time_number >= 0.2:
+ break
+ number *= 10
+ all_runs = timer.repeat(repeat, number)
+ best = min(all_runs) / number
+
+ worst = max(all_runs) / number
+ if worst_tuning:
+ worst = max(worst, worst_tuning)
+
+        if not quiet:
+            # Check best timing is greater than zero to avoid a
+            # ZeroDivisionError.
+            # In cases where the slowest timing is less than a microsecond
+            # we assume that it does not really matter if the fastest
+            # timing is 4 times faster than the slowest timing or not.
+ if worst > 4 * best and best > 0 and worst > 1e-6:
+ print("The slowest run took %0.2f times longer than the "
+ "fastest. This could mean that an intermediate result "
+ "is being cached." % (worst / best))
+ if number == 1: # No s at "loops" if only one loop
+ print(u"%d loop, best of %d: %s per loop" % (number, repeat,
+ _format_time(best, precision)))
+ else:
+ print(u"%d loops, best of %d: %s per loop" % (number, repeat,
+ _format_time(best, precision)))
+ if tc > tc_min:
+ print("Compiler time: %.2f s" % tc)
+ if return_result:
+ return TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
+
+ @skip_doctest
+ @needs_local_scope
+ @line_cell_magic
+ def time(self,line='', cell=None, local_ns=None):
+ """Time execution of a Python statement or expression.
+
+        The CPU and wall clock times are printed, and the value of the
+        expression (if any) is returned. Note that under Win32, system time
+        is always reported as 0, since it cannot be measured.
+
+        This function can be used both as a line and cell magic:
+
+        - In line mode you can time a single-line statement (though multiple
+          ones can be chained using semicolons).
+
+ - In cell mode, you can time the cell body (a directly
+ following statement raises an error).
+
+ This function provides very basic timing functionality. Use the timeit
+ magic for more control over the measurement.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %time 2**128
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+ Out[1]: 340282366920938463463374607431768211456L
+
+ In [2]: n = 1000000
+
+ In [3]: %time sum(range(n))
+ CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
+ Wall time: 1.37
+ Out[3]: 499999500000L
+
+ In [4]: %time print 'hello world'
+ hello world
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00
+
+ Note that the time needed by Python to compile the given expression
+ will be reported if it is more than 0.1s. In this example, the
+ actual exponentiation is done by Python at compilation time, so while
+ the expression can take a noticeable amount of time to compute, that
+ time is purely due to the compilation:
+
+ In [5]: %time 3**9999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+
+ In [6]: %time 3**999999;
+ CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
+ Wall time: 0.00 s
+ Compiler : 0.78 s
+ """
+
+ # fail immediately if the given expression can't be compiled
+
+ if line and cell:
+ raise UsageError("Can't use statement directly after '%%time'!")
+
+ if cell:
+ expr = self.shell.input_transformer_manager.transform_cell(cell)
+ else:
+ expr = self.shell.input_transformer_manager.transform_cell(line)
+
+ # Minimum time above which parse time will be reported
+ tp_min = 0.1
+
+ t0 = clock()
+ expr_ast = self.shell.compile.ast_parse(expr)
+ tp = clock()-t0
+
+ # Apply AST transformations
+ expr_ast = self.shell.transform_ast(expr_ast)
+
+ # Minimum time above which compilation time will be reported
+ tc_min = 0.1
+
+ if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
+ mode = 'eval'
+ source = '<timed eval>'
+ expr_ast = ast.Expression(expr_ast.body[0].value)
+ else:
+ mode = 'exec'
+ source = '<timed exec>'
+ t0 = clock()
+ code = self.shell.compile(expr_ast, source, mode)
+ tc = clock()-t0
+
+ # skew measurement as little as possible
+ glob = self.shell.user_ns
+ wtime = time.time
+ # time execution
+ wall_st = wtime()
+ if mode=='eval':
+ st = clock2()
+ out = eval(code, glob, local_ns)
+ end = clock2()
+ else:
+ st = clock2()
+ exec(code, glob, local_ns)
+ end = clock2()
+ out = None
+ wall_end = wtime()
+ # Compute actual times and report
+ wall_time = wall_end-wall_st
+ cpu_user = end[0]-st[0]
+ cpu_sys = end[1]-st[1]
+ cpu_tot = cpu_user+cpu_sys
+        # On Windows cpu_sys is always zero, so it adds no new information to the print below
+ if sys.platform != 'win32':
+ print("CPU times: user %s, sys: %s, total: %s" % \
+ (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
+ print("Wall time: %s" % _format_time(wall_time))
+ if tc > tc_min:
+ print("Compiler : %s" % _format_time(tc))
+ if tp > tp_min:
+ print("Parser : %s" % _format_time(tp))
+ return out
+
+ @skip_doctest
+ @line_magic
+ def macro(self, parameter_s=''):
+ """Define a macro for future re-execution. It accepts ranges of history,
+ filenames or string objects.
+
+ Usage:\\
+ %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
+
+ Options:
+
+ -r: use 'raw' input. By default, the 'processed' history is used,
+ so that magics are loaded in their transformed version to valid
+ Python. If this option is given, the raw input as typed at the
+ command line is used instead.
+
+ -q: quiet macro definition. By default, a tag line is printed
+ to indicate the macro has been created, and then the contents of
+ the macro are printed. If this option is given, then no printout
+ is produced once the macro is created.
+
+ This will define a global variable called `name` which is a string
+        made by joining the slices and lines you specify (n1,n2,... numbers
+ above) from your input history into a single string. This variable
+ acts like an automatic function which re-executes those lines as if
+ you had typed them. You just type 'name' at the prompt and the code
+ executes.
+
+ The syntax for indicating input ranges is described in %history.
+
+ Note: as a 'hidden' feature, you can also use traditional python slice
+ notation, where N:M means numbers N through M-1.
+
+ For example, if your history contains (print using %hist -n )::
+
+ 44: x=1
+ 45: y=3
+ 46: z=x+y
+ 47: print x
+ 48: a=5
+ 49: print 'x',x,'y',y
+
+ you can create a macro with lines 44 through 47 (included) and line 49
+ called my_macro with::
+
+ In [55]: %macro my_macro 44-47 49
+
+ Now, typing `my_macro` (without quotes) will re-execute all this code
+ in one pass.
+
+ You don't need to give the line-numbers in order, and any given line
+ number can appear multiple times. You can assemble macros with any
+ lines from your input history in any order.
+
+ The macro is a simple object which holds its value in an attribute,
+ but IPython's display system checks for macros and executes them as
+ code instead of printing them when you type their name.
+
+ You can view a macro's contents by explicitly printing it with::
+
+ print macro_name
+
+ """
+ opts,args = self.parse_options(parameter_s,'rq',mode='list')
+ if not args: # List existing macros
+ return sorted(k for k,v in iteritems(self.shell.user_ns) if\
+ isinstance(v, Macro))
+ if len(args) == 1:
+            raise UsageError(
+                "%macro insufficient args; usage '%macro name n1-n2 n3-n4 ...'")
+ name, codefrom = args[0], " ".join(args[1:])
+
+ #print 'rng',ranges # dbg
+ try:
+ lines = self.shell.find_user_code(codefrom, 'r' in opts)
+ except (ValueError, TypeError) as e:
+ print(e.args[0])
+ return
+ macro = Macro(lines)
+ self.shell.define_macro(name, macro)
+        if 'q' not in opts:
+ print('Macro `%s` created. To execute, type its name (without quotes).' % name)
+ print('=== Macro contents: ===')
+ print(macro, end=' ')
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('output', type=str, default='', nargs='?',
+ help="""The name of the variable in which to store output.
+ This is a utils.io.CapturedIO object with stdout/err attributes
+ for the text of the captured output.
+
+ CapturedOutput also has a show() method for displaying the output,
+ and __call__ as well, so you can use that to quickly display the
+ output.
+
+ If unspecified, captured output is discarded.
+ """
+ )
+ @magic_arguments.argument('--no-stderr', action="store_true",
+ help="""Don't capture stderr."""
+ )
+ @magic_arguments.argument('--no-stdout', action="store_true",
+ help="""Don't capture stdout."""
+ )
+ @magic_arguments.argument('--no-display', action="store_true",
+ help="""Don't capture IPython's rich display."""
+ )
+ @cell_magic
+ def capture(self, line, cell):
+ """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
+ args = magic_arguments.parse_argstring(self.capture, line)
+ out = not args.no_stdout
+ err = not args.no_stderr
+ disp = not args.no_display
+ with capture_output(out, err, disp) as io:
+ self.shell.run_cell(cell)
+ if args.output:
+ self.shell.user_ns[args.output] = io
+
+def parse_breakpoint(text, current_file):
+ '''Returns (file, line) for file:line and (current_file, line) for line'''
+ colon = text.find(':')
+ if colon == -1:
+ return current_file, int(text)
+ else:
+ return text[:colon], int(text[colon+1:])
+
+def _format_time(timespan, precision=3):
+ """Formats the timespan in a human readable form"""
+ import math
+
+ if timespan >= 60.0:
+ # we have more than a minute, format that in a human readable form
+ # Idea from http://snipplr.com/view/5713/
+ parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
+ time = []
+ leftover = timespan
+ for suffix, length in parts:
+ value = int(leftover / length)
+ if value > 0:
+ leftover = leftover % length
+ time.append(u'%s%s' % (str(value), suffix))
+ if leftover < 1:
+ break
+ return " ".join(time)
+
+
+    # Unfortunately the unicode 'micro' symbol can cause problems in
+    # certain terminals.
+    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
+    # Try to prevent crashes by being more conservative than necessary:
+    # e.g. Eclipse is able to print a µ, but has no sys.stdout.encoding set.
+    units = [u"s", u"ms", u'us', u"ns"]  # the safe value
+ if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+ try:
+ u'\xb5'.encode(sys.stdout.encoding)
+ units = [u"s", u"ms",u'\xb5s',"ns"]
+ except:
+ pass
+ scaling = [1, 1e3, 1e6, 1e9]
+
+ if timespan > 0.0:
+ order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
+ else:
+ order = 3
+ return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
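For reference, the unit scaling at the end of _format_time above can be exercised on its own. A minimal standalone sketch (stdlib only; the sample values below are illustrative, not taken from this diff):

    import math

    def format_time(timespan, precision=3):
        # Mirror of _format_time's sub-minute branch: pick the largest
        # unit (s, ms, us, ns) whose scaled value is >= 1, capped at ns.
        units = ["s", "ms", "us", "ns"]
        scaling = [1, 1e3, 1e6, 1e9]
        if timespan > 0.0:
            order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
        else:
            order = 3
        return "%.*g %s" % (precision, timespan * scaling[order], units[order])

    print(format_time(0.00042))  # -> 420 us
    print(format_time(2.5e-8))   # -> 25 ns
    print(format_time(1.37))     # -> 1.37 s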
diff --git a/contrib/python/ipython/py2/IPython/core/magics/extension.py b/contrib/python/ipython/py2/IPython/core/magics/extension.py
index cf9a9ab9d1..2991d55ca4 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/extension.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/extension.py
@@ -1,67 +1,67 @@
-"""Implementation of magic functions for the extension machinery.
-"""
-from __future__ import print_function
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import os
-
-# Our own packages
-from IPython.core.error import UsageError
-from IPython.core.magic import Magics, magics_class, line_magic
-from warnings import warn
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-@magics_class
-class ExtensionMagics(Magics):
- """Magics to manage the IPython extensions system."""
-
- @line_magic
- def load_ext(self, module_str):
- """Load an IPython extension by its module name."""
- if not module_str:
- raise UsageError('Missing module name.')
- res = self.shell.extension_manager.load_extension(module_str)
-
- if res == 'already loaded':
- print("The %s extension is already loaded. To reload it, use:" % module_str)
- print(" %reload_ext", module_str)
- elif res == 'no load function':
- print("The %s module is not an IPython extension." % module_str)
-
- @line_magic
- def unload_ext(self, module_str):
- """Unload an IPython extension by its module name.
-
- Not all extensions can be unloaded, only those which define an
- ``unload_ipython_extension`` function.
- """
- if not module_str:
- raise UsageError('Missing module name.')
-
- res = self.shell.extension_manager.unload_extension(module_str)
-
- if res == 'no unload function':
- print("The %s extension doesn't define how to unload it." % module_str)
- elif res == "not loaded":
- print("The %s extension is not loaded." % module_str)
-
- @line_magic
- def reload_ext(self, module_str):
- """Reload an IPython extension by its module name."""
- if not module_str:
- raise UsageError('Missing module name.')
- self.shell.extension_manager.reload_extension(module_str)
+"""Implementation of magic functions for the extension machinery.
+"""
+from __future__ import print_function
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+
+# Our own packages
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from warnings import warn
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class ExtensionMagics(Magics):
+ """Magics to manage the IPython extensions system."""
+
+ @line_magic
+ def load_ext(self, module_str):
+ """Load an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ res = self.shell.extension_manager.load_extension(module_str)
+
+ if res == 'already loaded':
+ print("The %s extension is already loaded. To reload it, use:" % module_str)
+ print(" %reload_ext", module_str)
+ elif res == 'no load function':
+ print("The %s module is not an IPython extension." % module_str)
+
+ @line_magic
+ def unload_ext(self, module_str):
+ """Unload an IPython extension by its module name.
+
+ Not all extensions can be unloaded, only those which define an
+ ``unload_ipython_extension`` function.
+ """
+ if not module_str:
+ raise UsageError('Missing module name.')
+
+ res = self.shell.extension_manager.unload_extension(module_str)
+
+ if res == 'no unload function':
+ print("The %s extension doesn't define how to unload it." % module_str)
+ elif res == "not loaded":
+ print("The %s extension is not loaded." % module_str)
+
+ @line_magic
+ def reload_ext(self, module_str):
+ """Reload an IPython extension by its module name."""
+ if not module_str:
+ raise UsageError('Missing module name.')
+ self.shell.extension_manager.reload_extension(module_str)
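The unload_ext docstring above notes that only extensions defining an ``unload_ipython_extension`` function can be unloaded. A minimal sketch of a module that %load_ext / %unload_ext can manage (the module name my_ext and the pushed variable are illustrative assumptions, not part of this diff):

    # my_ext.py -- hypothetical extension module
    def load_ipython_extension(ipython):
        # Called by %load_ext my_ext; register magics, variables, etc.
        ipython.push({'loaded_marker': True})

    def unload_ipython_extension(ipython):
        # Optional hook: without it, %unload_ext reports that the
        # extension "doesn't define how to unload it".
        ipython.user_ns.pop('loaded_marker', None)

With my_ext importable, ``%load_ext my_ext`` and ``%unload_ext my_ext`` then round-trip cleanly.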
diff --git a/contrib/python/ipython/py2/IPython/core/magics/history.py b/contrib/python/ipython/py2/IPython/core/magics/history.py
index 342e641d48..5967591394 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/history.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/history.py
@@ -1,320 +1,320 @@
-"""Implementation of magic functions related to History.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012, IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib
-import os
+"""Implementation of magic functions related to History.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# Stdlib
+import os
import sys
-from io import open as io_open
-
-# Our own packages
-from IPython.core.error import StdinNotImplementedError
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.core.magic_arguments import (argument, magic_arguments,
- parse_argstring)
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils import io
-from IPython.utils.py3compat import cast_unicode_py2
-
-#-----------------------------------------------------------------------------
-# Magics class implementation
-#-----------------------------------------------------------------------------
-
-
-_unspecified = object()
-
-
-@magics_class
-class HistoryMagics(Magics):
-
- @magic_arguments()
- @argument(
- '-n', dest='print_nums', action='store_true', default=False,
- help="""
- print line numbers for each input.
- This feature is only available if numbered prompts are in use.
- """)
- @argument(
- '-o', dest='get_output', action='store_true', default=False,
- help="also print outputs for each input.")
- @argument(
- '-p', dest='pyprompts', action='store_true', default=False,
- help="""
- print classic '>>>' python prompts before each input.
- This is useful for making documentation, and in conjunction
- with -o, for producing doctest-ready output.
- """)
- @argument(
- '-t', dest='raw', action='store_false', default=True,
- help="""
- print the 'translated' history, as IPython understands it.
- IPython filters your input and converts it all into valid Python
- source before executing it (things like magics or aliases are turned
- into function calls, for example). With this option, you'll see the
- native history instead of the user-entered version: '%%cd /' will be
- seen as 'get_ipython().magic("%%cd /")' instead of '%%cd /'.
- """)
- @argument(
- '-f', dest='filename',
- help="""
- FILENAME: instead of printing the output to the screen, redirect
- it to the given file. The file is always overwritten, though *when
- it can*, IPython asks for confirmation first. In particular, running
- the command 'history -f FILENAME' from the IPython Notebook
- interface will replace FILENAME even if it already exists *without*
- confirmation.
- """)
- @argument(
- '-g', dest='pattern', nargs='*', default=None,
- help="""
- treat the arg as a glob pattern to search for in (full) history.
- This includes the saved history (almost all commands ever written).
- The pattern may contain '?' to match one unknown character and '*'
- to match any number of unknown characters. Use '%%hist -g' to show
- full saved history (may be very long).
- """)
- @argument(
- '-l', dest='limit', type=int, nargs='?', default=_unspecified,
- help="""
- get the last n lines from all sessions. Specify n as a single
- arg, or the default is the last 10 lines.
- """)
- @argument(
- '-u', dest='unique', action='store_true',
- help="""
- when searching history using `-g`, show only unique history.
- """)
- @argument('range', nargs='*')
- @skip_doctest
- @line_magic
- def history(self, parameter_s = ''):
- """Print input history (_i<n> variables), with most recent last.
-
- By default, input history is printed without line numbers so it can be
- directly pasted into an editor. Use -n to show them.
-
- By default, all input history from the current session is displayed.
- Ranges of history can be indicated using the syntax:
-
- ``4``
- Line 4, current session
- ``4-6``
- Lines 4-6, current session
- ``243/1-5``
- Lines 1-5, session 243
- ``~2/7``
- Line 7, session 2 before current
- ``~8/1-~6/5``
- From the first line of 8 sessions ago, to the fifth line of 6
- sessions ago.
-
- Multiple ranges can be entered, separated by spaces
-
- The same syntax is used by %macro, %save, %edit, %rerun
-
- Examples
- --------
- ::
-
- In [6]: %history -n 4-6
- 4:a = 12
- 5:print a**2
- 6:%history -n 4-6
-
- """
-
- args = parse_argstring(self.history, parameter_s)
-
- # For brevity
- history_manager = self.shell.history_manager
-
- def _format_lineno(session, line):
- """Helper function to format line numbers properly."""
- if session in (0, history_manager.session_number):
- return str(line)
- return "%s/%s" % (session, line)
-
- # Check if output to specific file was requested.
- outfname = args.filename
- if not outfname:
+from io import open as io_open
+
+# Our own packages
+from IPython.core.error import StdinNotImplementedError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.magic_arguments import (argument, magic_arguments,
+ parse_argstring)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils import io
+from IPython.utils.py3compat import cast_unicode_py2
+
+#-----------------------------------------------------------------------------
+# Magics class implementation
+#-----------------------------------------------------------------------------
+
+
+_unspecified = object()
+
+
+@magics_class
+class HistoryMagics(Magics):
+
+ @magic_arguments()
+ @argument(
+ '-n', dest='print_nums', action='store_true', default=False,
+ help="""
+ print line numbers for each input.
+ This feature is only available if numbered prompts are in use.
+ """)
+ @argument(
+ '-o', dest='get_output', action='store_true', default=False,
+ help="also print outputs for each input.")
+ @argument(
+ '-p', dest='pyprompts', action='store_true', default=False,
+ help="""
+ print classic '>>>' python prompts before each input.
+ This is useful for making documentation, and in conjunction
+ with -o, for producing doctest-ready output.
+ """)
+ @argument(
+ '-t', dest='raw', action='store_false', default=True,
+ help="""
+ print the 'translated' history, as IPython understands it.
+ IPython filters your input and converts it all into valid Python
+ source before executing it (things like magics or aliases are turned
+ into function calls, for example). With this option, you'll see the
+ native history instead of the user-entered version: '%%cd /' will be
+ seen as 'get_ipython().magic("%%cd /")' instead of '%%cd /'.
+ """)
+ @argument(
+ '-f', dest='filename',
+ help="""
+ FILENAME: instead of printing the output to the screen, redirect
+ it to the given file. The file is always overwritten, though *when
+ it can*, IPython asks for confirmation first. In particular, running
+ the command 'history -f FILENAME' from the IPython Notebook
+ interface will replace FILENAME even if it already exists *without*
+ confirmation.
+ """)
+ @argument(
+ '-g', dest='pattern', nargs='*', default=None,
+ help="""
+ treat the arg as a glob pattern to search for in (full) history.
+ This includes the saved history (almost all commands ever written).
+ The pattern may contain '?' to match one unknown character and '*'
+ to match any number of unknown characters. Use '%%hist -g' to show
+ full saved history (may be very long).
+ """)
+ @argument(
+ '-l', dest='limit', type=int, nargs='?', default=_unspecified,
+ help="""
+ get the last n lines from all sessions. Specify n as a single
+ arg, or the default is the last 10 lines.
+ """)
+ @argument(
+ '-u', dest='unique', action='store_true',
+ help="""
+ when searching history using `-g`, show only unique history.
+ """)
+ @argument('range', nargs='*')
+ @skip_doctest
+ @line_magic
+ def history(self, parameter_s = ''):
+ """Print input history (_i<n> variables), with most recent last.
+
+ By default, input history is printed without line numbers so it can be
+ directly pasted into an editor. Use -n to show them.
+
+ By default, all input history from the current session is displayed.
+ Ranges of history can be indicated using the syntax:
+
+ ``4``
+ Line 4, current session
+ ``4-6``
+ Lines 4-6, current session
+ ``243/1-5``
+ Lines 1-5, session 243
+ ``~2/7``
+ Line 7, session 2 before current
+ ``~8/1-~6/5``
+ From the first line of 8 sessions ago, to the fifth line of 6
+ sessions ago.
+
+ Multiple ranges can be entered, separated by spaces
+
+ The same syntax is used by %macro, %save, %edit, %rerun
+
+ Examples
+ --------
+ ::
+
+ In [6]: %history -n 4-6
+ 4:a = 12
+ 5:print a**2
+ 6:%history -n 4-6
+
+ """
+
+ args = parse_argstring(self.history, parameter_s)
+
+ # For brevity
+ history_manager = self.shell.history_manager
+
+ def _format_lineno(session, line):
+ """Helper function to format line numbers properly."""
+ if session in (0, history_manager.session_number):
+ return str(line)
+ return "%s/%s" % (session, line)
+
+ # Check if output to specific file was requested.
+ outfname = args.filename
+ if not outfname:
outfile = sys.stdout # default
- # We don't want to close stdout at the end!
- close_at_end = False
- else:
- if os.path.exists(outfname):
- try:
- ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
- except StdinNotImplementedError:
- ans = True
- if not ans:
- print('Aborting.')
- return
- print("Overwriting file.")
- outfile = io_open(outfname, 'w', encoding='utf-8')
- close_at_end = True
-
- print_nums = args.print_nums
- get_output = args.get_output
- pyprompts = args.pyprompts
- raw = args.raw
-
- pattern = None
- limit = None if args.limit is _unspecified else args.limit
-
- if args.pattern is not None:
- if args.pattern:
- pattern = "*" + " ".join(args.pattern) + "*"
- else:
- pattern = "*"
- hist = history_manager.search(pattern, raw=raw, output=get_output,
- n=limit, unique=args.unique)
- print_nums = True
- elif args.limit is not _unspecified:
- n = 10 if limit is None else limit
- hist = history_manager.get_tail(n, raw=raw, output=get_output)
- else:
- if args.range: # Get history by ranges
- hist = history_manager.get_range_by_str(" ".join(args.range),
- raw, get_output)
- else: # Just get history for the current session
- hist = history_manager.get_range(raw=raw, output=get_output)
-
- # We could be displaying the entire history, so let's not try to pull
- # it into a list in memory. Anything that needs more space will just
- # misalign.
- width = 4
-
- for session, lineno, inline in hist:
- # Print user history with tabs expanded to 4 spaces. The GUI
- # clients use hard tabs for easier usability in auto-indented code,
- # but we want to produce PEP-8 compliant history for safe pasting
- # into an editor.
- if get_output:
- inline, output = inline
- inline = inline.expandtabs(4).rstrip()
-
- multiline = "\n" in inline
- line_sep = '\n' if multiline else ' '
- if print_nums:
- print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
- line_sep), file=outfile, end=u'')
- if pyprompts:
- print(u">>> ", end=u"", file=outfile)
- if multiline:
- inline = "\n... ".join(inline.splitlines()) + "\n..."
- print(inline, file=outfile)
- if get_output and output:
- print(cast_unicode_py2(output), file=outfile)
-
- if close_at_end:
- outfile.close()
-
- @line_magic
- def recall(self, arg):
- r"""Repeat a command, or get command to input line for editing.
-
- %recall and %rep are equivalent.
-
- - %recall (no arguments):
-
- Place a string version of last computation result (stored in the
- special '_' variable) to the next input prompt. Allows you to create
- elaborate command lines without using copy-paste::
-
- In[1]: l = ["hei", "vaan"]
- In[2]: "".join(l)
- Out[2]: heivaan
- In[3]: %recall
- In[4]: heivaan_ <== cursor blinking
-
- %recall 45
-
- Place history line 45 on the next input prompt. Use %hist to find
- out the number.
-
- %recall 1-4
-
- Combine the specified lines into one cell, and place it on the next
- input prompt. See %history for the slice syntax.
-
- %recall foo+bar
-
- If foo+bar can be evaluated in the user namespace, the result is
- placed at the next input prompt. Otherwise, the history is searched
- for lines which contain that substring, and the most recent one is
- placed at the next input prompt.
- """
- if not arg: # Last output
- self.shell.set_next_input(str(self.shell.user_ns["_"]))
- return
- # Get history range
- histlines = self.shell.history_manager.get_range_by_str(arg)
- cmd = "\n".join(x[2] for x in histlines)
- if cmd:
- self.shell.set_next_input(cmd.rstrip())
- return
-
- try: # Variable in user namespace
- cmd = str(eval(arg, self.shell.user_ns))
- except Exception: # Search for term in history
- histlines = self.shell.history_manager.search("*"+arg+"*")
- for h in reversed([x[2] for x in histlines]):
- if 'recall' in h or 'rep' in h:
- continue
- self.shell.set_next_input(h.rstrip())
- return
- else:
- self.shell.set_next_input(cmd.rstrip())
- print("Couldn't evaluate or find in history:", arg)
-
- @line_magic
- def rerun(self, parameter_s=''):
- """Re-run previous input
-
- By default, you can specify ranges of input history to be repeated
- (as with %history). With no arguments, it will repeat the last line.
-
- Options:
-
- -l <n> : Repeat the last n lines of input, not including the
- current command.
-
- -g foo : Repeat the most recent line which contains foo
- """
- opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')
- if "l" in opts: # Last n lines
- n = int(opts['l'])
- hist = self.shell.history_manager.get_tail(n)
- elif "g" in opts: # Search
- p = "*"+opts['g']+"*"
- hist = list(self.shell.history_manager.search(p))
- for l in reversed(hist):
- if "rerun" not in l[2]:
- hist = [l] # The last match which isn't a %rerun
- break
- else:
- hist = [] # No matches except %rerun
- elif args: # Specify history ranges
- hist = self.shell.history_manager.get_range_by_str(args)
- else: # Last line
- hist = self.shell.history_manager.get_tail(1)
- hist = [x[2] for x in hist]
- if not hist:
- print("No lines in history match specification")
- return
- histlines = "\n".join(hist)
- print("=== Executing: ===")
- print(histlines)
- print("=== Output: ===")
- self.shell.run_cell("\n".join(hist), store_history=False)
+ # We don't want to close stdout at the end!
+ close_at_end = False
+ else:
+ if os.path.exists(outfname):
+ try:
+ ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Aborting.')
+ return
+ print("Overwriting file.")
+ outfile = io_open(outfname, 'w', encoding='utf-8')
+ close_at_end = True
+
+ print_nums = args.print_nums
+ get_output = args.get_output
+ pyprompts = args.pyprompts
+ raw = args.raw
+
+ pattern = None
+ limit = None if args.limit is _unspecified else args.limit
+
+ if args.pattern is not None:
+ if args.pattern:
+ pattern = "*" + " ".join(args.pattern) + "*"
+ else:
+ pattern = "*"
+ hist = history_manager.search(pattern, raw=raw, output=get_output,
+ n=limit, unique=args.unique)
+ print_nums = True
+ elif args.limit is not _unspecified:
+ n = 10 if limit is None else limit
+ hist = history_manager.get_tail(n, raw=raw, output=get_output)
+ else:
+ if args.range: # Get history by ranges
+ hist = history_manager.get_range_by_str(" ".join(args.range),
+ raw, get_output)
+ else: # Just get history for the current session
+ hist = history_manager.get_range(raw=raw, output=get_output)
+
+ # We could be displaying the entire history, so let's not try to pull
+ # it into a list in memory. Anything that needs more space will just
+ # misalign.
+ width = 4
+
+ for session, lineno, inline in hist:
+ # Print user history with tabs expanded to 4 spaces. The GUI
+ # clients use hard tabs for easier usability in auto-indented code,
+ # but we want to produce PEP-8 compliant history for safe pasting
+ # into an editor.
+ if get_output:
+ inline, output = inline
+ inline = inline.expandtabs(4).rstrip()
+
+ multiline = "\n" in inline
+ line_sep = '\n' if multiline else ' '
+ if print_nums:
+ print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
+ line_sep), file=outfile, end=u'')
+ if pyprompts:
+ print(u">>> ", end=u"", file=outfile)
+ if multiline:
+ inline = "\n... ".join(inline.splitlines()) + "\n..."
+ print(inline, file=outfile)
+ if get_output and output:
+ print(cast_unicode_py2(output), file=outfile)
+
+ if close_at_end:
+ outfile.close()
+
+ @line_magic
+ def recall(self, arg):
+ r"""Repeat a command, or get command to input line for editing.
+
+ %recall and %rep are equivalent.
+
+ - %recall (no arguments):
+
+ Place a string version of last computation result (stored in the
+ special '_' variable) to the next input prompt. Allows you to create
+ elaborate command lines without using copy-paste::
+
+ In[1]: l = ["hei", "vaan"]
+ In[2]: "".join(l)
+ Out[2]: heivaan
+ In[3]: %recall
+ In[4]: heivaan_ <== cursor blinking
+
+ %recall 45
+
+ Place history line 45 on the next input prompt. Use %hist to find
+ out the number.
+
+ %recall 1-4
+
+ Combine the specified lines into one cell, and place it on the next
+ input prompt. See %history for the slice syntax.
+
+ %recall foo+bar
+
+ If foo+bar can be evaluated in the user namespace, the result is
+ placed at the next input prompt. Otherwise, the history is searched
+ for lines which contain that substring, and the most recent one is
+ placed at the next input prompt.
+ """
+ if not arg: # Last output
+ self.shell.set_next_input(str(self.shell.user_ns["_"]))
+ return
+ # Get history range
+ histlines = self.shell.history_manager.get_range_by_str(arg)
+ cmd = "\n".join(x[2] for x in histlines)
+ if cmd:
+ self.shell.set_next_input(cmd.rstrip())
+ return
+
+ try: # Variable in user namespace
+ cmd = str(eval(arg, self.shell.user_ns))
+ except Exception: # Search for term in history
+ histlines = self.shell.history_manager.search("*"+arg+"*")
+ for h in reversed([x[2] for x in histlines]):
+ if 'recall' in h or 'rep' in h:
+ continue
+ self.shell.set_next_input(h.rstrip())
+ return
+ else:
+ self.shell.set_next_input(cmd.rstrip())
+ print("Couldn't evaluate or find in history:", arg)
+
+ @line_magic
+ def rerun(self, parameter_s=''):
+ """Re-run previous input
+
+ By default, you can specify ranges of input history to be repeated
+ (as with %history). With no arguments, it will repeat the last line.
+
+ Options:
+
+ -l <n> : Repeat the last n lines of input, not including the
+ current command.
+
+ -g foo : Repeat the most recent line which contains foo
+ """
+ opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')
+ if "l" in opts: # Last n lines
+ n = int(opts['l'])
+ hist = self.shell.history_manager.get_tail(n)
+ elif "g" in opts: # Search
+ p = "*"+opts['g']+"*"
+ hist = list(self.shell.history_manager.search(p))
+ for l in reversed(hist):
+ if "rerun" not in l[2]:
+ hist = [l] # The last match which isn't a %rerun
+ break
+ else:
+ hist = [] # No matches except %rerun
+ elif args: # Specify history ranges
+ hist = self.shell.history_manager.get_range_by_str(args)
+ else: # Last line
+ hist = self.shell.history_manager.get_tail(1)
+ hist = [x[2] for x in hist]
+ if not hist:
+ print("No lines in history match specification")
+ return
+ histlines = "\n".join(hist)
+ print("=== Executing: ===")
+ print(histlines)
+ print("=== Output: ===")
+ self.shell.run_cell("\n".join(hist), store_history=False)
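The %history, %macro and %rerun magics above all resolve range strings through history_manager.get_range_by_str. A hedged sketch of calling the same API directly from a running session (the range string is illustrative):

    # Inside an IPython session; get_range_by_str yields
    # (session, lineno, source) tuples for the documented range syntax,
    # e.g. "4-6" (current session) and "~1/7" (previous session).
    hm = get_ipython().history_manager
    for session, lineno, source in hm.get_range_by_str("4-6 ~1/7"):
        print("%s/%s: %s" % (session, lineno, source))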
diff --git a/contrib/python/ipython/py2/IPython/core/magics/logging.py b/contrib/python/ipython/py2/IPython/core/magics/logging.py
index 0fafdeff6b..90214ab54a 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/logging.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/logging.py
@@ -1,184 +1,184 @@
-"""Implementation of magic functions for IPython's own logging.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import os
-import sys
-
-# Our own packages
-from IPython.core.magic import Magics, magics_class, line_magic
+"""Implementation of magic functions for IPython's own logging.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import os
+import sys
+
+# Our own packages
+from IPython.core.magic import Magics, magics_class, line_magic
from warnings import warn
-from IPython.utils.py3compat import str_to_unicode
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-@magics_class
-class LoggingMagics(Magics):
- """Magics related to all logging machinery."""
-
- @line_magic
- def logstart(self, parameter_s=''):
- """Start logging anywhere in a session.
-
- %logstart [-o|-r|-t] [log_name [log_mode]]
-
- If no name is given, it defaults to a file named 'ipython_log.py' in your
- current directory, in 'rotate' mode (see below).
-
- '%logstart name' saves to file 'name' in 'backup' mode. It saves your
- history up to that point and then continues logging.
-
- %logstart takes a second optional parameter: logging mode. This can be one
- of (note that the modes are given unquoted):
-
- append
- Keep logging at the end of any existing file.
-
- backup
- Rename any existing file to name~ and start name.
-
- global
- Append to a single logfile in your home directory.
-
- over
- Overwrite any existing log.
-
- rotate
- Create rotating logs: name.1~, name.2~, etc.
-
- Options:
-
- -o
- log also IPython's output. In this mode, all commands which
- generate an Out[NN] prompt are recorded to the logfile, right after
- their corresponding input line. The output lines are always
- prepended with a '#[Out]# ' marker, so that the log remains valid
- Python code.
-
- Since this marker is always the same, filtering only the output from
- a log is very easy, using for example a simple awk call::
-
- awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py
-
- -r
- log 'raw' input. Normally, IPython's logs contain the processed
- input, so that user lines are logged in their final form, converted
- into valid Python. For example, %Exit is logged as
- _ip.magic("Exit"). If the -r flag is given, all input is logged
- exactly as typed, with no transformations applied.
-
- -t
- put timestamps before each input line logged (these are put in
- comments).
- """
-
- opts,par = self.parse_options(parameter_s,'ort')
- log_output = 'o' in opts
- log_raw_input = 'r' in opts
- timestamp = 't' in opts
-
- logger = self.shell.logger
-
- # if no args are given, the defaults set in the logger constructor by
- # ipython remain valid
- if par:
- try:
- logfname,logmode = par.split()
- except:
- logfname = par
- logmode = 'backup'
- else:
- logfname = logger.logfname
- logmode = logger.logmode
- # put logfname into rc struct as if it had been called on the command
-        # line, so it ends up saved in the log header. Save it in case we need
- # to restore it...
- old_logfile = self.shell.logfile
- if logfname:
- logfname = os.path.expanduser(logfname)
- self.shell.logfile = logfname
-
- loghead = u'# IPython log file\n\n'
- try:
- logger.logstart(logfname, loghead, logmode, log_output, timestamp,
- log_raw_input)
- except:
- self.shell.logfile = old_logfile
- warn("Couldn't start log: %s" % sys.exc_info()[1])
- else:
- # log input history up to this point, optionally interleaving
- # output if requested
-
- if timestamp:
- # disable timestamping for the previous history, since we've
- # lost those already (no time machine here).
- logger.timestamp = False
-
- if log_raw_input:
- input_hist = self.shell.history_manager.input_hist_raw
- else:
- input_hist = self.shell.history_manager.input_hist_parsed
-
- if log_output:
- log_write = logger.log_write
- output_hist = self.shell.history_manager.output_hist
- for n in range(1,len(input_hist)-1):
- log_write(input_hist[n].rstrip() + u'\n')
- if n in output_hist:
- log_write(str_to_unicode(repr(output_hist[n])),'output')
- else:
- logger.log_write(u'\n'.join(input_hist[1:]))
- logger.log_write(u'\n')
- if timestamp:
- # re-enable timestamping
- logger.timestamp = True
-
- print ('Activating auto-logging. '
- 'Current session state plus future input saved.')
- logger.logstate()
-
- @line_magic
- def logstop(self, parameter_s=''):
- """Fully stop logging and close log file.
-
- In order to start logging again, a new %logstart call needs to be made,
- possibly (though not necessarily) with a new filename, mode and other
- options."""
- self.shell.logger.logstop()
-
- @line_magic
- def logoff(self, parameter_s=''):
- """Temporarily stop logging.
-
- You must have previously started logging."""
- self.shell.logger.switch_log(0)
-
- @line_magic
- def logon(self, parameter_s=''):
- """Restart logging.
-
- This function is for restarting logging which you've temporarily
- stopped with %logoff. For starting logging for the first time, you
- must use the %logstart function, which allows you to specify an
- optional log filename."""
-
- self.shell.logger.switch_log(1)
-
- @line_magic
- def logstate(self, parameter_s=''):
- """Print the status of the logging system."""
-
- self.shell.logger.logstate()
+from IPython.utils.py3compat import str_to_unicode
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class LoggingMagics(Magics):
+ """Magics related to all logging machinery."""
+
+ @line_magic
+ def logstart(self, parameter_s=''):
+ """Start logging anywhere in a session.
+
+ %logstart [-o|-r|-t] [log_name [log_mode]]
+
+ If no name is given, it defaults to a file named 'ipython_log.py' in your
+ current directory, in 'rotate' mode (see below).
+
+ '%logstart name' saves to file 'name' in 'backup' mode. It saves your
+ history up to that point and then continues logging.
+
+ %logstart takes a second optional parameter: logging mode. This can be one
+ of (note that the modes are given unquoted):
+
+ append
+ Keep logging at the end of any existing file.
+
+ backup
+ Rename any existing file to name~ and start name.
+
+ global
+ Append to a single logfile in your home directory.
+
+ over
+ Overwrite any existing log.
+
+ rotate
+ Create rotating logs: name.1~, name.2~, etc.
+
+ Options:
+
+ -o
+ log also IPython's output. In this mode, all commands which
+ generate an Out[NN] prompt are recorded to the logfile, right after
+ their corresponding input line. The output lines are always
+ prepended with a '#[Out]# ' marker, so that the log remains valid
+ Python code.
+
+ Since this marker is always the same, filtering only the output from
+ a log is very easy, using for example a simple awk call::
+
+ awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py
+
+ -r
+ log 'raw' input. Normally, IPython's logs contain the processed
+ input, so that user lines are logged in their final form, converted
+ into valid Python. For example, %Exit is logged as
+ _ip.magic("Exit"). If the -r flag is given, all input is logged
+ exactly as typed, with no transformations applied.
+
+ -t
+ put timestamps before each input line logged (these are put in
+ comments).
+ """
+
+ opts,par = self.parse_options(parameter_s,'ort')
+ log_output = 'o' in opts
+ log_raw_input = 'r' in opts
+ timestamp = 't' in opts
+
+ logger = self.shell.logger
+
+ # if no args are given, the defaults set in the logger constructor by
+ # ipython remain valid
+ if par:
+ try:
+ logfname,logmode = par.split()
+            except ValueError:
+ logfname = par
+ logmode = 'backup'
+ else:
+ logfname = logger.logfname
+ logmode = logger.logmode
+        # put logfname into rc struct as if it had been called on the command
+        # line, so it ends up saved in the log header. Save it in case we
+        # need to restore it...
+ old_logfile = self.shell.logfile
+ if logfname:
+ logfname = os.path.expanduser(logfname)
+ self.shell.logfile = logfname
+
+ loghead = u'# IPython log file\n\n'
+ try:
+ logger.logstart(logfname, loghead, logmode, log_output, timestamp,
+ log_raw_input)
+ except:
+ self.shell.logfile = old_logfile
+ warn("Couldn't start log: %s" % sys.exc_info()[1])
+ else:
+ # log input history up to this point, optionally interleaving
+ # output if requested
+
+ if timestamp:
+ # disable timestamping for the previous history, since we've
+ # lost those already (no time machine here).
+ logger.timestamp = False
+
+ if log_raw_input:
+ input_hist = self.shell.history_manager.input_hist_raw
+ else:
+ input_hist = self.shell.history_manager.input_hist_parsed
+
+ if log_output:
+ log_write = logger.log_write
+ output_hist = self.shell.history_manager.output_hist
+ for n in range(1,len(input_hist)-1):
+ log_write(input_hist[n].rstrip() + u'\n')
+ if n in output_hist:
+ log_write(str_to_unicode(repr(output_hist[n])),'output')
+ else:
+ logger.log_write(u'\n'.join(input_hist[1:]))
+ logger.log_write(u'\n')
+ if timestamp:
+ # re-enable timestamping
+ logger.timestamp = True
+
+ print ('Activating auto-logging. '
+ 'Current session state plus future input saved.')
+ logger.logstate()
+
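Since every output line carries the fixed '#[Out]# ' marker documented above, a log written with %logstart -o can also be post-processed without awk. A minimal Python sketch of the same filter (the log file name 'ipython_log.py' is the documented default, but any -o log works):

    from __future__ import print_function

    # keep only the output lines of a log produced with %logstart -o;
    # this mirrors the awk one-liner quoted in the docstring above
    MARKER = '#[Out]# '
    with open('ipython_log.py') as f:
        for line in f:
            if line.startswith(MARKER):
                print(line[len(MARKER):], end='')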
+ @line_magic
+ def logstop(self, parameter_s=''):
+ """Fully stop logging and close log file.
+
+ In order to start logging again, a new %logstart call needs to be made,
+ possibly (though not necessarily) with a new filename, mode and other
+ options."""
+ self.shell.logger.logstop()
+
+ @line_magic
+ def logoff(self, parameter_s=''):
+ """Temporarily stop logging.
+
+ You must have previously started logging."""
+ self.shell.logger.switch_log(0)
+
+ @line_magic
+ def logon(self, parameter_s=''):
+ """Restart logging.
+
+ This function is for restarting logging which you've temporarily
+ stopped with %logoff. For starting logging for the first time, you
+ must use the %logstart function, which allows you to specify an
+ optional log filename."""
+
+ self.shell.logger.switch_log(1)
+
+ @line_magic
+ def logstate(self, parameter_s=''):
+ """Print the status of the logging system."""
+
+ self.shell.logger.logstate()
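The magics above can also be driven programmatically from a running session. A sketch, assuming code executed inside IPython where get_ipython() is available:

    # drive the logging magics from code instead of the prompt
    ip = get_ipython()
    ip.magic('logstart -o -t session_log.py rotate')  # output + timestamps, rotate mode
    ip.magic('logoff')    # pause logging
    ip.magic('logon')     # resume it
    ip.magic('logstate')  # print the logger status
    ip.magic('logstop')   # close the log; a new %logstart is needed afterwards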
diff --git a/contrib/python/ipython/py2/IPython/core/magics/namespace.py b/contrib/python/ipython/py2/IPython/core/magics/namespace.py
index fafecb191b..c02b38716b 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/namespace.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/namespace.py
@@ -1,704 +1,704 @@
-"""Implementation of namespace-related magic functions.
-"""
-from __future__ import print_function
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import gc
-import re
-import sys
-
-# Our own packages
-from IPython.core import page
-from IPython.core.error import StdinNotImplementedError, UsageError
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils.encoding import DEFAULT_ENCODING
-from IPython.utils.openpy import read_py_file
-from IPython.utils.path import get_py_filename
-from IPython.utils.py3compat import unicode_type
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-@magics_class
-class NamespaceMagics(Magics):
- """Magics to manage various aspects of the user's namespace.
-
- These include listing variables, introspecting into them, etc.
- """
-
- @line_magic
- def pinfo(self, parameter_s='', namespaces=None):
- """Provide detailed information about an object.
-
- '%pinfo object' is just a synonym for object? or ?object."""
-
- #print 'pinfo par: <%s>' % parameter_s # dbg
- # detail_level: 0 -> obj? , 1 -> obj??
- detail_level = 0
- # We need to detect if we got called as 'pinfo pinfo foo', which can
- # happen if the user types 'pinfo foo?' at the cmd line.
- pinfo,qmark1,oname,qmark2 = \
-            re.match(r'(pinfo )?(\?*)(.*?)(\??$)', parameter_s).groups()
- if pinfo or qmark1 or qmark2:
- detail_level = 1
- if "*" in oname:
- self.psearch(oname)
- else:
- self.shell._inspect('pinfo', oname, detail_level=detail_level,
- namespaces=namespaces)
-
- @line_magic
- def pinfo2(self, parameter_s='', namespaces=None):
- """Provide extra detailed information about an object.
-
- '%pinfo2 object' is just a synonym for object?? or ??object."""
- self.shell._inspect('pinfo', parameter_s, detail_level=1,
- namespaces=namespaces)
-
- @skip_doctest
- @line_magic
- def pdef(self, parameter_s='', namespaces=None):
- """Print the call signature for any callable object.
-
- If the object is a class, print the constructor information.
-
- Examples
- --------
- ::
-
- In [3]: %pdef urllib.urlopen
- urllib.urlopen(url, data=None, proxies=None)
- """
- self.shell._inspect('pdef',parameter_s, namespaces)
-
- @line_magic
- def pdoc(self, parameter_s='', namespaces=None):
- """Print the docstring for an object.
-
- If the given object is a class, it will print both the class and the
- constructor docstrings."""
- self.shell._inspect('pdoc',parameter_s, namespaces)
-
- @line_magic
- def psource(self, parameter_s='', namespaces=None):
- """Print (or run through pager) the source code for an object."""
- if not parameter_s:
- raise UsageError('Missing object name.')
- self.shell._inspect('psource',parameter_s, namespaces)
-
- @line_magic
- def pfile(self, parameter_s='', namespaces=None):
- """Print (or run through pager) the file where an object is defined.
-
- The file opens at the line where the object definition begins. IPython
- will honor the environment variable PAGER if set, and otherwise will
- do its best to print the file in a convenient form.
-
- If the given argument is not an object currently defined, IPython will
- try to interpret it as a filename (automatically adding a .py extension
- if needed). You can thus use %pfile as a syntax highlighting code
- viewer."""
-
- # first interpret argument as an object name
- out = self.shell._inspect('pfile',parameter_s, namespaces)
- # if not, try the input as a filename
- if out == 'not found':
- try:
- filename = get_py_filename(parameter_s)
- except IOError as msg:
- print(msg)
- return
- page.page(self.shell.pycolorize(read_py_file(filename, skip_encoding_cookie=False)))
-
- @line_magic
- def psearch(self, parameter_s=''):
- """Search for object in namespaces by wildcard.
-
- %psearch [options] PATTERN [OBJECT TYPE]
-
- Note: ? can be used as a synonym for %psearch, at the beginning or at
- the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
- rest of the command line must be unchanged (options come first), so
- for example the following forms are equivalent
-
- %psearch -i a* function
- -i a* function?
- ?-i a* function
-
- Arguments:
-
- PATTERN
-
- where PATTERN is a string containing * as a wildcard similar to its
- use in a shell. The pattern is matched in all namespaces on the
- search path. By default objects starting with a single _ are not
-          matched; many IPython-generated objects have a single
- underscore. The default is case insensitive matching. Matching is
- also done on the attributes of objects and not only on the objects
- in a module.
-
- [OBJECT TYPE]
-
- Is the name of a python type from the types module. The name is
- given in lowercase without the ending type, ex. StringType is
- written string. By adding a type here only objects matching the
- given type are matched. Using all here makes the pattern match all
- types (this is the default).
-
- Options:
-
- -a: makes the pattern match even objects whose names start with a
- single underscore. These names are normally omitted from the
- search.
-
- -i/-c: make the pattern case insensitive/sensitive. If neither of
- these options are given, the default is read from your configuration
- file, with the option ``InteractiveShell.wildcards_case_sensitive``.
- If this option is not specified in your configuration file, IPython's
- internal default is to do a case sensitive search.
-
- -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
- specify can be searched in any of the following namespaces:
- 'builtin', 'user', 'user_global','internal', 'alias', where
- 'builtin' and 'user' are the search defaults. Note that you should
- not use quotes when specifying namespaces.
-
- 'Builtin' contains the python module builtin, 'user' contains all
-        user data, 'alias' only contains the shell aliases and no python
- objects, 'internal' contains objects used by IPython. The
- 'user_global' namespace is only used by embedded IPython instances,
- and it contains module-level globals. You can add namespaces to the
- search with -s or exclude them with -e (these options can be given
- more than once).
-
- Examples
- --------
- ::
-
- %psearch a* -> objects beginning with an a
- %psearch -e builtin a* -> objects NOT in the builtin space starting in a
- %psearch a* function -> all functions beginning with an a
- %psearch re.e* -> objects beginning with an e in module re
- %psearch r*.e* -> objects that start with e in modules starting in r
- %psearch r*.* string -> all strings in modules beginning with r
-
- Case sensitive search::
-
-          %psearch -c a*       list all objects beginning with lower case a
-
- Show objects beginning with a single _::
-
- %psearch -a _* list objects beginning with a single underscore
- """
- try:
- parameter_s.encode('ascii')
- except UnicodeEncodeError:
- print('Python identifiers can only contain ascii characters.')
- return
-
- # default namespaces to be searched
- def_search = ['user_local', 'user_global', 'builtin']
-
- # Process options/args
- opts,args = self.parse_options(parameter_s,'cias:e:',list_all=True)
- opt = opts.get
- shell = self.shell
- psearch = shell.inspector.psearch
-
- # select case options
- if 'i' in opts:
- ignore_case = True
- elif 'c' in opts:
- ignore_case = False
- else:
- ignore_case = not shell.wildcards_case_sensitive
-
- # Build list of namespaces to search from user options
- def_search.extend(opt('s',[]))
-        ns_exclude = opt('e', [])
- ns_search = [nm for nm in def_search if nm not in ns_exclude]
-
- # Call the actual search
- try:
- psearch(args,shell.ns_table,ns_search,
- show_all=opt('a'),ignore_case=ignore_case)
- except:
- shell.showtraceback()
-
- @skip_doctest
- @line_magic
- def who_ls(self, parameter_s=''):
- """Return a sorted list of all interactive variables.
-
- If arguments are given, only variables of types matching these
- arguments are returned.
-
- Examples
- --------
-
- Define two variables and list them with who_ls::
-
- In [1]: alpha = 123
-
- In [2]: beta = 'test'
-
- In [3]: %who_ls
- Out[3]: ['alpha', 'beta']
-
- In [4]: %who_ls int
- Out[4]: ['alpha']
-
- In [5]: %who_ls str
- Out[5]: ['beta']
- """
-
- user_ns = self.shell.user_ns
- user_ns_hidden = self.shell.user_ns_hidden
- nonmatching = object() # This can never be in user_ns
- out = [ i for i in user_ns
- if not i.startswith('_') \
- and (user_ns[i] is not user_ns_hidden.get(i, nonmatching)) ]
-
- typelist = parameter_s.split()
- if typelist:
- typeset = set(typelist)
- out = [i for i in out if type(user_ns[i]).__name__ in typeset]
-
- out.sort()
- return out
-
- @skip_doctest
- @line_magic
- def who(self, parameter_s=''):
- """Print all interactive variables, with some minimal formatting.
-
- If any arguments are given, only variables whose type matches one of
- these are printed. For example::
-
- %who function str
-
- will only list functions and strings, excluding all other types of
- variables. To find the proper type names, simply use type(var) at a
- command line to see how python prints type names. For example:
-
- ::
-
-           In [1]: type('hello')
- Out[1]: <type 'str'>
-
- indicates that the type name for strings is 'str'.
-
- ``%who`` always excludes executed names loaded through your configuration
- file and things which are internal to IPython.
-
- This is deliberate, as typically you may load many modules and the
- purpose of %who is to show you only what you've manually defined.
-
- Examples
- --------
-
- Define two variables and list them with who::
-
- In [1]: alpha = 123
-
- In [2]: beta = 'test'
-
- In [3]: %who
- alpha beta
-
- In [4]: %who int
- alpha
-
- In [5]: %who str
- beta
- """
-
- varlist = self.who_ls(parameter_s)
- if not varlist:
- if parameter_s:
- print('No variables match your requested type.')
- else:
- print('Interactive namespace is empty.')
- return
-
- # if we have variables, move on...
- count = 0
- for i in varlist:
- print(i+'\t', end=' ')
- count += 1
- if count > 8:
- count = 0
- print()
- print()
-
- @skip_doctest
- @line_magic
- def whos(self, parameter_s=''):
- """Like %who, but gives some extra information about each variable.
-
- The same type filtering of %who can be applied here.
-
- For all variables, the type is printed. Additionally it prints:
-
- - For {},[],(): their length.
-
- - For numpy arrays, a summary with shape, number of
- elements, typecode and size in memory.
-
- - Everything else: a string representation, snipping their middle if
- too long.
-
- Examples
- --------
-
- Define two variables and list them with whos::
-
- In [1]: alpha = 123
-
- In [2]: beta = 'test'
-
- In [3]: %whos
- Variable Type Data/Info
- --------------------------------
- alpha int 123
- beta str test
- """
-
- varnames = self.who_ls(parameter_s)
- if not varnames:
- if parameter_s:
- print('No variables match your requested type.')
- else:
- print('Interactive namespace is empty.')
- return
-
- # if we have variables, move on...
-
- # for these types, show len() instead of data:
- seq_types = ['dict', 'list', 'tuple']
-
- # for numpy arrays, display summary info
- ndarray_type = None
- if 'numpy' in sys.modules:
- try:
- from numpy import ndarray
- except ImportError:
- pass
- else:
- ndarray_type = ndarray.__name__
-
- # Find all variable names and types so we can figure out column sizes
-
- # some types are well known and can be shorter
- abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
- def type_name(v):
- tn = type(v).__name__
- return abbrevs.get(tn,tn)
-
- varlist = [self.shell.user_ns[n] for n in varnames]
-
- typelist = []
- for vv in varlist:
- tt = type_name(vv)
-
- if tt=='instance':
- typelist.append( abbrevs.get(str(vv.__class__),
- str(vv.__class__)))
- else:
- typelist.append(tt)
-
- # column labels and # of spaces as separator
- varlabel = 'Variable'
- typelabel = 'Type'
- datalabel = 'Data/Info'
- colsep = 3
- # variable format strings
- vformat = "{0:<{varwidth}}{1:<{typewidth}}"
- aformat = "%s: %s elems, type `%s`, %s bytes"
- # find the size of the columns to format the output nicely
- varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
- typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
- # table header
- print(varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
- ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1))
- # and the table itself
- kb = 1024
- Mb = 1048576 # kb**2
- for vname,var,vtype in zip(varnames,varlist,typelist):
- print(vformat.format(vname, vtype, varwidth=varwidth, typewidth=typewidth), end=' ')
- if vtype in seq_types:
- print("n="+str(len(var)))
-            elif vtype == ndarray_type:
-                # numpy array: report shape, element count, dtype and byte size
-                vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
-                vsize = var.size
-                vbytes = vsize*var.itemsize
-                vdtype = var.dtype
-
- if vbytes < 100000:
- print(aformat % (vshape, vsize, vdtype, vbytes))
- else:
- print(aformat % (vshape, vsize, vdtype, vbytes), end=' ')
- if vbytes < Mb:
- print('(%s kb)' % (vbytes/kb,))
- else:
- print('(%s Mb)' % (vbytes/Mb,))
- else:
- try:
- vstr = str(var)
- except UnicodeEncodeError:
- vstr = unicode_type(var).encode(DEFAULT_ENCODING,
- 'backslashreplace')
- except:
- vstr = "<object with id %d (str() failed)>" % id(var)
- vstr = vstr.replace('\n', '\\n')
- if len(vstr) < 50:
- print(vstr)
- else:
- print(vstr[:25] + "<...>" + vstr[-25:])
-
- @line_magic
- def reset(self, parameter_s=''):
- """Resets the namespace by removing all names defined by the user, if
- called without arguments, or by removing some types of objects, such
- as everything currently in IPython's In[] and Out[] containers (see
- the parameters for details).
-
- Parameters
- ----------
- -f : force reset without asking for confirmation.
-
- -s : 'Soft' reset: Only clears your namespace, leaving history intact.
- References to objects may be kept. By default (without this option),
- we do a 'hard' reset, giving you a new session and removing all
- references to objects from the current session.
-
- in : reset input history
-
- out : reset output history
-
- dhist : reset directory history
-
- array : reset only variables that are NumPy arrays
-
- See Also
- --------
- reset_selective : invoked as ``%reset_selective``
-
- Examples
- --------
- ::
-
- In [6]: a = 1
-
- In [7]: a
- Out[7]: 1
-
- In [8]: 'a' in _ip.user_ns
- Out[8]: True
-
- In [9]: %reset -f
-
- In [1]: 'a' in _ip.user_ns
- Out[1]: False
-
- In [2]: %reset -f in
- Flushing input history
-
- In [3]: %reset -f dhist in
- Flushing directory history
- Flushing input history
-
- Notes
- -----
- Calling this magic from clients that do not implement standard input,
- such as the ipython notebook interface, will reset the namespace
- without confirmation.
- """
- opts, args = self.parse_options(parameter_s,'sf', mode='list')
- if 'f' in opts:
- ans = True
- else:
- try:
- ans = self.shell.ask_yes_no(
- "Once deleted, variables cannot be recovered. Proceed (y/[n])?",
- default='n')
- except StdinNotImplementedError:
- ans = True
- if not ans:
- print('Nothing done.')
- return
-
- if 's' in opts: # Soft reset
- user_ns = self.shell.user_ns
- for i in self.who_ls():
-                del user_ns[i]
- elif len(args) == 0: # Hard reset
- self.shell.reset(new_session = False)
-
-        # reset in/out/dhist/array: previously extensions/clearcmd.py
- ip = self.shell
- user_ns = self.shell.user_ns # local lookup, heavily used
-
- for target in args:
- target = target.lower() # make matches case insensitive
- if target == 'out':
- print("Flushing output cache (%d entries)" % len(user_ns['_oh']))
- self.shell.displayhook.flush()
-
- elif target == 'in':
- print("Flushing input history")
- pc = self.shell.displayhook.prompt_count + 1
- for n in range(1, pc):
- key = '_i'+repr(n)
- user_ns.pop(key,None)
- user_ns.update(dict(_i=u'',_ii=u'',_iii=u''))
- hm = ip.history_manager
-            # don't delete these, as %save and %macro depend on the
-            # length of these lists being preserved
- hm.input_hist_parsed[:] = [''] * pc
- hm.input_hist_raw[:] = [''] * pc
- # hm has internal machinery for _i,_ii,_iii, clear it out
- hm._i = hm._ii = hm._iii = hm._i00 = u''
-
- elif target == 'array':
- # Support cleaning up numpy arrays
- try:
- from numpy import ndarray
- # This must be done with items and not iteritems because
- # we're going to modify the dict in-place.
- for x,val in list(user_ns.items()):
- if isinstance(val,ndarray):
- del user_ns[x]
- except ImportError:
- print("reset array only works if Numpy is available.")
-
- elif target == 'dhist':
- print("Flushing directory history")
- del user_ns['_dh'][:]
-
- else:
- print("Don't know how to reset ", end=' ')
- print(target + ", please run `%reset?` for details")
-
- gc.collect()
-
- @line_magic
- def reset_selective(self, parameter_s=''):
- """Resets the namespace by removing names defined by the user.
-
- Input/Output history are left around in case you need them.
-
- %reset_selective [-f] regex
-
-        No action is taken if regex is not included.
-
- Options
- -f : force reset without asking for confirmation.
-
- See Also
- --------
- reset : invoked as ``%reset``
-
- Examples
- --------
-
- We first fully reset the namespace so your output looks identical to
- this example for pedagogical reasons; in practice you do not need a
- full reset::
-
- In [1]: %reset -f
-
- Now, with a clean namespace we can make a few variables and use
- ``%reset_selective`` to only delete names that match our regexp::
-
- In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
-
- In [3]: who_ls
- Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
-
- In [4]: %reset_selective -f b[2-3]m
-
- In [5]: who_ls
- Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
-
- In [6]: %reset_selective -f d
-
- In [7]: who_ls
- Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
-
- In [8]: %reset_selective -f c
-
- In [9]: who_ls
- Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
-
- In [10]: %reset_selective -f b
-
- In [11]: who_ls
- Out[11]: ['a']
-
- Notes
- -----
- Calling this magic from clients that do not implement standard input,
- such as the ipython notebook interface, will reset the namespace
- without confirmation.
- """
-
- opts, regex = self.parse_options(parameter_s,'f')
-
- if 'f' in opts:
- ans = True
- else:
- try:
- ans = self.shell.ask_yes_no(
- "Once deleted, variables cannot be recovered. Proceed (y/[n])? ",
- default='n')
- except StdinNotImplementedError:
- ans = True
- if not ans:
- print('Nothing done.')
- return
- user_ns = self.shell.user_ns
- if not regex:
- print('No regex pattern specified. Nothing done.')
- return
- else:
- try:
- m = re.compile(regex)
- except TypeError:
- raise TypeError('regex must be a string or compiled pattern')
- for i in self.who_ls():
- if m.search(i):
-                    del user_ns[i]
-
- @line_magic
- def xdel(self, parameter_s=''):
- """Delete a variable, trying to clear it from anywhere that
- IPython's machinery has references to it. By default, this uses
- the identity of the named object in the user namespace to remove
- references held under other names. The object is also removed
- from the output history.
-
- Options
- -n : Delete the specified name from all namespaces, without
- checking their identity.
- """
- opts, varname = self.parse_options(parameter_s,'n')
- try:
- self.shell.del_var(varname, ('n' in opts))
- except (NameError, ValueError) as e:
- print(type(e).__name__ +": "+ str(e))
+"""Implementation of namespace-related magic functions.
+"""
+from __future__ import print_function
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import gc
+import re
+import sys
+
+# Our own packages
+from IPython.core import page
+from IPython.core.error import StdinNotImplementedError, UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.encoding import DEFAULT_ENCODING
+from IPython.utils.openpy import read_py_file
+from IPython.utils.path import get_py_filename
+from IPython.utils.py3compat import unicode_type
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+@magics_class
+class NamespaceMagics(Magics):
+ """Magics to manage various aspects of the user's namespace.
+
+ These include listing variables, introspecting into them, etc.
+ """
+
+ @line_magic
+ def pinfo(self, parameter_s='', namespaces=None):
+ """Provide detailed information about an object.
+
+ '%pinfo object' is just a synonym for object? or ?object."""
+
+ #print 'pinfo par: <%s>' % parameter_s # dbg
+ # detail_level: 0 -> obj? , 1 -> obj??
+ detail_level = 0
+ # We need to detect if we got called as 'pinfo pinfo foo', which can
+ # happen if the user types 'pinfo foo?' at the cmd line.
+ pinfo,qmark1,oname,qmark2 = \
+            re.match(r'(pinfo )?(\?*)(.*?)(\??$)', parameter_s).groups()
+ if pinfo or qmark1 or qmark2:
+ detail_level = 1
+ if "*" in oname:
+ self.psearch(oname)
+ else:
+ self.shell._inspect('pinfo', oname, detail_level=detail_level,
+ namespaces=namespaces)
+
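The regular expression above is what lets 'pinfo foo?', '?foo' and '??foo' all select the right detail level. A standalone check of how it decomposes a few inputs (illustrative only):

    from __future__ import print_function
    import re

    # how the pinfo regex splits its argument into
    # (magic prefix, leading ?-run, object name, trailing ?)
    pat = re.compile(r'(pinfo )?(\?*)(.*?)(\??$)')
    for s in ('foo', 'foo?', '??foo', 'pinfo foo?'):
        print(s, '->', pat.match(s).groups())
    # foo        -> (None, '', 'foo', '')
    # foo?       -> (None, '', 'foo', '?')
    # ??foo      -> (None, '??', 'foo', '')
    # pinfo foo? -> ('pinfo ', '', 'foo', '?')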
+ @line_magic
+ def pinfo2(self, parameter_s='', namespaces=None):
+ """Provide extra detailed information about an object.
+
+ '%pinfo2 object' is just a synonym for object?? or ??object."""
+ self.shell._inspect('pinfo', parameter_s, detail_level=1,
+ namespaces=namespaces)
+
+ @skip_doctest
+ @line_magic
+ def pdef(self, parameter_s='', namespaces=None):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information.
+
+ Examples
+ --------
+ ::
+
+ In [3]: %pdef urllib.urlopen
+ urllib.urlopen(url, data=None, proxies=None)
+ """
+ self.shell._inspect('pdef',parameter_s, namespaces)
+
+ @line_magic
+ def pdoc(self, parameter_s='', namespaces=None):
+ """Print the docstring for an object.
+
+ If the given object is a class, it will print both the class and the
+ constructor docstrings."""
+ self.shell._inspect('pdoc',parameter_s, namespaces)
+
+ @line_magic
+ def psource(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the source code for an object."""
+ if not parameter_s:
+ raise UsageError('Missing object name.')
+ self.shell._inspect('psource',parameter_s, namespaces)
+
+ @line_magic
+ def pfile(self, parameter_s='', namespaces=None):
+ """Print (or run through pager) the file where an object is defined.
+
+ The file opens at the line where the object definition begins. IPython
+ will honor the environment variable PAGER if set, and otherwise will
+ do its best to print the file in a convenient form.
+
+ If the given argument is not an object currently defined, IPython will
+ try to interpret it as a filename (automatically adding a .py extension
+ if needed). You can thus use %pfile as a syntax highlighting code
+ viewer."""
+
+ # first interpret argument as an object name
+ out = self.shell._inspect('pfile',parameter_s, namespaces)
+ # if not, try the input as a filename
+ if out == 'not found':
+ try:
+ filename = get_py_filename(parameter_s)
+ except IOError as msg:
+ print(msg)
+ return
+ page.page(self.shell.pycolorize(read_py_file(filename, skip_encoding_cookie=False)))
+
+ @line_magic
+ def psearch(self, parameter_s=''):
+ """Search for object in namespaces by wildcard.
+
+ %psearch [options] PATTERN [OBJECT TYPE]
+
+ Note: ? can be used as a synonym for %psearch, at the beginning or at
+ the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
+ rest of the command line must be unchanged (options come first), so
+ for example the following forms are equivalent
+
+ %psearch -i a* function
+ -i a* function?
+ ?-i a* function
+
+ Arguments:
+
+ PATTERN
+
+ where PATTERN is a string containing * as a wildcard similar to its
+ use in a shell. The pattern is matched in all namespaces on the
+ search path. By default objects starting with a single _ are not
+          matched; many IPython-generated objects have a single
+ underscore. The default is case insensitive matching. Matching is
+ also done on the attributes of objects and not only on the objects
+ in a module.
+
+ [OBJECT TYPE]
+
+ Is the name of a python type from the types module. The name is
+ given in lowercase without the ending type, ex. StringType is
+ written string. By adding a type here only objects matching the
+ given type are matched. Using all here makes the pattern match all
+ types (this is the default).
+
+ Options:
+
+ -a: makes the pattern match even objects whose names start with a
+ single underscore. These names are normally omitted from the
+ search.
+
+ -i/-c: make the pattern case insensitive/sensitive. If neither of
+ these options are given, the default is read from your configuration
+ file, with the option ``InteractiveShell.wildcards_case_sensitive``.
+ If this option is not specified in your configuration file, IPython's
+ internal default is to do a case sensitive search.
+
+ -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
+ specify can be searched in any of the following namespaces:
+ 'builtin', 'user', 'user_global','internal', 'alias', where
+ 'builtin' and 'user' are the search defaults. Note that you should
+ not use quotes when specifying namespaces.
+
+ 'Builtin' contains the python module builtin, 'user' contains all
+        user data, 'alias' only contains the shell aliases and no python
+ objects, 'internal' contains objects used by IPython. The
+ 'user_global' namespace is only used by embedded IPython instances,
+ and it contains module-level globals. You can add namespaces to the
+ search with -s or exclude them with -e (these options can be given
+ more than once).
+
+ Examples
+ --------
+ ::
+
+ %psearch a* -> objects beginning with an a
+ %psearch -e builtin a* -> objects NOT in the builtin space starting in a
+ %psearch a* function -> all functions beginning with an a
+ %psearch re.e* -> objects beginning with an e in module re
+ %psearch r*.e* -> objects that start with e in modules starting in r
+ %psearch r*.* string -> all strings in modules beginning with r
+
+ Case sensitive search::
+
+          %psearch -c a*       list all objects beginning with lower case a
+
+ Show objects beginning with a single _::
+
+ %psearch -a _* list objects beginning with a single underscore
+ """
+ try:
+ parameter_s.encode('ascii')
+ except UnicodeEncodeError:
+ print('Python identifiers can only contain ascii characters.')
+ return
+
+ # default namespaces to be searched
+ def_search = ['user_local', 'user_global', 'builtin']
+
+ # Process options/args
+ opts,args = self.parse_options(parameter_s,'cias:e:',list_all=True)
+ opt = opts.get
+ shell = self.shell
+ psearch = shell.inspector.psearch
+
+ # select case options
+ if 'i' in opts:
+ ignore_case = True
+ elif 'c' in opts:
+ ignore_case = False
+ else:
+ ignore_case = not shell.wildcards_case_sensitive
+
+ # Build list of namespaces to search from user options
+ def_search.extend(opt('s',[]))
+        ns_exclude = opt('e', [])
+ ns_search = [nm for nm in def_search if nm not in ns_exclude]
+
+ # Call the actual search
+ try:
+ psearch(args,shell.ns_table,ns_search,
+ show_all=opt('a'),ignore_case=ignore_case)
+ except:
+ shell.showtraceback()
+
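The actual search is delegated to shell.inspector.psearch; for intuition only, the wildcard semantics can be sketched with the stdlib (the sample namespace below is made up):

    from __future__ import print_function
    import fnmatch

    # illustrative only: shell-style wildcard matching over a namespace dict,
    # skipping single-underscore names unless show_all is set (the -a option)
    def simple_psearch(pattern, namespace, show_all=False):
        for name in sorted(namespace):
            if name.startswith('_') and not show_all:
                continue
            if fnmatch.fnmatchcase(name, pattern):
                print(name)

    ns = {'alpha': 1, 'abs_path': '/tmp', '_hidden': None, 'beta': 2}
    simple_psearch('a*', ns)                 # abs_path, alpha
    simple_psearch('_*', ns, show_all=True)  # _hidden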
+ @skip_doctest
+ @line_magic
+ def who_ls(self, parameter_s=''):
+ """Return a sorted list of all interactive variables.
+
+ If arguments are given, only variables of types matching these
+ arguments are returned.
+
+ Examples
+ --------
+
+ Define two variables and list them with who_ls::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who_ls
+ Out[3]: ['alpha', 'beta']
+
+ In [4]: %who_ls int
+ Out[4]: ['alpha']
+
+ In [5]: %who_ls str
+ Out[5]: ['beta']
+ """
+
+ user_ns = self.shell.user_ns
+ user_ns_hidden = self.shell.user_ns_hidden
+ nonmatching = object() # This can never be in user_ns
+ out = [ i for i in user_ns
+ if not i.startswith('_') \
+ and (user_ns[i] is not user_ns_hidden.get(i, nonmatching)) ]
+
+ typelist = parameter_s.split()
+ if typelist:
+ typeset = set(typelist)
+ out = [i for i in out if type(user_ns[i]).__name__ in typeset]
+
+ out.sort()
+ return out
+
+ @skip_doctest
+ @line_magic
+ def who(self, parameter_s=''):
+ """Print all interactive variables, with some minimal formatting.
+
+ If any arguments are given, only variables whose type matches one of
+ these are printed. For example::
+
+ %who function str
+
+ will only list functions and strings, excluding all other types of
+ variables. To find the proper type names, simply use type(var) at a
+ command line to see how python prints type names. For example:
+
+ ::
+
+           In [1]: type('hello')
+ Out[1]: <type 'str'>
+
+ indicates that the type name for strings is 'str'.
+
+ ``%who`` always excludes executed names loaded through your configuration
+ file and things which are internal to IPython.
+
+ This is deliberate, as typically you may load many modules and the
+ purpose of %who is to show you only what you've manually defined.
+
+ Examples
+ --------
+
+ Define two variables and list them with who::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %who
+ alpha beta
+
+ In [4]: %who int
+ alpha
+
+ In [5]: %who str
+ beta
+ """
+
+ varlist = self.who_ls(parameter_s)
+ if not varlist:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+ count = 0
+ for i in varlist:
+ print(i+'\t', end=' ')
+ count += 1
+ if count > 8:
+ count = 0
+ print()
+ print()
+
+ @skip_doctest
+ @line_magic
+ def whos(self, parameter_s=''):
+ """Like %who, but gives some extra information about each variable.
+
+ The same type filtering of %who can be applied here.
+
+ For all variables, the type is printed. Additionally it prints:
+
+ - For {},[],(): their length.
+
+ - For numpy arrays, a summary with shape, number of
+ elements, typecode and size in memory.
+
+ - Everything else: a string representation, snipping their middle if
+ too long.
+
+ Examples
+ --------
+
+ Define two variables and list them with whos::
+
+ In [1]: alpha = 123
+
+ In [2]: beta = 'test'
+
+ In [3]: %whos
+ Variable Type Data/Info
+ --------------------------------
+ alpha int 123
+ beta str test
+ """
+
+ varnames = self.who_ls(parameter_s)
+ if not varnames:
+ if parameter_s:
+ print('No variables match your requested type.')
+ else:
+ print('Interactive namespace is empty.')
+ return
+
+ # if we have variables, move on...
+
+ # for these types, show len() instead of data:
+ seq_types = ['dict', 'list', 'tuple']
+
+ # for numpy arrays, display summary info
+ ndarray_type = None
+ if 'numpy' in sys.modules:
+ try:
+ from numpy import ndarray
+ except ImportError:
+ pass
+ else:
+ ndarray_type = ndarray.__name__
+
+ # Find all variable names and types so we can figure out column sizes
+
+ # some types are well known and can be shorter
+ abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
+ def type_name(v):
+ tn = type(v).__name__
+ return abbrevs.get(tn,tn)
+
+ varlist = [self.shell.user_ns[n] for n in varnames]
+
+ typelist = []
+ for vv in varlist:
+ tt = type_name(vv)
+
+ if tt=='instance':
+ typelist.append( abbrevs.get(str(vv.__class__),
+ str(vv.__class__)))
+ else:
+ typelist.append(tt)
+
+ # column labels and # of spaces as separator
+ varlabel = 'Variable'
+ typelabel = 'Type'
+ datalabel = 'Data/Info'
+ colsep = 3
+ # variable format strings
+ vformat = "{0:<{varwidth}}{1:<{typewidth}}"
+ aformat = "%s: %s elems, type `%s`, %s bytes"
+ # find the size of the columns to format the output nicely
+ varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
+ typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
+ # table header
+ print(varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
+ ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1))
+ # and the table itself
+ kb = 1024
+ Mb = 1048576 # kb**2
+ for vname,var,vtype in zip(varnames,varlist,typelist):
+ print(vformat.format(vname, vtype, varwidth=varwidth, typewidth=typewidth), end=' ')
+ if vtype in seq_types:
+ print("n="+str(len(var)))
+            elif vtype == ndarray_type:
+                # numpy array: report shape, element count, dtype and byte size
+                vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
+                vsize = var.size
+                vbytes = vsize*var.itemsize
+                vdtype = var.dtype
+
+ if vbytes < 100000:
+ print(aformat % (vshape, vsize, vdtype, vbytes))
+ else:
+ print(aformat % (vshape, vsize, vdtype, vbytes), end=' ')
+ if vbytes < Mb:
+ print('(%s kb)' % (vbytes/kb,))
+ else:
+ print('(%s Mb)' % (vbytes/Mb,))
+ else:
+ try:
+ vstr = str(var)
+ except UnicodeEncodeError:
+ vstr = unicode_type(var).encode(DEFAULT_ENCODING,
+ 'backslashreplace')
+ except:
+ vstr = "<object with id %d (str() failed)>" % id(var)
+ vstr = vstr.replace('\n', '\\n')
+ if len(vstr) < 50:
+ print(vstr)
+ else:
+ print(vstr[:25] + "<...>" + vstr[-25:])
+
+ @line_magic
+ def reset(self, parameter_s=''):
+ """Resets the namespace by removing all names defined by the user, if
+ called without arguments, or by removing some types of objects, such
+ as everything currently in IPython's In[] and Out[] containers (see
+ the parameters for details).
+
+ Parameters
+ ----------
+ -f : force reset without asking for confirmation.
+
+ -s : 'Soft' reset: Only clears your namespace, leaving history intact.
+ References to objects may be kept. By default (without this option),
+ we do a 'hard' reset, giving you a new session and removing all
+ references to objects from the current session.
+
+ in : reset input history
+
+ out : reset output history
+
+ dhist : reset directory history
+
+ array : reset only variables that are NumPy arrays
+
+ See Also
+ --------
+ reset_selective : invoked as ``%reset_selective``
+
+ Examples
+ --------
+ ::
+
+ In [6]: a = 1
+
+ In [7]: a
+ Out[7]: 1
+
+ In [8]: 'a' in _ip.user_ns
+ Out[8]: True
+
+ In [9]: %reset -f
+
+ In [1]: 'a' in _ip.user_ns
+ Out[1]: False
+
+ In [2]: %reset -f in
+ Flushing input history
+
+ In [3]: %reset -f dhist in
+ Flushing directory history
+ Flushing input history
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+ opts, args = self.parse_options(parameter_s,'sf', mode='list')
+ if 'f' in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])?",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+
+ if 's' in opts: # Soft reset
+ user_ns = self.shell.user_ns
+ for i in self.who_ls():
+                del user_ns[i]
+ elif len(args) == 0: # Hard reset
+ self.shell.reset(new_session = False)
+
+        # reset in/out/dhist/array: previously extensions/clearcmd.py
+ ip = self.shell
+ user_ns = self.shell.user_ns # local lookup, heavily used
+
+ for target in args:
+ target = target.lower() # make matches case insensitive
+ if target == 'out':
+ print("Flushing output cache (%d entries)" % len(user_ns['_oh']))
+ self.shell.displayhook.flush()
+
+ elif target == 'in':
+ print("Flushing input history")
+ pc = self.shell.displayhook.prompt_count + 1
+ for n in range(1, pc):
+ key = '_i'+repr(n)
+ user_ns.pop(key,None)
+ user_ns.update(dict(_i=u'',_ii=u'',_iii=u''))
+ hm = ip.history_manager
+            # don't delete these, as %save and %macro depend on the
+            # length of these lists being preserved
+ hm.input_hist_parsed[:] = [''] * pc
+ hm.input_hist_raw[:] = [''] * pc
+ # hm has internal machinery for _i,_ii,_iii, clear it out
+ hm._i = hm._ii = hm._iii = hm._i00 = u''
+
+ elif target == 'array':
+ # Support cleaning up numpy arrays
+ try:
+ from numpy import ndarray
+ # This must be done with items and not iteritems because
+ # we're going to modify the dict in-place.
+ for x,val in list(user_ns.items()):
+ if isinstance(val,ndarray):
+ del user_ns[x]
+ except ImportError:
+ print("reset array only works if Numpy is available.")
+
+ elif target == 'dhist':
+ print("Flushing directory history")
+ del user_ns['_dh'][:]
+
+ else:
+ print("Don't know how to reset ", end=' ')
+ print(target + ", please run `%reset?` for details")
+
+ gc.collect()
+
+ @line_magic
+ def reset_selective(self, parameter_s=''):
+ """Resets the namespace by removing names defined by the user.
+
+ Input/Output history are left around in case you need them.
+
+ %reset_selective [-f] regex
+
+        No action is taken if regex is not included.
+
+ Options
+ -f : force reset without asking for confirmation.
+
+ See Also
+ --------
+ reset : invoked as ``%reset``
+
+ Examples
+ --------
+
+ We first fully reset the namespace so your output looks identical to
+ this example for pedagogical reasons; in practice you do not need a
+ full reset::
+
+ In [1]: %reset -f
+
+ Now, with a clean namespace we can make a few variables and use
+ ``%reset_selective`` to only delete names that match our regexp::
+
+ In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
+
+ In [3]: who_ls
+ Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
+
+ In [4]: %reset_selective -f b[2-3]m
+
+ In [5]: who_ls
+ Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [6]: %reset_selective -f d
+
+ In [7]: who_ls
+ Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
+
+ In [8]: %reset_selective -f c
+
+ In [9]: who_ls
+ Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
+
+ In [10]: %reset_selective -f b
+
+ In [11]: who_ls
+ Out[11]: ['a']
+
+ Notes
+ -----
+ Calling this magic from clients that do not implement standard input,
+ such as the ipython notebook interface, will reset the namespace
+ without confirmation.
+ """
+
+ opts, regex = self.parse_options(parameter_s,'f')
+
+ if 'f' in opts:
+ ans = True
+ else:
+ try:
+ ans = self.shell.ask_yes_no(
+ "Once deleted, variables cannot be recovered. Proceed (y/[n])? ",
+ default='n')
+ except StdinNotImplementedError:
+ ans = True
+ if not ans:
+ print('Nothing done.')
+ return
+ user_ns = self.shell.user_ns
+ if not regex:
+ print('No regex pattern specified. Nothing done.')
+ return
+ else:
+ try:
+ m = re.compile(regex)
+ except TypeError:
+ raise TypeError('regex must be a string or compiled pattern')
+ for i in self.who_ls():
+ if m.search(i):
+                    del user_ns[i]
+
+ @line_magic
+ def xdel(self, parameter_s=''):
+ """Delete a variable, trying to clear it from anywhere that
+ IPython's machinery has references to it. By default, this uses
+ the identity of the named object in the user namespace to remove
+ references held under other names. The object is also removed
+ from the output history.
+
+ Options
+ -n : Delete the specified name from all namespaces, without
+ checking their identity.
+ """
+ opts, varname = self.parse_options(parameter_s,'n')
+ try:
+ self.shell.del_var(varname, ('n' in opts))
+ except (NameError, ValueError) as e:
+ print(type(e).__name__ +": "+ str(e))
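The identity-based cleanup that %xdel describes (removing every name bound to the same object) can be pictured in a few lines of plain Python. This is a sketch only; the real logic lives in InteractiveShell.del_var, which also clears the output history:

    # sketch: drop every name in a namespace bound to the very same object
    def xdel_sketch(user_ns, varname):
        obj = user_ns[varname]
        for name in [n for n, v in list(user_ns.items()) if v is obj]:
            del user_ns[name]

    ns = {}
    ns['a'] = ns['b'] = [1, 2, 3]   # two names, one object
    xdel_sketch(ns, 'a')
    assert ns == {}                 # both references are gone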
diff --git a/contrib/python/ipython/py2/IPython/core/magics/osm.py b/contrib/python/ipython/py2/IPython/core/magics/osm.py
index e4c3bbc7b6..352cf2d451 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/osm.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/osm.py
@@ -1,790 +1,790 @@
-"""Implementation of magic functions for interaction with the OS.
-
-Note: this module is named 'osm' instead of 'os' to avoid a collision with the
-builtin.
-"""
-from __future__ import print_function
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import io
-import os
-import re
-import sys
-from pprint import pformat
-
-# Our own packages
-from IPython.core import magic_arguments
-from IPython.core import oinspect
-from IPython.core import page
-from IPython.core.alias import AliasError, Alias
-from IPython.core.error import UsageError
-from IPython.core.magic import (
- Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
-)
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils.openpy import source_to_unicode
-from IPython.utils.process import abbrev_cwd
-from IPython.utils import py3compat
-from IPython.utils.py3compat import unicode_type
-from IPython.utils.terminal import set_term_title
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-@magics_class
-class OSMagics(Magics):
- """Magics to interact with the underlying OS (shell-type functionality).
- """
-
- @skip_doctest
- @line_magic
- def alias(self, parameter_s=''):
- """Define an alias for a system command.
-
- '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
-
- Then, typing 'alias_name params' will execute the system command 'cmd
- params' (from your underlying operating system).
-
- Aliases have lower precedence than magic functions and Python normal
- variables, so if 'foo' is both a Python variable and an alias, the
- alias can not be executed until 'del foo' removes the Python variable.
-
- You can use the %l specifier in an alias definition to represent the
- whole line when the alias is called. For example::
-
- In [2]: alias bracket echo "Input in brackets: <%l>"
- In [3]: bracket hello world
- Input in brackets: <hello world>
-
- You can also define aliases with parameters using %s specifiers (one
- per parameter)::
-
- In [1]: alias parts echo first %s second %s
- In [2]: %parts A B
- first A second B
- In [3]: %parts A
- Incorrect number of arguments: 2 expected.
- parts is an alias to: 'echo first %s second %s'
-
- Note that %l and %s are mutually exclusive. You can only use one or
- the other in your aliases.
-
- Aliases expand Python variables just like system calls using ! or !!
- do: all expressions prefixed with '$' get expanded. For details of
- the semantic rules, see PEP-215:
- http://www.python.org/peps/pep-0215.html. This is the library used by
- IPython for variable expansion. If you want to access a true shell
- variable, an extra $ is necessary to prevent its expansion by
- IPython::
-
- In [6]: alias show echo
- In [7]: PATH='A Python string'
- In [8]: show $PATH
- A Python string
- In [9]: show $$PATH
- /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
-
-        You can use the alias facility to access all of $PATH. See the %rehashx
- function, which automatically creates aliases for the contents of your
- $PATH.
-
- If called with no parameters, %alias prints the current alias table."""
-
- par = parameter_s.strip()
- if not par:
- aliases = sorted(self.shell.alias_manager.aliases)
- # stored = self.shell.db.get('stored_aliases', {} )
- # for k, v in stored:
- # atab.append(k, v[0])
-
- print("Total number of aliases:", len(aliases))
- sys.stdout.flush()
- return aliases
-
- # Now try to define a new one
- try:
- alias,cmd = par.split(None, 1)
-        except ValueError:
- print(oinspect.getdoc(self.alias))
- return
-
- try:
- self.shell.alias_manager.define_alias(alias, cmd)
- except AliasError as e:
- print(e)
- # end magic_alias
-
- @line_magic
- def unalias(self, parameter_s=''):
- """Remove an alias"""
-
- aname = parameter_s.strip()
- try:
- self.shell.alias_manager.undefine_alias(aname)
- except ValueError as e:
- print(e)
- return
-
- stored = self.shell.db.get('stored_aliases', {} )
- if aname in stored:
- print("Removing %stored alias",aname)
- del stored[aname]
- self.shell.db['stored_aliases'] = stored
-
- @line_magic
- def rehashx(self, parameter_s=''):
- """Update the alias table with all executable files in $PATH.
-
- rehashx explicitly checks that every entry in $PATH is a file
- with execute access (os.X_OK).
-
- Under Windows, it checks executability as a match against a
- '|'-separated string of extensions, stored in the IPython config
- variable win_exec_ext. This defaults to 'exe|com|bat'.
-
- This function also resets the root module cache of module completer,
- used on slow filesystems.
- """
- from IPython.core.alias import InvalidAliasError
-
- # for the benefit of module completer in ipy_completers.py
- del self.shell.db['rootmodules_cache']
-
- path = [os.path.abspath(os.path.expanduser(p)) for p in
- os.environ.get('PATH','').split(os.pathsep)]
-
- syscmdlist = []
- # Now define isexec in a cross platform manner.
- if os.name == 'posix':
- isexec = lambda fname:os.path.isfile(fname) and \
- os.access(fname,os.X_OK)
- else:
- try:
- winext = os.environ['pathext'].replace(';','|').replace('.','')
- except KeyError:
- winext = 'exe|com|bat|py'
- if 'py' not in winext:
- winext += '|py'
- execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
- isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
- savedir = py3compat.getcwd()
-
- # Now walk the paths looking for executables to alias.
- try:
- # write the whole loop for posix/Windows so we don't have an if in
- # the innermost part
- if os.name == 'posix':
- for pdir in path:
- try:
- os.chdir(pdir)
- dirlist = os.listdir(pdir)
- except OSError:
- continue
- for ff in dirlist:
- if isexec(ff):
- try:
- # Removes dots from the name since ipython
- # will assume names with dots to be python.
- if not self.shell.alias_manager.is_alias(ff):
- self.shell.alias_manager.define_alias(
- ff.replace('.',''), ff)
- except InvalidAliasError:
- pass
- else:
- syscmdlist.append(ff)
- else:
- no_alias = Alias.blacklist
- for pdir in path:
- try:
- os.chdir(pdir)
- dirlist = os.listdir(pdir)
- except OSError:
- continue
- for ff in dirlist:
- base, ext = os.path.splitext(ff)
- if isexec(ff) and base.lower() not in no_alias:
- if ext.lower() == '.exe':
- ff = base
- try:
- # Removes dots from the name since ipython
- # will assume names with dots to be python.
- self.shell.alias_manager.define_alias(
- base.lower().replace('.',''), ff)
- except InvalidAliasError:
- pass
- syscmdlist.append(ff)
- self.shell.db['syscmdlist'] = syscmdlist
- finally:
- os.chdir(savedir)
-
- @skip_doctest
- @line_magic
- def pwd(self, parameter_s=''):
- """Return the current working directory path.
-
- Examples
- --------
- ::
-
- In [9]: pwd
- Out[9]: '/home/tsuser/sprint/ipython'
- """
- return py3compat.getcwd()
-
- @skip_doctest
- @line_magic
- def cd(self, parameter_s=''):
- """Change the current working directory.
-
- This command automatically maintains an internal list of directories
- you visit during your IPython session, in the variable _dh. The
- command %dhist shows this history nicely formatted. You can also
- do 'cd -<tab>' to see directory history conveniently.
-
- Usage:
-
- cd 'dir': changes to directory 'dir'.
-
- cd -: changes to the last visited directory.
-
- cd -<n>: changes to the n-th directory in the directory history.
-
- cd --foo: change to directory that matches 'foo' in history
-
- cd -b <bookmark_name>: jump to a bookmark set by %bookmark
- (note: cd <bookmark_name> is enough if there is no
- directory <bookmark_name>, but a bookmark with the name exists.)
- 'cd -b <tab>' allows you to tab-complete bookmark names.
-
- Options:
-
- -q: quiet. Do not print the working directory after the cd command is
- executed. By default IPython's cd command does print this directory,
- since the default prompts do not display path information.
-
- Note that !cd doesn't work for this purpose because the shell where
- !command runs is immediately discarded after executing 'command'.
-
- Examples
- --------
- ::
-
- In [10]: cd parent/child
- /home/tsuser/parent/child
- """
-
- oldcwd = py3compat.getcwd()
- numcd = re.match(r'(-)(\d+)$',parameter_s)
- # jump in directory history by number
- if numcd:
- nn = int(numcd.group(2))
- try:
- ps = self.shell.user_ns['_dh'][nn]
- except IndexError:
- print('The requested directory does not exist in history.')
- return
- else:
- opts = {}
- elif parameter_s.startswith('--'):
- ps = None
- fallback = None
- pat = parameter_s[2:]
- dh = self.shell.user_ns['_dh']
- # first search only by basename (last component)
- for ent in reversed(dh):
- if pat in os.path.basename(ent) and os.path.isdir(ent):
- ps = ent
- break
-
- if fallback is None and pat in ent and os.path.isdir(ent):
- fallback = ent
-
- # if we have no last part match, pick the first full path match
- if ps is None:
- ps = fallback
-
- if ps is None:
- print("No matching entry in directory history")
- return
- else:
- opts = {}
-
-
- else:
+"""Implementation of magic functions for interaction with the OS.
+
+Note: this module is named 'osm' instead of 'os' to avoid a collision with the
+builtin.
+"""
+from __future__ import print_function
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import io
+import os
+import re
+import sys
+from pprint import pformat
+
+# Our own packages
+from IPython.core import magic_arguments
+from IPython.core import oinspect
+from IPython.core import page
+from IPython.core.alias import AliasError, Alias
+from IPython.core.error import UsageError
+from IPython.core.magic import (
+ Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
+)
+from IPython.testing.skipdoctest import skip_doctest
+from IPython.utils.openpy import source_to_unicode
+from IPython.utils.process import abbrev_cwd
+from IPython.utils import py3compat
+from IPython.utils.py3compat import unicode_type
+from IPython.utils.terminal import set_term_title
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+@magics_class
+class OSMagics(Magics):
+ """Magics to interact with the underlying OS (shell-type functionality).
+ """
+
+ @skip_doctest
+ @line_magic
+ def alias(self, parameter_s=''):
+ """Define an alias for a system command.
+
+ '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
+
+ Then, typing 'alias_name params' will execute the system command 'cmd
+ params' (from your underlying operating system).
+
+ Aliases have lower precedence than magic functions and Python normal
+ variables, so if 'foo' is both a Python variable and an alias, the
+ alias can not be executed until 'del foo' removes the Python variable.
+
+ You can use the %l specifier in an alias definition to represent the
+ whole line when the alias is called. For example::
+
+ In [2]: alias bracket echo "Input in brackets: <%l>"
+ In [3]: bracket hello world
+ Input in brackets: <hello world>
+
+ You can also define aliases with parameters using %s specifiers (one
+ per parameter)::
+
+ In [1]: alias parts echo first %s second %s
+ In [2]: %parts A B
+ first A second B
+ In [3]: %parts A
+ Incorrect number of arguments: 2 expected.
+ parts is an alias to: 'echo first %s second %s'
+
+ Note that %l and %s are mutually exclusive. You can only use one or
+ the other in your aliases.
+
+ Aliases expand Python variables just like system calls using ! or !!
+ do: all expressions prefixed with '$' get expanded. For details of
+ the semantic rules, see PEP-215:
+ http://www.python.org/peps/pep-0215.html. This is the library used by
+ IPython for variable expansion. If you want to access a true shell
+ variable, an extra $ is necessary to prevent its expansion by
+ IPython::
+
+ In [6]: alias show echo
+ In [7]: PATH='A Python string'
+ In [8]: show $PATH
+ A Python string
+ In [9]: show $$PATH
+ /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
+
+        You can use the alias facility to access all of $PATH. See the %rehashx
+ function, which automatically creates aliases for the contents of your
+ $PATH.
+
+ If called with no parameters, %alias prints the current alias table."""
+
+ par = parameter_s.strip()
+ if not par:
+ aliases = sorted(self.shell.alias_manager.aliases)
+ # stored = self.shell.db.get('stored_aliases', {} )
+ # for k, v in stored:
+ # atab.append(k, v[0])
+
+ print("Total number of aliases:", len(aliases))
+ sys.stdout.flush()
+ return aliases
+
+ # Now try to define a new one
+ try:
+ alias,cmd = par.split(None, 1)
+        except ValueError:
+ print(oinspect.getdoc(self.alias))
+ return
+
+ try:
+ self.shell.alias_manager.define_alias(alias, cmd)
+ except AliasError as e:
+ print(e)
+ # end magic_alias
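+
+ # Illustrative session for %alias (a sketch; the alias name and paths
+ # are hypothetical):
+ #   In [1]: %alias lsl ls -l
+ #   In [2]: lsl /tmp          # runs the system command 'ls -l /tmp'
+ #   In [3]: %alias            # with no arguments, prints the alias table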
+
+ @line_magic
+ def unalias(self, parameter_s=''):
+ """Remove an alias"""
+
+ aname = parameter_s.strip()
+ try:
+ self.shell.alias_manager.undefine_alias(aname)
+ except ValueError as e:
+ print(e)
+ return
+
+ stored = self.shell.db.get('stored_aliases', {} )
+ if aname in stored:
+ print("Removing %stored alias",aname)
+ del stored[aname]
+ self.shell.db['stored_aliases'] = stored
+
+ @line_magic
+ def rehashx(self, parameter_s=''):
+ """Update the alias table with all executable files in $PATH.
+
+ rehashx explicitly checks that every entry in $PATH is a file
+ with execute access (os.X_OK).
+
+ Under Windows, it checks executability as a match against a
+ '|'-separated string of extensions, stored in the IPython config
+ variable win_exec_ext. This defaults to 'exe|com|bat'.
+
+ This function also resets the root module cache of the module
+ completer, which is used on slow filesystems.
+ """
+ from IPython.core.alias import InvalidAliasError
+
+ # for the benefit of module completer in ipy_completers.py
+ del self.shell.db['rootmodules_cache']
+
+ path = [os.path.abspath(os.path.expanduser(p)) for p in
+ os.environ.get('PATH','').split(os.pathsep)]
+
+ syscmdlist = []
+ # Now define isexec in a cross platform manner.
+ if os.name == 'posix':
+ isexec = lambda fname:os.path.isfile(fname) and \
+ os.access(fname,os.X_OK)
+ else:
+ try:
+ winext = os.environ['pathext'].replace(';','|').replace('.','')
+ except KeyError:
+ winext = 'exe|com|bat|py'
+ if 'py' not in winext:
+ winext += '|py'
+ execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
+ isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
+ savedir = py3compat.getcwd()
+
+ # Now walk the paths looking for executables to alias.
+ try:
+ # write the whole loop for posix/Windows so we don't have an if in
+ # the innermost part
+ if os.name == 'posix':
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ dirlist = os.listdir(pdir)
+ except OSError:
+ continue
+ for ff in dirlist:
+ if isexec(ff):
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ if not self.shell.alias_manager.is_alias(ff):
+ self.shell.alias_manager.define_alias(
+ ff.replace('.',''), ff)
+ except InvalidAliasError:
+ pass
+ else:
+ syscmdlist.append(ff)
+ else:
+ no_alias = Alias.blacklist
+ for pdir in path:
+ try:
+ os.chdir(pdir)
+ dirlist = os.listdir(pdir)
+ except OSError:
+ continue
+ for ff in dirlist:
+ base, ext = os.path.splitext(ff)
+ if isexec(ff) and base.lower() not in no_alias:
+ if ext.lower() == '.exe':
+ ff = base
+ try:
+ # Removes dots from the name since ipython
+ # will assume names with dots to be python.
+ self.shell.alias_manager.define_alias(
+ base.lower().replace('.',''), ff)
+ except InvalidAliasError:
+ pass
+ syscmdlist.append(ff)
+ self.shell.db['syscmdlist'] = syscmdlist
+ finally:
+ os.chdir(savedir)
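+
+ # Illustrative effect of %rehashx (a sketch; command names are
+ # hypothetical):
+ #   In [1]: %rehashx
+ #   In [2]: grep -r TODO .    # $PATH executables now work without '!'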
+
+ @skip_doctest
+ @line_magic
+ def pwd(self, parameter_s=''):
+ """Return the current working directory path.
+
+ Examples
+ --------
+ ::
+
+ In [9]: pwd
+ Out[9]: '/home/tsuser/sprint/ipython'
+ """
+ return py3compat.getcwd()
+
+ @skip_doctest
+ @line_magic
+ def cd(self, parameter_s=''):
+ """Change the current working directory.
+
+ This command automatically maintains an internal list of directories
+ you visit during your IPython session, in the variable _dh. The
+ command %dhist shows this history nicely formatted. You can also
+ do 'cd -<tab>' to see directory history conveniently.
+
+ Usage:
+
+ cd 'dir': changes to directory 'dir'.
+
+ cd -: changes to the last visited directory.
+
+ cd -<n>: changes to the n-th directory in the directory history.
+
+ cd --foo: change to directory that matches 'foo' in history
+
+ cd -b <bookmark_name>: jump to a bookmark set by %bookmark
+ (note: cd <bookmark_name> is enough if there is no directory
+ <bookmark_name>, but a bookmark with that name exists.)
+ 'cd -b <tab>' allows you to tab-complete bookmark names.
+
+ Options:
+
+ -q: quiet. Do not print the working directory after the cd command is
+ executed. By default IPython's cd command does print this directory,
+ since the default prompts do not display path information.
+
+ Note that !cd doesn't work for this purpose because the shell where
+ !command runs is immediately discarded after executing 'command'.
+
+ Examples
+ --------
+ ::
+
+ In [10]: cd parent/child
+ /home/tsuser/parent/child
+ """
+
+ oldcwd = py3compat.getcwd()
+ numcd = re.match(r'(-)(\d+)$',parameter_s)
+ # jump in directory history by number
+ if numcd:
+ nn = int(numcd.group(2))
+ try:
+ ps = self.shell.user_ns['_dh'][nn]
+ except IndexError:
+ print('The requested directory does not exist in history.')
+ return
+ else:
+ opts = {}
+ elif parameter_s.startswith('--'):
+ ps = None
+ fallback = None
+ pat = parameter_s[2:]
+ dh = self.shell.user_ns['_dh']
+ # first search only by basename (last component)
+ for ent in reversed(dh):
+ if pat in os.path.basename(ent) and os.path.isdir(ent):
+ ps = ent
+ break
+
+ if fallback is None and pat in ent and os.path.isdir(ent):
+ fallback = ent
+
+ # if we have no last part match, pick the first full path match
+ if ps is None:
+ ps = fallback
+
+ if ps is None:
+ print("No matching entry in directory history")
+ return
+ else:
+ opts = {}
+
+
+ else:
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
- # jump to previous
- if ps == '-':
- try:
- ps = self.shell.user_ns['_dh'][-2]
- except IndexError:
- raise UsageError('%cd -: No previous directory to change to.')
- # jump to bookmark if needed
- else:
- if not os.path.isdir(ps) or 'b' in opts:
- bkms = self.shell.db.get('bookmarks', {})
-
- if ps in bkms:
- target = bkms[ps]
- print('(bookmark:%s) -> %s' % (ps, target))
- ps = target
- else:
- if 'b' in opts:
- raise UsageError("Bookmark '%s' not found. "
- "Use '%%bookmark -l' to see your bookmarks." % ps)
-
- # at this point ps should point to the target dir
- if ps:
- try:
- os.chdir(os.path.expanduser(ps))
- if hasattr(self.shell, 'term_title') and self.shell.term_title:
- set_term_title('IPython: ' + abbrev_cwd())
- except OSError:
- print(sys.exc_info()[1])
- else:
- cwd = py3compat.getcwd()
- dhist = self.shell.user_ns['_dh']
- if oldcwd != cwd:
- dhist.append(cwd)
- self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
-
- else:
- os.chdir(self.shell.home_dir)
- if hasattr(self.shell, 'term_title') and self.shell.term_title:
- set_term_title('IPython: ' + '~')
- cwd = py3compat.getcwd()
- dhist = self.shell.user_ns['_dh']
-
- if oldcwd != cwd:
- dhist.append(cwd)
- self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
- if not 'q' in opts and self.shell.user_ns['_dh']:
- print(self.shell.user_ns['_dh'][-1])
-
- @line_magic
- def env(self, parameter_s=''):
- """Get, set, or list environment variables.
-
- Usage:\\
-
- %env: lists all environment variables/values
- %env var: get value for var
- %env var val: set value for var
- %env var=val: set value for var
- %env var=$val: set value for var, using python expansion if possible
- """
- if parameter_s.strip():
- split = '=' if '=' in parameter_s else ' '
- bits = parameter_s.split(split)
- if len(bits) == 1:
- key = parameter_s.strip()
- if key in os.environ:
- return os.environ[key]
- else:
- err = "Environment does not have key: {0}".format(key)
- raise UsageError(err)
- if len(bits) > 1:
- return self.set_env(parameter_s)
- return dict(os.environ)
-
- @line_magic
- def set_env(self, parameter_s):
- """Set environment variables. Assumptions are that either "val" is a
- name in the user namespace, or val is something that evaluates to a
- string.
-
- Usage:\\
- %set_env var val: set value for var
- %set_env var=val: set value for var
- %set_env var=$val: set value for var, using python expansion if possible
- """
- split = '=' if '=' in parameter_s else ' '
- bits = parameter_s.split(split, 1)
- if not parameter_s.strip() or len(bits)<2:
- raise UsageError("usage is 'set_env var=val'")
- var = bits[0].strip()
- val = bits[1].strip()
- if re.match(r'.*\s.*', var):
- # an environment variable with whitespace is almost certainly
- # not what the user intended. what's more likely is the wrong
- # split was chosen, ie for "set_env cmd_args A=B", we chose
- # '=' for the split and should have chosen ' '. to get around
- # this, users should just assign directly to os.environ or use
- # standard magic {var} expansion.
- err = "refusing to set env var with whitespace: '{0}'"
- err = err.format(val)
- raise UsageError(err)
- os.environ[py3compat.cast_bytes_py2(var)] = py3compat.cast_bytes_py2(val)
- print('env: {0}={1}'.format(var,val))
-
- @line_magic
- def pushd(self, parameter_s=''):
- """Place the current dir on stack and change directory.
-
- Usage:\\
- %pushd ['dirname']
- """
-
- dir_s = self.shell.dir_stack
+ # jump to previous
+ if ps == '-':
+ try:
+ ps = self.shell.user_ns['_dh'][-2]
+ except IndexError:
+ raise UsageError('%cd -: No previous directory to change to.')
+ # jump to bookmark if needed
+ else:
+ if not os.path.isdir(ps) or 'b' in opts:
+ bkms = self.shell.db.get('bookmarks', {})
+
+ if ps in bkms:
+ target = bkms[ps]
+ print('(bookmark:%s) -> %s' % (ps, target))
+ ps = target
+ else:
+ if 'b' in opts:
+ raise UsageError("Bookmark '%s' not found. "
+ "Use '%%bookmark -l' to see your bookmarks." % ps)
+
+ # at this point ps should point to the target dir
+ if ps:
+ try:
+ os.chdir(os.path.expanduser(ps))
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title('IPython: ' + abbrev_cwd())
+ except OSError:
+ print(sys.exc_info()[1])
+ else:
+ cwd = py3compat.getcwd()
+ dhist = self.shell.user_ns['_dh']
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+
+ else:
+ os.chdir(self.shell.home_dir)
+ if hasattr(self.shell, 'term_title') and self.shell.term_title:
+ set_term_title('IPython: ' + '~')
+ cwd = py3compat.getcwd()
+ dhist = self.shell.user_ns['_dh']
+
+ if oldcwd != cwd:
+ dhist.append(cwd)
+ self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
+ if not 'q' in opts and self.shell.user_ns['_dh']:
+ print(self.shell.user_ns['_dh'][-1])
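+
+ # Illustrative session for %cd (a sketch; directories are hypothetical):
+ #   In [1]: cd /tmp
+ #   /tmp
+ #   In [2]: cd -              # jump back to the previous directory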
+
+ @line_magic
+ def env(self, parameter_s=''):
+ """Get, set, or list environment variables.
+
+ Usage:\\
+
+ %env: lists all environment variables/values
+ %env var: get value for var
+ %env var val: set value for var
+ %env var=val: set value for var
+ %env var=$val: set value for var, using python expansion if possible
+ """
+ if parameter_s.strip():
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split)
+ if len(bits) == 1:
+ key = parameter_s.strip()
+ if key in os.environ:
+ return os.environ[key]
+ else:
+ err = "Environment does not have key: {0}".format(key)
+ raise UsageError(err)
+ if len(bits) > 1:
+ return self.set_env(parameter_s)
+ return dict(os.environ)
+
+ @line_magic
+ def set_env(self, parameter_s):
+ """Set environment variables. Assumptions are that either "val" is a
+ name in the user namespace, or val is something that evaluates to a
+ string.
+
+ Usage:\\
+ %set_env var val: set value for var
+ %set_env var=val: set value for var
+ %set_env var=$val: set value for var, using python expansion if possible
+ """
+ split = '=' if '=' in parameter_s else ' '
+ bits = parameter_s.split(split, 1)
+ if not parameter_s.strip() or len(bits)<2:
+ raise UsageError("usage is 'set_env var=val'")
+ var = bits[0].strip()
+ val = bits[1].strip()
+ if re.match(r'.*\s.*', var):
+ # an environment variable with whitespace is almost certainly
+ # not what the user intended. what's more likely is the wrong
+ # split was chosen, ie for "set_env cmd_args A=B", we chose
+ # '=' for the split and should have chosen ' '. to get around
+ # this, users should just assign directly to os.environ or use
+ # standard magic {var} expansion.
+ err = "refusing to set env var with whitespace: '{0}'"
+ err = err.format(val)
+ raise UsageError(err)
+ os.environ[py3compat.cast_bytes_py2(var)] = py3compat.cast_bytes_py2(val)
+ print('env: {0}={1}'.format(var,val))
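+
+ # Illustrative session for %set_env (a sketch; the value is hypothetical):
+ #   In [1]: %set_env MY_VAR=hello
+ #   env: MY_VAR=hello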
+
+ @line_magic
+ def pushd(self, parameter_s=''):
+ """Place the current dir on stack and change directory.
+
+ Usage:\\
+ %pushd ['dirname']
+ """
+
+ dir_s = self.shell.dir_stack
tgt = os.path.expanduser(parameter_s)
- cwd = py3compat.getcwd().replace(self.shell.home_dir,'~')
- if tgt:
- self.cd(parameter_s)
- dir_s.insert(0,cwd)
- return self.shell.magic('dirs')
-
- @line_magic
- def popd(self, parameter_s=''):
- """Change to directory popped off the top of the stack.
- """
- if not self.shell.dir_stack:
- raise UsageError("%popd on empty stack")
- top = self.shell.dir_stack.pop(0)
- self.cd(top)
- print("popd ->",top)
-
- @line_magic
- def dirs(self, parameter_s=''):
- """Return the current directory stack."""
-
- return self.shell.dir_stack
-
- @line_magic
- def dhist(self, parameter_s=''):
- """Print your history of visited directories.
-
- %dhist -> print full history\\
- %dhist n -> print last n entries only\\
- %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
-
- This history is automatically maintained by the %cd command, and
- always available as the global list variable _dh. You can use %cd -<n>
- to go to directory number <n>.
-
- Note that most of the time, you should view directory history by entering
- cd -<TAB>.
-
- """
-
- dh = self.shell.user_ns['_dh']
- if parameter_s:
- try:
- args = map(int,parameter_s.split())
- except ValueError:
- self.arg_err(self.dhist)
- return
- if len(args) == 1:
- ini,fin = max(len(dh)-(args[0]),0),len(dh)
- elif len(args) == 2:
- ini,fin = args
- fin = min(fin, len(dh))
- else:
- self.arg_err(self.dhist)
- return
- else:
- ini,fin = 0,len(dh)
- print('Directory history (kept in _dh)')
- for i in range(ini, fin):
- print("%d: %s" % (i, dh[i]))
-
- @skip_doctest
- @line_magic
- def sc(self, parameter_s=''):
- """Shell capture - run shell command and capture output (DEPRECATED use !).
-
- DEPRECATED. Suboptimal, retained for backwards compatibility.
-
- You should use the form 'var = !command' instead. Example:
-
- "%sc -l myfiles = ls ~" should now be written as
-
- "myfiles = !ls ~"
-
- myfiles.s, myfiles.l and myfiles.n still apply as documented
- below.
-
- --
- %sc [options] varname=command
-
- IPython will run the given command using commands.getoutput(), and
- will then update the user's interactive namespace with a variable
- called varname, containing the value of the call. Your command can
- contain shell wildcards, pipes, etc.
-
- The '=' sign in the syntax is mandatory, and the variable name you
- supply must follow Python's standard conventions for valid names.
-
- (A special format without variable name exists for internal use)
-
- Options:
-
- -l: list output. Split the output on newlines into a list before
- assigning it to the given variable. By default the output is stored
- as a single string.
-
- -v: verbose. Print the contents of the variable.
-
- In most cases you should not need to split as a list, because the
- returned value is a special type of string which can automatically
- provide its contents either as a list (split on newlines) or as a
- space-separated string. These are convenient, respectively, either
- for sequential processing or to be passed to a shell command.
-
- For example::
-
- # Capture into variable a
- In [1]: sc a=ls *py
-
- # a is a string with embedded newlines
- In [2]: a
- Out[2]: 'setup.py\\nwin32_manual_post_install.py'
-
- # which can be seen as a list:
- In [3]: a.l
- Out[3]: ['setup.py', 'win32_manual_post_install.py']
-
- # or as a whitespace-separated string:
- In [4]: a.s
- Out[4]: 'setup.py win32_manual_post_install.py'
-
- # a.s is useful to pass as a single command line:
- In [5]: !wc -l $a.s
- 146 setup.py
- 130 win32_manual_post_install.py
- 276 total
-
- # while the list form is useful to loop over:
- In [6]: for f in a.l:
- ...: !wc -l $f
- ...:
- 146 setup.py
- 130 win32_manual_post_install.py
-
- Similarly, the lists returned by the -l option are also special, in
- the sense that you can equally invoke the .s attribute on them to
- automatically get a whitespace-separated string from their contents::
-
- In [7]: sc -l b=ls *py
-
- In [8]: b
- Out[8]: ['setup.py', 'win32_manual_post_install.py']
-
- In [9]: b.s
- Out[9]: 'setup.py win32_manual_post_install.py'
-
- In summary, both the lists and strings used for output capture have
- the following special attributes::
-
- .l (or .list) : value as list.
- .n (or .nlstr): value as newline-separated string.
- .s (or .spstr): value as space-separated string.
- """
-
- opts,args = self.parse_options(parameter_s, 'lv')
- # Try to get a variable name and command to run
- try:
- # the variable name must be obtained from the parse_options
- # output, which uses shlex.split to strip options out.
- var,_ = args.split('=', 1)
- var = var.strip()
- # But the command has to be extracted from the original input
- # parameter_s, not on what parse_options returns, to avoid the
- # quote stripping which shlex.split performs on it.
- _,cmd = parameter_s.split('=', 1)
- except ValueError:
- var,cmd = '',''
- # If all looks ok, proceed
- split = 'l' in opts
- out = self.shell.getoutput(cmd, split=split)
- if 'v' in opts:
- print('%s ==\n%s' % (var, pformat(out)))
- if var:
- self.shell.user_ns.update({var:out})
- else:
- return out
-
- @line_cell_magic
- def sx(self, line='', cell=None):
- """Shell execute - run shell command and capture output (!! is short-hand).
-
- %sx command
-
- IPython will run the given command using commands.getoutput(), and
- return the result formatted as a list (split on '\\n'). Since the
- output is _returned_, it will be stored in ipython's regular output
- cache Out[N] and in the '_N' automatic variables.
-
- Notes:
-
- 1) If an input line begins with '!!', then %sx is automatically
- invoked. That is, while::
-
- !ls
-
- causes ipython to simply issue system('ls'), typing::
-
- !!ls
-
- is a shorthand equivalent to::
-
- %sx ls
-
- 2) %sx differs from %sc in that %sx automatically splits into a list,
- like '%sc -l'. The reason for this is to make it as easy as possible
- to process line-oriented shell output via further python commands.
- %sc is meant to provide much finer control, but requires more
- typing.
-
- 3) Just like %sc -l, this is a list with special attributes:
- ::
-
- .l (or .list) : value as list.
- .n (or .nlstr): value as newline-separated string.
- .s (or .spstr): value as whitespace-separated string.
-
- This is very useful when trying to use such lists as arguments to
- system commands."""
-
- if cell is None:
- # line magic
- return self.shell.getoutput(line)
- else:
- opts,args = self.parse_options(line, '', 'out=')
- output = self.shell.getoutput(cell)
- out_name = opts.get('out', opts.get('o'))
- if out_name:
- self.shell.user_ns[out_name] = output
- else:
- return output
-
- system = line_cell_magic('system')(sx)
- bang = cell_magic('!')(sx)
-
- @line_magic
- def bookmark(self, parameter_s=''):
- """Manage IPython's bookmark system.
-
- %bookmark <name> - set bookmark to current dir
- %bookmark <name> <dir> - set bookmark to <dir>
- %bookmark -l - list all bookmarks
- %bookmark -d <name> - remove bookmark
- %bookmark -r - remove all bookmarks
-
- You can later on access a bookmarked folder with::
-
- %cd -b <name>
-
- or simply '%cd <name>' if there is no directory called <name> AND
- there is such a bookmark defined.
-
- Your bookmarks persist through IPython sessions, but they are
- associated with each profile."""
-
- opts,args = self.parse_options(parameter_s,'drl',mode='list')
- if len(args) > 2:
- raise UsageError("%bookmark: too many arguments")
-
- bkms = self.shell.db.get('bookmarks',{})
-
- if 'd' in opts:
- try:
- todel = args[0]
- except IndexError:
- raise UsageError(
- "%bookmark -d: must provide a bookmark to delete")
- else:
- try:
- del bkms[todel]
- except KeyError:
- raise UsageError(
- "%%bookmark -d: Can't delete bookmark '%s'" % todel)
-
- elif 'r' in opts:
- bkms = {}
- elif 'l' in opts:
- bks = sorted(bkms)
- if bks:
- size = max(map(len, bks))
- else:
- size = 0
- fmt = '%-'+str(size)+'s -> %s'
- print('Current bookmarks:')
- for bk in bks:
- print(fmt % (bk, bkms[bk]))
- else:
- if not args:
- raise UsageError("%bookmark: You must specify the bookmark name")
- elif len(args)==1:
- bkms[args[0]] = py3compat.getcwd()
- elif len(args)==2:
- bkms[args[0]] = args[1]
- self.shell.db['bookmarks'] = bkms
-
- @line_magic
- def pycat(self, parameter_s=''):
- """Show a syntax-highlighted file through a pager.
-
- This magic is similar to the cat utility, but it will assume the file
- to be Python source and will show it with syntax highlighting.
-
- This magic command can take a local filename, a URL,
- a history range (see %history) or a macro as argument ::
-
- %pycat myscript.py
- %pycat 7-27
- %pycat myMacro
- %pycat http://www.example.com/myscript.py
- """
- if not parameter_s:
- raise UsageError('Missing filename, URL, input history range, '
- 'or macro.')
-
- try :
- cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
- except (ValueError, IOError):
- print("Error: no such file, variable, URL, history range or macro")
- return
-
- page.page(self.shell.pycolorize(source_to_unicode(cont)))
-
- @magic_arguments.magic_arguments()
- @magic_arguments.argument(
- '-a', '--append', action='store_true', default=False,
- help='Append contents of the cell to an existing file. '
- 'The file will be created if it does not exist.'
- )
- @magic_arguments.argument(
- 'filename', type=unicode_type,
- help='file to write'
- )
- @cell_magic
- def writefile(self, line, cell):
- """Write the contents of the cell to a file.
-
- The file will be overwritten unless the -a (--append) flag is specified.
- """
- args = magic_arguments.parse_argstring(self.writefile, line)
+ cwd = py3compat.getcwd().replace(self.shell.home_dir,'~')
+ if tgt:
+ self.cd(parameter_s)
+ dir_s.insert(0,cwd)
+ return self.shell.magic('dirs')
+
+ @line_magic
+ def popd(self, parameter_s=''):
+ """Change to directory popped off the top of the stack.
+ """
+ if not self.shell.dir_stack:
+ raise UsageError("%popd on empty stack")
+ top = self.shell.dir_stack.pop(0)
+ self.cd(top)
+ print("popd ->",top)
+
+ @line_magic
+ def dirs(self, parameter_s=''):
+ """Return the current directory stack."""
+
+ return self.shell.dir_stack
+
+ @line_magic
+ def dhist(self, parameter_s=''):
+ """Print your history of visited directories.
+
+ %dhist -> print full history\\
+ %dhist n -> print last n entries only\\
+ %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
+
+ This history is automatically maintained by the %cd command, and
+ always available as the global list variable _dh. You can use %cd -<n>
+ to go to directory number <n>.
+
+ Note that most of the time, you should view directory history by entering
+ cd -<TAB>.
+
+ """
+
+ dh = self.shell.user_ns['_dh']
+ if parameter_s:
+ try:
+ args = map(int,parameter_s.split())
+ except ValueError:
+ self.arg_err(self.dhist)
+ return
+ if len(args) == 1:
+ ini,fin = max(len(dh)-(args[0]),0),len(dh)
+ elif len(args) == 2:
+ ini,fin = args
+ fin = min(fin, len(dh))
+ else:
+ self.arg_err(self.dhist)
+ return
+ else:
+ ini,fin = 0,len(dh)
+ print('Directory history (kept in _dh)')
+ for i in range(ini, fin):
+ print("%d: %s" % (i, dh[i]))
+
+ @skip_doctest
+ @line_magic
+ def sc(self, parameter_s=''):
+ """Shell capture - run shell command and capture output (DEPRECATED use !).
+
+ DEPRECATED. Suboptimal, retained for backwards compatibility.
+
+ You should use the form 'var = !command' instead. Example:
+
+ "%sc -l myfiles = ls ~" should now be written as
+
+ "myfiles = !ls ~"
+
+ myfiles.s, myfiles.l and myfiles.n still apply as documented
+ below.
+
+ --
+ %sc [options] varname=command
+
+ IPython will run the given command using commands.getoutput(), and
+ will then update the user's interactive namespace with a variable
+ called varname, containing the value of the call. Your command can
+ contain shell wildcards, pipes, etc.
+
+ The '=' sign in the syntax is mandatory, and the variable name you
+ supply must follow Python's standard conventions for valid names.
+
+ (A special format without variable name exists for internal use)
+
+ Options:
+
+ -l: list output. Split the output on newlines into a list before
+ assigning it to the given variable. By default the output is stored
+ as a single string.
+
+ -v: verbose. Print the contents of the variable.
+
+ In most cases you should not need to split as a list, because the
+ returned value is a special type of string which can automatically
+ provide its contents either as a list (split on newlines) or as a
+ space-separated string. These are convenient, respectively, either
+ for sequential processing or to be passed to a shell command.
+
+ For example::
+
+ # Capture into variable a
+ In [1]: sc a=ls *py
+
+ # a is a string with embedded newlines
+ In [2]: a
+ Out[2]: 'setup.py\\nwin32_manual_post_install.py'
+
+ # which can be seen as a list:
+ In [3]: a.l
+ Out[3]: ['setup.py', 'win32_manual_post_install.py']
+
+ # or as a whitespace-separated string:
+ In [4]: a.s
+ Out[4]: 'setup.py win32_manual_post_install.py'
+
+ # a.s is useful to pass as a single command line:
+ In [5]: !wc -l $a.s
+ 146 setup.py
+ 130 win32_manual_post_install.py
+ 276 total
+
+ # while the list form is useful to loop over:
+ In [6]: for f in a.l:
+ ...: !wc -l $f
+ ...:
+ 146 setup.py
+ 130 win32_manual_post_install.py
+
+ Similarly, the lists returned by the -l option are also special, in
+ the sense that you can equally invoke the .s attribute on them to
+ automatically get a whitespace-separated string from their contents::
+
+ In [7]: sc -l b=ls *py
+
+ In [8]: b
+ Out[8]: ['setup.py', 'win32_manual_post_install.py']
+
+ In [9]: b.s
+ Out[9]: 'setup.py win32_manual_post_install.py'
+
+ In summary, both the lists and strings used for output capture have
+ the following special attributes::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as space-separated string.
+ """
+
+ opts,args = self.parse_options(parameter_s, 'lv')
+ # Try to get a variable name and command to run
+ try:
+ # the variable name must be obtained from the parse_options
+ # output, which uses shlex.split to strip options out.
+ var,_ = args.split('=', 1)
+ var = var.strip()
+ # But the command has to be extracted from the original input
+ # parameter_s, not on what parse_options returns, to avoid the
+ # quote stripping which shlex.split performs on it.
+ _,cmd = parameter_s.split('=', 1)
+ except ValueError:
+ var,cmd = '',''
+ # If all looks ok, proceed
+ split = 'l' in opts
+ out = self.shell.getoutput(cmd, split=split)
+ if 'v' in opts:
+ print('%s ==\n%s' % (var, pformat(out)))
+ if var:
+ self.shell.user_ns.update({var:out})
+ else:
+ return out
+
+ @line_cell_magic
+ def sx(self, line='', cell=None):
+ """Shell execute - run shell command and capture output (!! is short-hand).
+
+ %sx command
+
+ IPython will run the given command using commands.getoutput(), and
+ return the result formatted as a list (split on '\\n'). Since the
+ output is _returned_, it will be stored in ipython's regular output
+ cache Out[N] and in the '_N' automatic variables.
+
+ Notes:
+
+ 1) If an input line begins with '!!', then %sx is automatically
+ invoked. That is, while::
+
+ !ls
+
+ causes ipython to simply issue system('ls'), typing::
+
+ !!ls
+
+ is a shorthand equivalent to::
+
+ %sx ls
+
+ 2) %sx differs from %sc in that %sx automatically splits into a list,
+ like '%sc -l'. The reason for this is to make it as easy as possible
+ to process line-oriented shell output via further python commands.
+ %sc is meant to provide much finer control, but requires more
+ typing.
+
+ 3) Just like %sc -l, this is a list with special attributes:
+ ::
+
+ .l (or .list) : value as list.
+ .n (or .nlstr): value as newline-separated string.
+ .s (or .spstr): value as whitespace-separated string.
+
+ This is very useful when trying to use such lists as arguments to
+ system commands."""
+
+ if cell is None:
+ # line magic
+ return self.shell.getoutput(line)
+ else:
+ opts,args = self.parse_options(line, '', 'out=')
+ output = self.shell.getoutput(cell)
+ out_name = opts.get('out', opts.get('o'))
+ if out_name:
+ self.shell.user_ns[out_name] = output
+ else:
+ return output
+
+ system = line_cell_magic('system')(sx)
+ bang = cell_magic('!')(sx)
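+
+ # Illustrative use of %sx and its special result attributes (a sketch;
+ # filenames are hypothetical):
+ #   In [1]: %sx ls
+ #   Out[1]: ['setup.py', 'README']
+ #   In [2]: _.s               # the same list as one space-separated string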
+
+ @line_magic
+ def bookmark(self, parameter_s=''):
+ """Manage IPython's bookmark system.
+
+ %bookmark <name> - set bookmark to current dir
+ %bookmark <name> <dir> - set bookmark to <dir>
+ %bookmark -l - list all bookmarks
+ %bookmark -d <name> - remove bookmark
+ %bookmark -r - remove all bookmarks
+
+ You can later on access a bookmarked folder with::
+
+ %cd -b <name>
+
+ or simply '%cd <name>' if there is no directory called <name> AND
+ there is such a bookmark defined.
+
+ Your bookmarks persist through IPython sessions, but they are
+ associated with each profile."""
+
+ opts,args = self.parse_options(parameter_s,'drl',mode='list')
+ if len(args) > 2:
+ raise UsageError("%bookmark: too many arguments")
+
+ bkms = self.shell.db.get('bookmarks',{})
+
+ if 'd' in opts:
+ try:
+ todel = args[0]
+ except IndexError:
+ raise UsageError(
+ "%bookmark -d: must provide a bookmark to delete")
+ else:
+ try:
+ del bkms[todel]
+ except KeyError:
+ raise UsageError(
+ "%%bookmark -d: Can't delete bookmark '%s'" % todel)
+
+ elif 'r' in opts:
+ bkms = {}
+ elif 'l' in opts:
+ bks = sorted(bkms)
+ if bks:
+ size = max(map(len, bks))
+ else:
+ size = 0
+ fmt = '%-'+str(size)+'s -> %s'
+ print('Current bookmarks:')
+ for bk in bks:
+ print(fmt % (bk, bkms[bk]))
+ else:
+ if not args:
+ raise UsageError("%bookmark: You must specify the bookmark name")
+ elif len(args)==1:
+ bkms[args[0]] = py3compat.getcwd()
+ elif len(args)==2:
+ bkms[args[0]] = args[1]
+ self.shell.db['bookmarks'] = bkms
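+
+ # Illustrative bookmark workflow (a sketch; paths are hypothetical):
+ #   In [1]: %bookmark scratch /tmp/scratch
+ #   In [2]: %cd -b scratch
+ #   (bookmark:scratch) -> /tmp/scratch
+ #   /tmp/scratch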
+
+ @line_magic
+ def pycat(self, parameter_s=''):
+ """Show a syntax-highlighted file through a pager.
+
+ This magic is similar to the cat utility, but it will assume the file
+ to be Python source and will show it with syntax highlighting.
+
+ This magic command can take a local filename, a URL,
+ a history range (see %history) or a macro as argument ::
+
+ %pycat myscript.py
+ %pycat 7-27
+ %pycat myMacro
+ %pycat http://www.example.com/myscript.py
+ """
+ if not parameter_s:
+ raise UsageError('Missing filename, URL, input history range, '
+ 'or macro.')
+
+ try :
+ cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
+ except (ValueError, IOError):
+ print("Error: no such file, variable, URL, history range or macro")
+ return
+
+ page.page(self.shell.pycolorize(source_to_unicode(cont)))
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-a', '--append', action='store_true', default=False,
+ help='Append contents of the cell to an existing file. '
+ 'The file will be created if it does not exist.'
+ )
+ @magic_arguments.argument(
+ 'filename', type=unicode_type,
+ help='file to write'
+ )
+ @cell_magic
+ def writefile(self, line, cell):
+ """Write the contents of the cell to a file.
+
+ The file will be overwritten unless the -a (--append) flag is specified.
+ """
+ args = magic_arguments.parse_argstring(self.writefile, line)
filename = os.path.expanduser(args.filename)
- if os.path.exists(filename):
- if args.append:
- print("Appending to %s" % filename)
- else:
- print("Overwriting %s" % filename)
- else:
- print("Writing %s" % filename)
-
- mode = 'a' if args.append else 'w'
- with io.open(filename, mode, encoding='utf-8') as f:
- f.write(cell)
+ if os.path.exists(filename):
+ if args.append:
+ print("Appending to %s" % filename)
+ else:
+ print("Overwriting %s" % filename)
+ else:
+ print("Writing %s" % filename)
+
+ mode = 'a' if args.append else 'w'
+ with io.open(filename, mode, encoding='utf-8') as f:
+ f.write(cell)
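+
+# Illustrative use of %%writefile (a sketch; the filename is hypothetical):
+#   In [1]: %%writefile -a notes.txt
+#      ...: one more line
+#   Appending to notes.txt   # or 'Writing notes.txt' if it does not exist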
diff --git a/contrib/python/ipython/py2/IPython/core/magics/pylab.py b/contrib/python/ipython/py2/IPython/core/magics/pylab.py
index deec14cb7c..6c5cd68a59 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/pylab.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/pylab.py
@@ -1,167 +1,167 @@
-"""Implementation of magic functions for matplotlib/pylab support.
-"""
-from __future__ import print_function
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012 The IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Our own packages
-from traitlets.config.application import Application
-from IPython.core import magic_arguments
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.testing.skipdoctest import skip_doctest
+"""Implementation of magic functions for matplotlib/pylab support.
+"""
+from __future__ import print_function
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012 The IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Our own packages
+from traitlets.config.application import Application
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.testing.skipdoctest import skip_doctest
from warnings import warn
-from IPython.core.pylabtools import backends
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-magic_gui_arg = magic_arguments.argument(
- 'gui', nargs='?',
- help="""Name of the matplotlib backend to use %s.
- If given, the corresponding matplotlib backend is used,
- otherwise it will be matplotlib's default
- (which you can set in your matplotlib config file).
- """ % str(tuple(sorted(backends.keys())))
-)
-
-
-@magics_class
-class PylabMagics(Magics):
- """Magics related to matplotlib's pylab support"""
-
- @skip_doctest
- @line_magic
- @magic_arguments.magic_arguments()
- @magic_arguments.argument('-l', '--list', action='store_true',
- help='Show available matplotlib backends')
- @magic_gui_arg
- def matplotlib(self, line=''):
- """Set up matplotlib to work interactively.
-
- This function lets you activate matplotlib interactive support
- at any point during an IPython session. It does not import anything
- into the interactive namespace.
-
- If you are using the inline matplotlib backend in the IPython Notebook
- you can set which figure formats are enabled using the following::
-
- In [1]: from IPython.display import set_matplotlib_formats
-
- In [2]: set_matplotlib_formats('pdf', 'svg')
-
- The default for inline figures sets `bbox_inches` to 'tight'. This can
- cause discrepancies between the displayed image and the identical
- image created using `savefig`. This behavior can be disabled using the
- `%config` magic::
-
- In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
-
- In addition, see the docstring of
- `IPython.display.set_matplotlib_formats` and
- `IPython.display.set_matplotlib_close` for more information on
- changing additional behaviors of the inline backend.
-
- Examples
- --------
- To enable the inline backend for usage with the IPython Notebook::
-
- In [1]: %matplotlib inline
-
- In this case, where the matplotlib default is TkAgg::
-
- In [2]: %matplotlib
- Using matplotlib backend: TkAgg
-
- But you can explicitly request a different GUI backend::
-
- In [3]: %matplotlib qt
-
- You can list the available backends using the -l/--list option::
-
- In [4]: %matplotlib --list
- Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
- 'gtk', 'tk', 'inline']
- """
- args = magic_arguments.parse_argstring(self.matplotlib, line)
- if args.list:
- backends_list = list(backends.keys())
- print("Available matplotlib backends: %s" % backends_list)
- else:
- gui, backend = self.shell.enable_matplotlib(args.gui)
- self._show_matplotlib_backend(args.gui, backend)
-
- @skip_doctest
- @line_magic
- @magic_arguments.magic_arguments()
- @magic_arguments.argument(
- '--no-import-all', action='store_true', default=None,
- help="""Prevent IPython from performing ``import *`` into the interactive namespace.
-
- You can govern the default behavior of this flag with the
- InteractiveShellApp.pylab_import_all configurable.
- """
- )
- @magic_gui_arg
- def pylab(self, line=''):
- """Load numpy and matplotlib to work interactively.
-
- This function lets you activate pylab (matplotlib, numpy and
- interactive support) at any point during an IPython session.
-
- %pylab makes the following imports::
-
- import numpy
- import matplotlib
- from matplotlib import pylab, mlab, pyplot
- np = numpy
- plt = pyplot
-
- from IPython.display import display
- from IPython.core.pylabtools import figsize, getfigs
-
- from pylab import *
- from numpy import *
-
- If you pass `--no-import-all`, the last two `*` imports will be excluded.
-
- See the %matplotlib magic for more details about activating matplotlib
- without affecting the interactive namespace.
- """
- args = magic_arguments.parse_argstring(self.pylab, line)
- if args.no_import_all is None:
- # get default from Application
- if Application.initialized():
- app = Application.instance()
- try:
- import_all = app.pylab_import_all
- except AttributeError:
- import_all = True
- else:
- # nothing specified, no app - default True
- import_all = True
- else:
- # invert no-import flag
- import_all = not args.no_import_all
-
- gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
- self._show_matplotlib_backend(args.gui, backend)
- print ("Populating the interactive namespace from numpy and matplotlib")
- if clobbered:
- warn("pylab import has clobbered these variables: %s" % clobbered +
- "\n`%matplotlib` prevents importing * from pylab and numpy"
- )
-
- def _show_matplotlib_backend(self, gui, backend):
- """show matplotlib message backend message"""
- if not gui or gui == 'auto':
- print("Using matplotlib backend: %s" % backend)
+from IPython.core.pylabtools import backends
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+magic_gui_arg = magic_arguments.argument(
+ 'gui', nargs='?',
+ help="""Name of the matplotlib backend to use %s.
+ If given, the corresponding matplotlib backend is used,
+ otherwise it will be matplotlib's default
+ (which you can set in your matplotlib config file).
+ """ % str(tuple(sorted(backends.keys())))
+)
+
+
+@magics_class
+class PylabMagics(Magics):
+ """Magics related to matplotlib's pylab support"""
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
+ @magic_gui_arg
+ def matplotlib(self, line=''):
+ """Set up matplotlib to work interactively.
+
+ This function lets you activate matplotlib interactive support
+ at any point during an IPython session. It does not import anything
+ into the interactive namespace.
+
+ If you are using the inline matplotlib backend in the IPython Notebook
+ you can set which figure formats are enabled using the following::
+
+ In [1]: from IPython.display import set_matplotlib_formats
+
+ In [2]: set_matplotlib_formats('pdf', 'svg')
+
+ The default for inline figures sets `bbox_inches` to 'tight'. This can
+ cause discrepancies between the displayed image and the identical
+ image created using `savefig`. This behavior can be disabled using the
+ `%config` magic::
+
+ In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
+
+ In addition, see the docstring of
+ `IPython.display.set_matplotlib_formats` and
+ `IPython.display.set_matplotlib_close` for more information on
+ changing additional behaviors of the inline backend.
+
+ Examples
+ --------
+ To enable the inline backend for usage with the IPython Notebook::
+
+ In [1]: %matplotlib inline
+
+ In this case, where the matplotlib default is TkAgg::
+
+ In [2]: %matplotlib
+ Using matplotlib backend: TkAgg
+
+ But you can explicitly request a different GUI backend::
+
+ In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option::
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
+ """
+ args = magic_arguments.parse_argstring(self.matplotlib, line)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
+
+ @skip_doctest
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '--no-import-all', action='store_true', default=None,
+ help="""Prevent IPython from performing ``import *`` into the interactive namespace.
+
+ You can govern the default behavior of this flag with the
+ InteractiveShellApp.pylab_import_all configurable.
+ """
+ )
+ @magic_gui_arg
+ def pylab(self, line=''):
+ """Load numpy and matplotlib to work interactively.
+
+ This function lets you activate pylab (matplotlib, numpy and
+ interactive support) at any point during an IPython session.
+
+ %pylab makes the following imports::
+
+ import numpy
+ import matplotlib
+ from matplotlib import pylab, mlab, pyplot
+ np = numpy
+ plt = pyplot
+
+ from IPython.display import display
+ from IPython.core.pylabtools import figsize, getfigs
+
+ from pylab import *
+ from numpy import *
+
+ If you pass `--no-import-all`, the last two `*` imports will be excluded.
+
+ See the %matplotlib magic for more details about activating matplotlib
+ without affecting the interactive namespace.
+ """
+ args = magic_arguments.parse_argstring(self.pylab, line)
+ if args.no_import_all is None:
+ # get default from Application
+ if Application.initialized():
+ app = Application.instance()
+ try:
+ import_all = app.pylab_import_all
+ except AttributeError:
+ import_all = True
+ else:
+ # nothing specified, no app - default True
+ import_all = True
+ else:
+ # invert no-import flag
+ import_all = not args.no_import_all
+
+ gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
+ self._show_matplotlib_backend(args.gui, backend)
+ print ("Populating the interactive namespace from numpy and matplotlib")
+ if clobbered:
+ warn("pylab import has clobbered these variables: %s" % clobbered +
+ "\n`%matplotlib` prevents importing * from pylab and numpy"
+ )
+
+ def _show_matplotlib_backend(self, gui, backend):
+ """show matplotlib message backend message"""
+ if not gui or gui == 'auto':
+ print("Using matplotlib backend: %s" % backend)
diff --git a/contrib/python/ipython/py2/IPython/core/magics/script.py b/contrib/python/ipython/py2/IPython/core/magics/script.py
index d381d97234..3fbddc38a8 100644
--- a/contrib/python/ipython/py2/IPython/core/magics/script.py
+++ b/contrib/python/ipython/py2/IPython/core/magics/script.py
@@ -1,280 +1,280 @@
-"""Magic functions for running cells in various scripts."""
-from __future__ import print_function
-
+"""Magic functions for running cells in various scripts."""
+from __future__ import print_function
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-import errno
-import os
-import sys
-import signal
-import time
-from subprocess import Popen, PIPE
-import atexit
-
-from IPython.core import magic_arguments
-from IPython.core.magic import (
- Magics, magics_class, line_magic, cell_magic
-)
-from IPython.lib.backgroundjobs import BackgroundJobManager
-from IPython.utils import py3compat
-from IPython.utils.process import arg_split
+
+import errno
+import os
+import sys
+import signal
+import time
+from subprocess import Popen, PIPE
+import atexit
+
+from IPython.core import magic_arguments
+from IPython.core.magic import (
+ Magics, magics_class, line_magic, cell_magic
+)
+from IPython.lib.backgroundjobs import BackgroundJobManager
+from IPython.utils import py3compat
+from IPython.utils.process import arg_split
from traitlets import List, Dict, default
-
-#-----------------------------------------------------------------------------
-# Magic implementation classes
-#-----------------------------------------------------------------------------
-
-def script_args(f):
- """single decorator for adding script args"""
- args = [
- magic_arguments.argument(
- '--out', type=str,
- help="""The variable in which to store stdout from the script.
- If the script is backgrounded, this will be the stdout *pipe*,
- instead of the stdout text itself.
- """
- ),
- magic_arguments.argument(
- '--err', type=str,
- help="""The variable in which to store stderr from the script.
- If the script is backgrounded, this will be the stderr *pipe*,
- instead of the stderr text itself.
- """
- ),
- magic_arguments.argument(
- '--bg', action="store_true",
- help="""Whether to run the script in the background.
- If given, the only way to see the output of the command is
- with --out/err.
- """
- ),
- magic_arguments.argument(
- '--proc', type=str,
- help="""The variable in which to store Popen instance.
- This is used only when --bg option is given.
- """
- ),
- ]
- for arg in args:
- f = arg(f)
- return f
-
-@magics_class
-class ScriptMagics(Magics):
- """Magics for talking to scripts
-
- This defines a base `%%script` cell magic for running a cell
- with a program in a subprocess, and registers a few top-level
- magics that call %%script with common interpreters.
- """
+
+#-----------------------------------------------------------------------------
+# Magic implementation classes
+#-----------------------------------------------------------------------------
+
+def script_args(f):
+ """single decorator for adding script args"""
+ args = [
+ magic_arguments.argument(
+ '--out', type=str,
+ help="""The variable in which to store stdout from the script.
+ If the script is backgrounded, this will be the stdout *pipe*,
+ instead of the stdout text itself.
+ """
+ ),
+ magic_arguments.argument(
+ '--err', type=str,
+ help="""The variable in which to store stderr from the script.
+ If the script is backgrounded, this will be the stderr *pipe*,
+ instead of the stderr text itself.
+ """
+ ),
+ magic_arguments.argument(
+ '--bg', action="store_true",
+ help="""Whether to run the script in the background.
+ If given, the only way to see the output of the command is
+ with --out/err.
+ """
+ ),
+ magic_arguments.argument(
+ '--proc', type=str,
+ help="""The variable in which to store Popen instance.
+ This is used only when --bg option is given.
+ """
+ ),
+ ]
+ for arg in args:
+ f = arg(f)
+ return f
+
+@magics_class
+class ScriptMagics(Magics):
+ """Magics for talking to scripts
+
+ This defines a base `%%script` cell magic for running a cell
+ with a program in a subprocess, and registers a few top-level
+ magics that call %%script with common interpreters.
+ """
script_magics = List(
- help="""Extra script cell magics to define
-
- This generates simple wrappers of `%%script foo` as `%%foo`.
-
- If you want to add script magics that aren't on your path,
- specify them in script_paths
- """,
+ help="""Extra script cell magics to define
+
+ This generates simple wrappers of `%%script foo` as `%%foo`.
+
+ If you want to add script magics that aren't on your path,
+ specify them in script_paths
+ """,
).tag(config=True)
@default('script_magics')
- def _script_magics_default(self):
- """default to a common list of programs"""
-
- defaults = [
- 'sh',
- 'bash',
- 'perl',
- 'ruby',
- 'python',
- 'python2',
- 'python3',
- 'pypy',
- ]
- if os.name == 'nt':
- defaults.extend([
- 'cmd',
- ])
-
- return defaults
-
+ def _script_magics_default(self):
+ """default to a common list of programs"""
+
+ defaults = [
+ 'sh',
+ 'bash',
+ 'perl',
+ 'ruby',
+ 'python',
+ 'python2',
+ 'python3',
+ 'pypy',
+ ]
+ if os.name == 'nt':
+ defaults.extend([
+ 'cmd',
+ ])
+
+ return defaults
+
script_paths = Dict(
- help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
-
- Only necessary for items in script_magics where the default path will not
- find the right interpreter.
- """
+ help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
+
+ Only necessary for items in script_magics where the default path will not
+ find the right interpreter.
+ """
).tag(config=True)
-
- def __init__(self, shell=None):
- super(ScriptMagics, self).__init__(shell=shell)
- self._generate_script_magics()
- self.job_manager = BackgroundJobManager()
- self.bg_processes = []
- atexit.register(self.kill_bg_processes)
-
- def __del__(self):
- self.kill_bg_processes()
-
- def _generate_script_magics(self):
- cell_magics = self.magics['cell']
- for name in self.script_magics:
- cell_magics[name] = self._make_script_magic(name)
-
- def _make_script_magic(self, name):
- """make a named magic, that calls %%script with a particular program"""
- # expand to explicit path if necessary:
- script = self.script_paths.get(name, name)
-
- @magic_arguments.magic_arguments()
- @script_args
- def named_script_magic(line, cell):
- # if line, add it as cl-flags
- if line:
- line = "%s %s" % (script, line)
- else:
- line = script
- return self.shebang(line, cell)
-
- # write a basic docstring:
- named_script_magic.__doc__ = \
- """%%{name} script magic
-
- Run cells with {script} in a subprocess.
-
- This is a shortcut for `%%script {script}`
- """.format(**locals())
-
- return named_script_magic
-
- @magic_arguments.magic_arguments()
- @script_args
- @cell_magic("script")
- def shebang(self, line, cell):
- """Run a cell via a shell command
-
- The `%%script` line is like the #! line of a script,
- specifying a program (bash, perl, ruby, etc.) with which to run.
-
- The rest of the cell is run by that program.
-
- Examples
- --------
- ::
-
- In [1]: %%script bash
- ...: for i in 1 2 3; do
- ...: echo $i
- ...: done
- 1
- 2
- 3
- """
- argv = arg_split(line, posix = not sys.platform.startswith('win'))
- args, cmd = self.shebang.parser.parse_known_args(argv)
-
- try:
- p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
- except OSError as e:
- if e.errno == errno.ENOENT:
- print("Couldn't find program: %r" % cmd[0])
- return
- else:
- raise
-
- if not cell.endswith('\n'):
- cell += '\n'
- cell = cell.encode('utf8', 'replace')
- if args.bg:
- self.bg_processes.append(p)
- self._gc_bg_processes()
- if args.out:
- self.shell.user_ns[args.out] = p.stdout
- if args.err:
- self.shell.user_ns[args.err] = p.stderr
- self.job_manager.new(self._run_script, p, cell, daemon=True)
- if args.proc:
- self.shell.user_ns[args.proc] = p
- return
-
- try:
- out, err = p.communicate(cell)
- except KeyboardInterrupt:
- try:
- p.send_signal(signal.SIGINT)
- time.sleep(0.1)
- if p.poll() is not None:
- print("Process is interrupted.")
- return
- p.terminate()
- time.sleep(0.1)
- if p.poll() is not None:
- print("Process is terminated.")
- return
- p.kill()
- print("Process is killed.")
- except OSError:
- pass
- except Exception as e:
- print("Error while terminating subprocess (pid=%i): %s" \
- % (p.pid, e))
- return
- out = py3compat.bytes_to_str(out)
- err = py3compat.bytes_to_str(err)
- if args.out:
- self.shell.user_ns[args.out] = out
- else:
- sys.stdout.write(out)
- sys.stdout.flush()
- if args.err:
- self.shell.user_ns[args.err] = err
- else:
- sys.stderr.write(err)
- sys.stderr.flush()
-
- def _run_script(self, p, cell):
- """callback for running the script in the background"""
- p.stdin.write(cell)
- p.stdin.close()
- p.wait()
-
- @line_magic("killbgscripts")
- def killbgscripts(self, _nouse_=''):
- """Kill all BG processes started by %%script and its family."""
- self.kill_bg_processes()
- print("All background processes were killed.")
-
- def kill_bg_processes(self):
- """Kill all BG processes which are still running."""
+
+ def __init__(self, shell=None):
+ super(ScriptMagics, self).__init__(shell=shell)
+ self._generate_script_magics()
+ self.job_manager = BackgroundJobManager()
+ self.bg_processes = []
+ atexit.register(self.kill_bg_processes)
+
+ def __del__(self):
+ self.kill_bg_processes()
+
+ def _generate_script_magics(self):
+ cell_magics = self.magics['cell']
+ for name in self.script_magics:
+ cell_magics[name] = self._make_script_magic(name)
+
+ def _make_script_magic(self, name):
+ """make a named magic, that calls %%script with a particular program"""
+ # expand to explicit path if necessary:
+ script = self.script_paths.get(name, name)
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ def named_script_magic(line, cell):
+ # if line, add it as cl-flags
+ if line:
+ line = "%s %s" % (script, line)
+ else:
+ line = script
+ return self.shebang(line, cell)
+
+ # write a basic docstring:
+ named_script_magic.__doc__ = \
+ """%%{name} script magic
+
+ Run cells with {script} in a subprocess.
+
+ This is a shortcut for `%%script {script}`
+ """.format(**locals())
+
+ return named_script_magic
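+
+ # Illustrative effect (a sketch): with 'ruby' present in script_magics
+ # and a ruby interpreter on $PATH, the generated wrapper allows:
+ #   In [1]: %%ruby
+ #      ...: puts "hello"
+ #   hello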
+
+ @magic_arguments.magic_arguments()
+ @script_args
+ @cell_magic("script")
+ def shebang(self, line, cell):
+ """Run a cell via a shell command
+
+ The `%%script` line is like the #! line of a script,
+ specifying a program (bash, perl, ruby, etc.) with which to run.
+
+ The rest of the cell is run by that program.
+
+ Examples
+ --------
+ ::
+
+ In [1]: %%script bash
+ ...: for i in 1 2 3; do
+ ...: echo $i
+ ...: done
+ 1
+ 2
+ 3
+ """
+ argv = arg_split(line, posix = not sys.platform.startswith('win'))
+ args, cmd = self.shebang.parser.parse_known_args(argv)
+
+ try:
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ print("Couldn't find program: %r" % cmd[0])
+ return
+ else:
+ raise
+
+ if not cell.endswith('\n'):
+ cell += '\n'
+ cell = cell.encode('utf8', 'replace')
+ if args.bg:
+ self.bg_processes.append(p)
+ self._gc_bg_processes()
+ if args.out:
+ self.shell.user_ns[args.out] = p.stdout
+ if args.err:
+ self.shell.user_ns[args.err] = p.stderr
+ self.job_manager.new(self._run_script, p, cell, daemon=True)
+ if args.proc:
+ self.shell.user_ns[args.proc] = p
+ return
+
+ try:
+ out, err = p.communicate(cell)
+ except KeyboardInterrupt:
+ try:
+ p.send_signal(signal.SIGINT)
+ time.sleep(0.1)
+ if p.poll() is not None:
+ print("Process is interrupted.")
+ return
+ p.terminate()
+ time.sleep(0.1)
+ if p.poll() is not None:
+ print("Process is terminated.")
+ return
+ p.kill()
+ print("Process is killed.")
+ except OSError:
+ pass
+ except Exception as e:
+ print("Error while terminating subprocess (pid=%i): %s" \
+ % (p.pid, e))
+ return
+ out = py3compat.bytes_to_str(out)
+ err = py3compat.bytes_to_str(err)
+ if args.out:
+ self.shell.user_ns[args.out] = out
+ else:
+ sys.stdout.write(out)
+ sys.stdout.flush()
+ if args.err:
+ self.shell.user_ns[args.err] = err
+ else:
+ sys.stderr.write(err)
+ sys.stderr.flush()
+
+ def _run_script(self, p, cell):
+ """callback for running the script in the background"""
+ p.stdin.write(cell)
+ p.stdin.close()
+ p.wait()
+
+ @line_magic("killbgscripts")
+ def killbgscripts(self, _nouse_=''):
+ """Kill all BG processes started by %%script and its family."""
+ self.kill_bg_processes()
+ print("All background processes were killed.")
+
+ def kill_bg_processes(self):
+ """Kill all BG processes which are still running."""
if not self.bg_processes:
return
- for p in self.bg_processes:
- if p.poll() is None:
- try:
- p.send_signal(signal.SIGINT)
- except:
- pass
- time.sleep(0.1)
+ for p in self.bg_processes:
+ if p.poll() is None:
+ try:
+ p.send_signal(signal.SIGINT)
+ except:
+ pass
+ time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
- for p in self.bg_processes:
- if p.poll() is None:
- try:
- p.terminate()
- except:
- pass
- time.sleep(0.1)
+ for p in self.bg_processes:
+ if p.poll() is None:
+ try:
+ p.terminate()
+ except:
+ pass
+ time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
- for p in self.bg_processes:
- if p.poll() is None:
- try:
- p.kill()
- except:
- pass
- self._gc_bg_processes()
-
- def _gc_bg_processes(self):
- self.bg_processes = [p for p in self.bg_processes if p.poll() is None]
+ for p in self.bg_processes:
+ if p.poll() is None:
+ try:
+ p.kill()
+ except:
+ pass
+ self._gc_bg_processes()
+
+ def _gc_bg_processes(self):
+ self.bg_processes = [p for p in self.bg_processes if p.poll() is None]
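_gc_bg_processes is pure liveness filtering: Popen.poll() returns None while a child is still running, so the comprehension drops every finished process. The same idea in isolation (POSIX commands, purely illustrative):

    import time
    from subprocess import Popen

    procs = [Popen(["sleep", "5"]), Popen(["true"])]
    time.sleep(0.2)                              # let the short-lived one exit
    procs = [p for p in procs if p.poll() is None]
    print(len(procs))                            # 1: only the sleeper survives
    for p in procs:
        p.terminate()                            # clean up the sketch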
diff --git a/contrib/python/ipython/py2/IPython/core/oinspect.py b/contrib/python/ipython/py2/IPython/core/oinspect.py
index 6849412528..55a4efe8c0 100644
--- a/contrib/python/ipython/py2/IPython/core/oinspect.py
+++ b/contrib/python/ipython/py2/IPython/core/oinspect.py
@@ -1,49 +1,49 @@
-# -*- coding: utf-8 -*-
-"""Tools for inspecting Python objects.
-
-Uses syntax highlighting for presenting the various information elements.
-
-Similar in spirit to the inspect module, but all calls take a name argument to
-reference the name under which an object is being read.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-__all__ = ['Inspector','InspectColors']
-
-# stdlib modules
-import inspect
-import linecache
+# -*- coding: utf-8 -*-
+"""Tools for inspecting Python objects.
+
+Uses syntax highlighting for presenting the various information elements.
+
+Similar in spirit to the inspect module, but all calls take a name argument to
+reference the name under which an object is being read.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+__all__ = ['Inspector','InspectColors']
+
+# stdlib modules
+import inspect
+import linecache
import warnings
-import os
-from textwrap import dedent
-import types
-import io as stdlib_io
-
-try:
- from itertools import izip_longest
-except ImportError:
- from itertools import zip_longest as izip_longest
-
-# IPython's own
-from IPython.core import page
-from IPython.lib.pretty import pretty
-from IPython.testing.skipdoctest import skip_doctest_py3
-from IPython.utils import PyColorize
-from IPython.utils import openpy
-from IPython.utils import py3compat
-from IPython.utils.dir2 import safe_hasattr
-from IPython.utils.path import compress_user
-from IPython.utils.text import indent
-from IPython.utils.wildcard import list_namespace
-from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
-from IPython.utils.py3compat import cast_unicode, string_types, PY3
-from IPython.utils.signatures import signature
+import os
+from textwrap import dedent
+import types
+import io as stdlib_io
+
+try:
+ from itertools import izip_longest
+except ImportError:
+ from itertools import zip_longest as izip_longest
+
+# IPython's own
+from IPython.core import page
+from IPython.lib.pretty import pretty
+from IPython.testing.skipdoctest import skip_doctest_py3
+from IPython.utils import PyColorize
+from IPython.utils import openpy
+from IPython.utils import py3compat
+from IPython.utils.dir2 import safe_hasattr
+from IPython.utils.path import compress_user
+from IPython.utils.text import indent
+from IPython.utils.wildcard import list_namespace
+from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
+from IPython.utils.py3compat import cast_unicode, string_types, PY3
+from IPython.utils.signatures import signature
from IPython.utils.colorable import Colorable
-
+
from pygments import highlight
try:
# PythonLexer was renamed to Python2Lexer in pygments 2.5
@@ -55,510 +55,510 @@ from pygments.formatters import HtmlFormatter
def pylight(code):
return highlight(code, Python2Lexer(), HtmlFormatter(noclasses=True))
-# builtin docstrings to ignore
-_func_call_docstring = types.FunctionType.__call__.__doc__
-_object_init_docstring = object.__init__.__doc__
-_builtin_type_docstrings = {
- inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
- types.FunctionType, property)
-}
-
-_builtin_func_type = type(all)
-_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
-#****************************************************************************
-# Builtin color schemes
-
-Colors = TermColors # just a shorthand
-
-InspectColors = PyColorize.ANSICodeColors
-
-#****************************************************************************
-# Auxiliary functions and objects
-
-# See the messaging spec for the definition of all these fields. This list
-# effectively defines the order of display
-info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
- 'length', 'file', 'definition', 'docstring', 'source',
- 'init_definition', 'class_docstring', 'init_docstring',
- 'call_def', 'call_docstring',
- # These won't be printed but will be used to determine how to
- # format the object
- 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'
- ]
-
-
-def object_info(**kw):
- """Make an object info dict with all fields present."""
- infodict = dict(izip_longest(info_fields, [None]))
- infodict.update(kw)
- return infodict
-
-
-def get_encoding(obj):
- """Get encoding for python source file defining obj
-
- Returns None if obj is not defined in a sourcefile.
- """
- ofile = find_file(obj)
- # only inspect the file if it isn't binary and is actually on the
- # filesystem.
- if ofile is None:
- return None
- elif ofile.endswith(('.so', '.dll', '.pyd')):
- return None
- elif not os.path.isfile(ofile):
- return None
- else:
- # Read just enough of the source file to find its encoding cookie.
- with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2
- encoding, lines = openpy.detect_encoding(buffer.readline)
- return encoding
-
-def getdoc(obj):
- """Stable wrapper around inspect.getdoc.
-
- This can't crash because of attribute problems.
-
- It also attempts to call a getdoc() method on the given object. This
- allows objects which provide their docstrings via non-standard mechanisms
+# builtin docstrings to ignore
+_func_call_docstring = types.FunctionType.__call__.__doc__
+_object_init_docstring = object.__init__.__doc__
+_builtin_type_docstrings = {
+ inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
+ types.FunctionType, property)
+}
+
+_builtin_func_type = type(all)
+_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
+#****************************************************************************
+# Builtin color schemes
+
+Colors = TermColors # just a shorthand
+
+InspectColors = PyColorize.ANSICodeColors
+
+#****************************************************************************
+# Auxiliary functions and objects
+
+# See the messaging spec for the definition of all these fields. This list
+# effectively defines the order of display
+info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
+ 'length', 'file', 'definition', 'docstring', 'source',
+ 'init_definition', 'class_docstring', 'init_docstring',
+ 'call_def', 'call_docstring',
+ # These won't be printed but will be used to determine how to
+ # format the object
+ 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'
+ ]
+
+
+def object_info(**kw):
+ """Make an object info dict with all fields present."""
+ infodict = dict(izip_longest(info_fields, [None]))
+ infodict.update(kw)
+ return infodict
+
+
+def get_encoding(obj):
+ """Get encoding for python source file defining obj
+
+ Returns None if obj is not defined in a sourcefile.
+ """
+ ofile = find_file(obj)
+ # only inspect the file if it isn't binary and is actually on the
+ # filesystem.
+ if ofile is None:
+ return None
+ elif ofile.endswith(('.so', '.dll', '.pyd')):
+ return None
+ elif not os.path.isfile(ofile):
+ return None
+ else:
+ # Read just enough of the source file to find its encoding cookie.
+ with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2
+ encoding, lines = openpy.detect_encoding(buffer.readline)
+ return encoding
+
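openpy.detect_encoding follows the same contract as the Python 3 stdlib tokenize.detect_encoding: read at most two lines through the supplied readline callable, look for a PEP 263 coding cookie, and fall back to utf-8. A stdlib-only equivalent of the call above:

    import io
    import tokenize   # Python 3 stdlib

    src = b"# -*- coding: cp1252 -*-\nx = 1\n"
    encoding, first_lines = tokenize.detect_encoding(io.BytesIO(src).readline)
    print(encoding)   # 'cp1252'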
+def getdoc(obj):
+ """Stable wrapper around inspect.getdoc.
+
+ This can't crash because of attribute problems.
+
+ It also attempts to call a getdoc() method on the given object. This
+ allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system.
"""
- # Allow objects to offer customized documentation via a getdoc method:
- try:
- ds = obj.getdoc()
- except Exception:
- pass
- else:
- # if we get extra info, we add it to the normal docstring.
- if isinstance(ds, string_types):
- return inspect.cleandoc(ds)
- try:
- docstr = inspect.getdoc(obj)
- encoding = get_encoding(obj)
- return py3compat.cast_unicode(docstr, encoding=encoding)
- except Exception:
- # Harden against an inspect failure, which can occur with
+ # Allow objects to offer customized documentation via a getdoc method:
+ try:
+ ds = obj.getdoc()
+ except Exception:
+ pass
+ else:
+ # if we get extra info, we add it to the normal docstring.
+ if isinstance(ds, string_types):
+ return inspect.cleandoc(ds)
+ try:
+ docstr = inspect.getdoc(obj)
+ encoding = get_encoding(obj)
+ return py3compat.cast_unicode(docstr, encoding=encoding)
+ except Exception:
+ # Harden against an inspect failure, which can occur with
# extensions modules.
- raise
- return None
-
-
-def getsource(obj, oname=''):
- """Wrapper around inspect.getsource.
-
- This can be modified by other projects to provide customized source
- extraction.
-
- Parameters
- ----------
- obj : object
- an object whose source code we will attempt to extract
- oname : str
- (optional) a name under which the object is known
-
- Returns
- -------
- src : unicode or None
-
- """
-
- if isinstance(obj, property):
- sources = []
- for attrname in ['fget', 'fset', 'fdel']:
- fn = getattr(obj, attrname)
- if fn is not None:
- encoding = get_encoding(fn)
- oname_prefix = ('%s.' % oname) if oname else ''
- sources.append(cast_unicode(
- ''.join(('# ', oname_prefix, attrname)),
- encoding=encoding))
- if inspect.isfunction(fn):
- sources.append(dedent(getsource(fn)))
- else:
- # Default str/repr only prints function name,
- # pretty.pretty prints module name too.
- sources.append(cast_unicode(
- '%s%s = %s\n' % (
- oname_prefix, attrname, pretty(fn)),
- encoding=encoding))
- if sources:
- return '\n'.join(sources)
- else:
- return None
-
- else:
- # Get source for non-property objects.
-
- obj = _get_wrapped(obj)
-
- try:
- src = inspect.getsource(obj)
- except TypeError:
- # The object itself provided no meaningful source, try looking for
- # its class definition instead.
- if hasattr(obj, '__class__'):
- try:
- src = inspect.getsource(obj.__class__)
- except TypeError:
- return None
-
- encoding = get_encoding(obj)
- return cast_unicode(src, encoding=encoding)
-
-
-def is_simple_callable(obj):
- """True if obj is a function ()"""
- return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
- isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
-
-
-def getargspec(obj):
- """Wrapper around :func:`inspect.getfullargspec` on Python 3, and
- :func:inspect.getargspec` on Python 2.
-
- In addition to functions and methods, this can also handle objects with a
- ``__call__`` attribute.
- """
- if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
- obj = obj.__call__
-
- return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)
-
-
-def format_argspec(argspec):
- """Format argspect, convenience wrapper around inspect's.
-
- This takes a dict instead of ordered arguments and calls
- inspect.format_argspec with the arguments in the necessary order.
- """
- return inspect.formatargspec(argspec['args'], argspec['varargs'],
- argspec['varkw'], argspec['defaults'])
-
-
-def call_tip(oinfo, format_call=True):
- """Extract call tip data from an oinfo dict.
-
- Parameters
- ----------
- oinfo : dict
-
- format_call : bool, optional
- If True, the call line is formatted and returned as a string. If not, a
- tuple of (name, argspec) is returned.
-
- Returns
- -------
- call_info : None, str or (str, dict) tuple.
- When format_call is True, the whole call information is formatted as a
- single string. Otherwise, the object's name and its argspec dict are
- returned. If no call information is available, None is returned.
-
- docstring : str or None
- The most relevant docstring for calling purposes is returned, if
- available. The priority is: call docstring for callable instances, then
- constructor docstring for classes, then main object's docstring otherwise
- (regular functions).
- """
- # Get call definition
- argspec = oinfo.get('argspec')
- if argspec is None:
- call_line = None
- else:
- # Callable objects will have 'self' as their first argument, prune
- # it out if it's there for clarity (since users do *not* pass an
- # extra first argument explicitly).
- try:
- has_self = argspec['args'][0] == 'self'
- except (KeyError, IndexError):
- pass
- else:
- if has_self:
- argspec['args'] = argspec['args'][1:]
-
- call_line = oinfo['name']+format_argspec(argspec)
-
- # Now get docstring.
- # The priority is: call docstring, constructor docstring, main one.
- doc = oinfo.get('call_docstring')
- if doc is None:
- doc = oinfo.get('init_docstring')
- if doc is None:
- doc = oinfo.get('docstring','')
-
- return call_line, doc
-
-
-def _get_wrapped(obj):
- """Get the original object if wrapped in one or more @decorators
-
- Some objects automatically construct similar objects on any unrecognised
- attribute access (e.g. unittest.mock.call). To protect against infinite loops,
- this will arbitrarily cut off after 100 levels of obj.__wrapped__
- attribute access. --TK, Jan 2016
- """
- orig_obj = obj
- i = 0
- while safe_hasattr(obj, '__wrapped__'):
- obj = obj.__wrapped__
- i += 1
- if i > 100:
- # __wrapped__ is probably a lie, so return the thing we started with
- return orig_obj
- return obj
-
-def find_file(obj):
- """Find the absolute path to the file where an object was defined.
-
- This is essentially a robust wrapper around `inspect.getabsfile`.
-
- Returns None if no file can be found.
-
- Parameters
- ----------
- obj : any Python object
-
- Returns
- -------
- fname : str
- The absolute path to the file where the object was defined.
- """
- obj = _get_wrapped(obj)
-
- fname = None
- try:
- fname = inspect.getabsfile(obj)
- except TypeError:
- # For an instance, the file that matters is where its class was
- # declared.
- if hasattr(obj, '__class__'):
- try:
- fname = inspect.getabsfile(obj.__class__)
- except TypeError:
- # Can happen for builtins
- pass
- except:
- pass
- return cast_unicode(fname)
-
-
-def find_source_lines(obj):
- """Find the line number in a file where an object was defined.
-
- This is essentially a robust wrapper around `inspect.getsourcelines`.
-
- Returns None if no file can be found.
-
- Parameters
- ----------
- obj : any Python object
-
- Returns
- -------
- lineno : int
- The line number where the object definition starts.
- """
- obj = _get_wrapped(obj)
-
- try:
- try:
- lineno = inspect.getsourcelines(obj)[1]
- except TypeError:
- # For instances, try the class object like getsource() does
- if hasattr(obj, '__class__'):
- lineno = inspect.getsourcelines(obj.__class__)[1]
- else:
- lineno = None
- except:
- return None
-
- return lineno
-
+ raise
+ return None
+
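Because getdoc consults an object-level getdoc() hook before inspect.getdoc, proxy objects can supply documentation that has no __doc__ behind it. A toy illustration of the hook (Proxy is hypothetical):

    import inspect

    class Proxy(object):
        def getdoc(self):
            return """Docs fetched from a remote object.

                Indented continuation lines get cleaned up."""

    ds = Proxy().getdoc()
    if isinstance(ds, str):
        ds = inspect.cleandoc(ds)   # the same normalization used above
    print(ds)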
+
+def getsource(obj, oname=''):
+ """Wrapper around inspect.getsource.
+
+ This can be modified by other projects to provide customized source
+ extraction.
+
+ Parameters
+ ----------
+ obj : object
+ an object whose source code we will attempt to extract
+ oname : str
+ (optional) a name under which the object is known
+
+ Returns
+ -------
+ src : unicode or None
+
+ """
+
+ if isinstance(obj, property):
+ sources = []
+ for attrname in ['fget', 'fset', 'fdel']:
+ fn = getattr(obj, attrname)
+ if fn is not None:
+ encoding = get_encoding(fn)
+ oname_prefix = ('%s.' % oname) if oname else ''
+ sources.append(cast_unicode(
+ ''.join(('# ', oname_prefix, attrname)),
+ encoding=encoding))
+ if inspect.isfunction(fn):
+ sources.append(dedent(getsource(fn)))
+ else:
+ # Default str/repr only prints function name,
+ # pretty.pretty prints module name too.
+ sources.append(cast_unicode(
+ '%s%s = %s\n' % (
+ oname_prefix, attrname, pretty(fn)),
+ encoding=encoding))
+ if sources:
+ return '\n'.join(sources)
+ else:
+ return None
+
+ else:
+ # Get source for non-property objects.
+
+ obj = _get_wrapped(obj)
+
+ try:
+ src = inspect.getsource(obj)
+ except TypeError:
+ # The object itself provided no meaningful source, try looking for
+ # its class definition instead.
+ if hasattr(obj, '__class__'):
+ try:
+ src = inspect.getsource(obj.__class__)
+ except TypeError:
+ return None
+
+ encoding = get_encoding(obj)
+ return cast_unicode(src, encoding=encoding)
+
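For properties, the branch above stitches together one snippet per accessor (fget, fset, fdel), each preceded by a comment header. The per-accessor walk in isolation, using plain inspect.getsource (Point is hypothetical; run this from a file so inspect can locate the source):

    import inspect
    from textwrap import dedent

    class Point(object):
        @property
        def x(self):
            return self._x

    pieces = []
    for attrname in ('fget', 'fset', 'fdel'):
        fn = getattr(Point.x, attrname)
        if fn is not None:                 # fset/fdel are None here
            pieces.append('# x.%s' % attrname)
            pieces.append(dedent(inspect.getsource(fn)))
    print('\n'.join(pieces))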
+
+def is_simple_callable(obj):
+ """True if obj is a function ()"""
+ return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
+ isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
+
+
+def getargspec(obj):
+ """Wrapper around :func:`inspect.getfullargspec` on Python 3, and
+ :func:inspect.getargspec` on Python 2.
+
+ In addition to functions and methods, this can also handle objects with a
+ ``__call__`` attribute.
+ """
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ obj = obj.__call__
+
+ return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)
+
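The __call__ redirection above is what lets callable instances be introspected like functions. On Python 3 the equivalent is simply:

    import inspect

    class Adder(object):
        def __call__(self, x, y=0):
            return x + y

    obj = Adder()
    # not a plain function, so inspect its __call__ instead
    spec = inspect.getfullargspec(obj.__call__)
    print(spec.args, spec.defaults)   # ['self', 'x', 'y'] (0,)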
+
+def format_argspec(argspec):
+ """Format argspect, convenience wrapper around inspect's.
+
+ This takes a dict instead of ordered arguments and calls
+ inspect.format_argspec with the arguments in the necessary order.
+ """
+ return inspect.formatargspec(argspec['args'], argspec['varargs'],
+ argspec['varkw'], argspec['defaults'])
+
+
+def call_tip(oinfo, format_call=True):
+ """Extract call tip data from an oinfo dict.
+
+ Parameters
+ ----------
+ oinfo : dict
+
+ format_call : bool, optional
+ If True, the call line is formatted and returned as a string. If not, a
+ tuple of (name, argspec) is returned.
+
+ Returns
+ -------
+ call_info : None, str or (str, dict) tuple.
+ When format_call is True, the whole call information is formatted as a
+ single string. Otherwise, the object's name and its argspec dict are
+ returned. If no call information is available, None is returned.
+
+ docstring : str or None
+ The most relevant docstring for calling purposes is returned, if
+ available. The priority is: call docstring for callable instances, then
+ constructor docstring for classes, then main object's docstring otherwise
+ (regular functions).
+ """
+ # Get call definition
+ argspec = oinfo.get('argspec')
+ if argspec is None:
+ call_line = None
+ else:
+ # Callable objects will have 'self' as their first argument, prune
+ # it out if it's there for clarity (since users do *not* pass an
+ # extra first argument explicitly).
+ try:
+ has_self = argspec['args'][0] == 'self'
+ except (KeyError, IndexError):
+ pass
+ else:
+ if has_self:
+ argspec['args'] = argspec['args'][1:]
+
+ call_line = oinfo['name']+format_argspec(argspec)
+
+ # Now get docstring.
+ # The priority is: call docstring, constructor docstring, main one.
+ doc = oinfo.get('call_docstring')
+ if doc is None:
+ doc = oinfo.get('init_docstring')
+ if doc is None:
+ doc = oinfo.get('docstring','')
+
+ return call_line, doc
+
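The one subtle step in call_tip is pruning 'self' from the argspec before formatting, since users never pass it explicitly. That pruning in isolation:

    argspec = {'args': ['self', 'x', 'y'], 'varargs': None,
               'varkw': None, 'defaults': (0,)}
    try:
        has_self = argspec['args'][0] == 'self'
    except (KeyError, IndexError):
        has_self = False
    if has_self:
        argspec['args'] = argspec['args'][1:]
    print(argspec['args'])   # ['x', 'y']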
+
+def _get_wrapped(obj):
+ """Get the original object if wrapped in one or more @decorators
+
+ Some objects automatically construct similar objects on any unrecognised
+ attribute access (e.g. unittest.mock.call). To protect against infinite loops,
+ this will arbitrarily cut off after 100 levels of obj.__wrapped__
+ attribute access. --TK, Jan 2016
+ """
+ orig_obj = obj
+ i = 0
+ while safe_hasattr(obj, '__wrapped__'):
+ obj = obj.__wrapped__
+ i += 1
+ if i > 100:
+ # __wrapped__ is probably a lie, so return the thing we started with
+ return orig_obj
+ return obj
+
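The __wrapped__ chain that _get_wrapped follows is the one functools.wraps records (Python 3.2+). A short demonstration, minus the 100-step cap that guards against objects fabricating attributes on any access:

    import functools

    def deco(fn):
        @functools.wraps(fn)               # sets wrapper.__wrapped__ = fn
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @deco
    @deco
    def original():
        "the real function"

    obj = original
    while hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__
    print(obj.__name__)                    # 'original'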
+def find_file(obj):
+ """Find the absolute path to the file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getabsfile`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ fname : str
+ The absolute path to the file where the object was defined.
+ """
+ obj = _get_wrapped(obj)
+
+ fname = None
+ try:
+ fname = inspect.getabsfile(obj)
+ except TypeError:
+ # For an instance, the file that matters is where its class was
+ # declared.
+ if hasattr(obj, '__class__'):
+ try:
+ fname = inspect.getabsfile(obj.__class__)
+ except TypeError:
+ # Can happen for builtins
+ pass
+ except:
+ pass
+ return cast_unicode(fname)
+
+
+def find_source_lines(obj):
+ """Find the line number in a file where an object was defined.
+
+ This is essentially a robust wrapper around `inspect.getsourcelines`.
+
+ Returns None if no file can be found.
+
+ Parameters
+ ----------
+ obj : any Python object
+
+ Returns
+ -------
+ lineno : int
+ The line number where the object definition starts.
+ """
+ obj = _get_wrapped(obj)
+
+ try:
+ try:
+ lineno = inspect.getsourcelines(obj)[1]
+ except TypeError:
+ # For instances, try the class object like getsource() does
+ if hasattr(obj, '__class__'):
+ lineno = inspect.getsourcelines(obj.__class__)[1]
+ else:
+ lineno = None
+ except:
+ return None
+
+ return lineno
+
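find_file and find_source_lines share one fallback strategy: try the object itself, then its class. A compact sketch of that pairing over the raw inspect calls (locate is hypothetical):

    import inspect
    import json

    def locate(obj):
        """Best-effort (file, line) for obj, falling back to its class."""
        try:
            return inspect.getabsfile(obj), inspect.getsourcelines(obj)[1]
        except TypeError:                  # e.g. a plain instance
            cls = obj.__class__
            return inspect.getabsfile(cls), inspect.getsourcelines(cls)[1]

    print(locate(json.JSONDecoder()))      # (.../json/decoder.py, class lineno)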
class Inspector(Colorable):
-
- def __init__(self, color_table=InspectColors,
- code_color_table=PyColorize.ANSICodeColors,
- scheme='NoColor',
+
+ def __init__(self, color_table=InspectColors,
+ code_color_table=PyColorize.ANSICodeColors,
+ scheme='NoColor',
str_detail_level=0,
parent=None, config=None):
super(Inspector, self).__init__(parent=parent, config=config)
- self.color_table = color_table
+ self.color_table = color_table
self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
- self.format = self.parser.format
- self.str_detail_level = str_detail_level
- self.set_active_scheme(scheme)
-
- def _getdef(self,obj,oname=''):
- """Return the call signature for any callable object.
-
- If any exception is generated, None is returned instead and the
- exception is suppressed."""
- try:
- hdef = oname + str(signature(obj))
- return cast_unicode(hdef)
- except:
- return None
-
- def __head(self,h):
- """Return a header string with proper colors."""
- return '%s%s%s' % (self.color_table.active_colors.header,h,
- self.color_table.active_colors.normal)
-
- def set_active_scheme(self, scheme):
- self.color_table.set_active_scheme(scheme)
- self.parser.color_table.set_active_scheme(scheme)
-
- def noinfo(self, msg, oname):
- """Generic message when no information is found."""
- print('No %s found' % msg, end=' ')
- if oname:
- print('for %s' % oname)
- else:
- print()
-
- def pdef(self, obj, oname=''):
- """Print the call signature for any callable object.
-
- If the object is a class, print the constructor information."""
-
- if not callable(obj):
- print('Object is not callable.')
- return
-
- header = ''
-
- if inspect.isclass(obj):
- header = self.__head('Class constructor information:\n')
- elif (not py3compat.PY3) and type(obj) is types.InstanceType:
- obj = obj.__call__
-
- output = self._getdef(obj,oname)
- if output is None:
- self.noinfo('definition header',oname)
- else:
+ self.format = self.parser.format
+ self.str_detail_level = str_detail_level
+ self.set_active_scheme(scheme)
+
+ def _getdef(self,obj,oname=''):
+ """Return the call signature for any callable object.
+
+ If any exception is generated, None is returned instead and the
+ exception is suppressed."""
+ try:
+ hdef = oname + str(signature(obj))
+ return cast_unicode(hdef)
+ except:
+ return None
+
+ def __head(self,h):
+ """Return a header string with proper colors."""
+ return '%s%s%s' % (self.color_table.active_colors.header,h,
+ self.color_table.active_colors.normal)
+
+ def set_active_scheme(self, scheme):
+ self.color_table.set_active_scheme(scheme)
+ self.parser.color_table.set_active_scheme(scheme)
+
+ def noinfo(self, msg, oname):
+ """Generic message when no information is found."""
+ print('No %s found' % msg, end=' ')
+ if oname:
+ print('for %s' % oname)
+ else:
+ print()
+
+ def pdef(self, obj, oname=''):
+ """Print the call signature for any callable object.
+
+ If the object is a class, print the constructor information."""
+
+ if not callable(obj):
+ print('Object is not callable.')
+ return
+
+ header = ''
+
+ if inspect.isclass(obj):
+ header = self.__head('Class constructor information:\n')
+ elif (not py3compat.PY3) and type(obj) is types.InstanceType:
+ obj = obj.__call__
+
+ output = self._getdef(obj,oname)
+ if output is None:
+ self.noinfo('definition header',oname)
+ else:
print(header,self.format(output), end=' ')
-
- # In Python 3, all classes are new-style, so they all have __init__.
- @skip_doctest_py3
+
+ # In Python 3, all classes are new-style, so they all have __init__.
+ @skip_doctest_py3
def pdoc(self, obj, oname='', formatter=None):
- """Print the docstring for any object.
-
- Optional:
- -formatter: a function to run the docstring through for specially
- formatted docstrings.
-
- Examples
- --------
-
- In [1]: class NoInit:
- ...: pass
-
- In [2]: class NoDoc:
- ...: def __init__(self):
- ...: pass
-
- In [3]: %pdoc NoDoc
- No documentation found for NoDoc
-
- In [4]: %pdoc NoInit
- No documentation found for NoInit
-
- In [5]: obj = NoInit()
-
- In [6]: %pdoc obj
- No documentation found for obj
-
- In [5]: obj2 = NoDoc()
-
- In [6]: %pdoc obj2
- No documentation found for obj2
- """
-
- head = self.__head # For convenience
- lines = []
- ds = getdoc(obj)
- if formatter:
+ """Print the docstring for any object.
+
+ Optional:
+ -formatter: a function to run the docstring through for specially
+ formatted docstrings.
+
+ Examples
+ --------
+
+ In [1]: class NoInit:
+ ...: pass
+
+ In [2]: class NoDoc:
+ ...: def __init__(self):
+ ...: pass
+
+ In [3]: %pdoc NoDoc
+ No documentation found for NoDoc
+
+ In [4]: %pdoc NoInit
+ No documentation found for NoInit
+
+ In [5]: obj = NoInit()
+
+ In [6]: %pdoc obj
+ No documentation found for obj
+
+ In [5]: obj2 = NoDoc()
+
+ In [6]: %pdoc obj2
+ No documentation found for obj2
+ """
+
+ head = self.__head # For convenience
+ lines = []
+ ds = getdoc(obj)
+ if formatter:
ds = formatter(ds).get('plain/text', ds)
- if ds:
- lines.append(head("Class docstring:"))
- lines.append(indent(ds))
- if inspect.isclass(obj) and hasattr(obj, '__init__'):
- init_ds = getdoc(obj.__init__)
- if init_ds is not None:
- lines.append(head("Init docstring:"))
- lines.append(indent(init_ds))
- elif hasattr(obj,'__call__'):
- call_ds = getdoc(obj.__call__)
- if call_ds:
- lines.append(head("Call docstring:"))
- lines.append(indent(call_ds))
-
- if not lines:
- self.noinfo('documentation',oname)
- else:
- page.page('\n'.join(lines))
-
- def psource(self, obj, oname=''):
- """Print the source code for an object."""
-
- # Flush the source cache because inspect can return out-of-date source
- linecache.checkcache()
- try:
- src = getsource(obj, oname=oname)
- except Exception:
- src = None
-
- if src is None:
- self.noinfo('source', oname)
- else:
- page.page(self.format(src))
-
- def pfile(self, obj, oname=''):
- """Show the whole file where an object was defined."""
-
- lineno = find_source_lines(obj)
- if lineno is None:
- self.noinfo('file', oname)
- return
-
- ofile = find_file(obj)
- # run contents of file through pager starting at line where the object
- # is defined, as long as the file isn't binary and is actually on the
- # filesystem.
- if ofile.endswith(('.so', '.dll', '.pyd')):
- print('File %r is binary, not printing.' % ofile)
- elif not os.path.isfile(ofile):
- print('File %r does not exist, not printing.' % ofile)
- else:
- # Print only text files, not extension binaries. Note that
- # getsourcelines returns lineno with 1-offset and page() uses
- # 0-offset, so we must adjust.
- page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
-
- def _format_fields(self, fields, title_width=0):
- """Formats a list of fields for display.
-
- Parameters
- ----------
- fields : list
- A list of 2-tuples: (field_title, field_content)
- title_width : int
- How many characters to pad titles to. Defaults to the longest title.
- """
- out = []
- header = self.__head
- if title_width == 0:
- title_width = max(len(title) + 2 for title, _ in fields)
- for title, content in fields:
- if len(content.splitlines()) > 1:
+ if ds:
+ lines.append(head("Class docstring:"))
+ lines.append(indent(ds))
+ if inspect.isclass(obj) and hasattr(obj, '__init__'):
+ init_ds = getdoc(obj.__init__)
+ if init_ds is not None:
+ lines.append(head("Init docstring:"))
+ lines.append(indent(init_ds))
+ elif hasattr(obj,'__call__'):
+ call_ds = getdoc(obj.__call__)
+ if call_ds:
+ lines.append(head("Call docstring:"))
+ lines.append(indent(call_ds))
+
+ if not lines:
+ self.noinfo('documentation',oname)
+ else:
+ page.page('\n'.join(lines))
+
+ def psource(self, obj, oname=''):
+ """Print the source code for an object."""
+
+ # Flush the source cache because inspect can return out-of-date source
+ linecache.checkcache()
+ try:
+ src = getsource(obj, oname=oname)
+ except Exception:
+ src = None
+
+ if src is None:
+ self.noinfo('source', oname)
+ else:
+ page.page(self.format(src))
+
+ def pfile(self, obj, oname=''):
+ """Show the whole file where an object was defined."""
+
+ lineno = find_source_lines(obj)
+ if lineno is None:
+ self.noinfo('file', oname)
+ return
+
+ ofile = find_file(obj)
+ # run contents of file through pager starting at line where the object
+ # is defined, as long as the file isn't binary and is actually on the
+ # filesystem.
+ if ofile.endswith(('.so', '.dll', '.pyd')):
+ print('File %r is binary, not printing.' % ofile)
+ elif not os.path.isfile(ofile):
+ print('File %r does not exist, not printing.' % ofile)
+ else:
+ # Print only text files, not extension binaries. Note that
+ # getsourcelines returns lineno with 1-offset and page() uses
+ # 0-offset, so we must adjust.
+ page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
+
+ def _format_fields(self, fields, title_width=0):
+ """Formats a list of fields for display.
+
+ Parameters
+ ----------
+ fields : list
+ A list of 2-tuples: (field_title, field_content)
+ title_width : int
+ How many characters to pad titles to. Defaults to the longest title.
+ """
+ out = []
+ header = self.__head
+ if title_width == 0:
+ title_width = max(len(title) + 2 for title, _ in fields)
+ for title, content in fields:
+ if len(content.splitlines()) > 1:
title = header(title + ':') + '\n'
- else:
+ else:
title = header((title + ':').ljust(title_width))
- out.append(cast_unicode(title) + cast_unicode(content))
- return "\n".join(out)
-
+ out.append(cast_unicode(title) + cast_unicode(content))
+ return "\n".join(out)
+
def _mime_format(self, text, formatter=None):
"""Return a mime bundle representation of the input text.
@@ -637,56 +637,56 @@ class Inspector(Colorable):
'text/html': pylight(text)
}
- if info['isalias']:
+ if info['isalias']:
append_field(_mime, 'Repr', 'string_form')
-
- elif info['ismagic']:
+
+ elif info['ismagic']:
if detail_level > 0:
append_field(_mime, 'Source', 'source', code_formatter)
- else:
+ else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'File', 'file')
-
- elif info['isclass'] or is_simple_callable(obj):
- # Functions, methods, classes
+
+ elif info['isclass'] or is_simple_callable(obj):
+ # Functions, methods, classes
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Init signature', 'init_definition', code_formatter)
if detail_level > 0 and info['source']:
append_field(_mime, 'Source', 'source', code_formatter)
- else:
+ else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
-
+
append_field(_mime, 'File', 'file')
append_field(_mime, 'Type', 'type_name')
-
- else:
- # General Python objects
+
+ else:
+ # General Python objects
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Call signature', 'call_def', code_formatter)
append_field(_mime, 'Type', 'type_name')
-
- # Base class for old-style instances
- if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
+
+ # Base class for old-style instances
+ if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
append_field(_mime, 'Base Class', 'base_class')
-
+
append_field(_mime, 'String form', 'string_form')
-
- # Namespace
- if info['namespace'] != 'Interactive':
+
+ # Namespace
+ if info['namespace'] != 'Interactive':
append_field(_mime, 'Namespace', 'namespace')
-
+
append_field(_mime, 'Length', 'length')
append_field(_mime, 'File', 'file')
- # Source or docstring, depending on detail level and whether
- # source found.
+ # Source or docstring, depending on detail level and whether
+ # source found.
if detail_level > 0:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Docstring', 'docstring', formatter)
-
+
append_field(_mime, 'Class docstring', 'class_docstring', formatter)
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
append_field(_mime, 'Call docstring', 'call_docstring', formatter)
@@ -695,15 +695,15 @@ class Inspector(Colorable):
return self.format_mime(_mime)
def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0, enable_html_pager=True):
- """Show detailed information about an object.
-
- Optional arguments:
-
- - oname: name of the variable pointing to the object.
-
+ """Show detailed information about an object.
+
+ Optional arguments:
+
+ - oname: name of the variable pointing to the object.
+
- formatter: callable (optional)
A special formatter for docstrings.
-
+
The formatter is a callable that takes a string as an input
and returns either a formatted string or a mime type bundle
in the form of a dictionary.
@@ -711,17 +711,17 @@ class Inspector(Colorable):
Although support for a custom formatter returning a string
instead of a mime type bundle is deprecated.
- - info: a structure with some information fields which may have been
- precomputed already.
-
- - detail_level: if set to 1, more information is given.
- """
+ - info: a structure with some information fields which may have been
+ precomputed already.
+
+ - detail_level: if set to 1, more information is given.
+ """
info = self._get_info(obj, oname, formatter, info, detail_level)
if not enable_html_pager:
del info['text/html']
page.page(info)
- def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
+ def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
"""DEPRECATED. Compute a dict with detailed information about an object.
"""
if formatter is not None:
@@ -731,126 +731,126 @@ class Inspector(Colorable):
return self._info(obj, oname=oname, info=info, detail_level=detail_level)
def _info(self, obj, oname='', info=None, detail_level=0):
- """Compute a dict with detailed information about an object.
-
- Optional arguments:
-
- - oname: name of the variable pointing to the object.
-
- - info: a structure with some information fields which may have been
- precomputed already.
-
- - detail_level: if set to 1, more information is given.
- """
-
- obj_type = type(obj)
-
- if info is None:
- ismagic = 0
- isalias = 0
- ospace = ''
- else:
- ismagic = info.ismagic
- isalias = info.isalias
- ospace = info.namespace
-
- # Get docstring, special-casing aliases:
- if isalias:
- if not callable(obj):
- try:
- ds = "Alias to the system command:\n %s" % obj[1]
- except:
- ds = "Alias: " + str(obj)
- else:
- ds = "Alias to " + str(obj)
- if obj.__doc__:
- ds += "\nDocstring:\n" + obj.__doc__
- else:
- ds = getdoc(obj)
- if ds is None:
- ds = '<no docstring>'
-
- # store output in a dict, we initialize it here and fill it as we go
- out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)
-
- string_max = 200 # max size of strings to show (snipped if longer)
+ """Compute a dict with detailed information about an object.
+
+ Optional arguments:
+
+ - oname: name of the variable pointing to the object.
+
+ - info: a structure with some information fields which may have been
+ precomputed already.
+
+ - detail_level: if set to 1, more information is given.
+ """
+
+ obj_type = type(obj)
+
+ if info is None:
+ ismagic = 0
+ isalias = 0
+ ospace = ''
+ else:
+ ismagic = info.ismagic
+ isalias = info.isalias
+ ospace = info.namespace
+
+ # Get docstring, special-casing aliases:
+ if isalias:
+ if not callable(obj):
+ try:
+ ds = "Alias to the system command:\n %s" % obj[1]
+ except:
+ ds = "Alias: " + str(obj)
+ else:
+ ds = "Alias to " + str(obj)
+ if obj.__doc__:
+ ds += "\nDocstring:\n" + obj.__doc__
+ else:
+ ds = getdoc(obj)
+ if ds is None:
+ ds = '<no docstring>'
+
+ # store output in a dict, we initialize it here and fill it as we go
+ out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)
+
+ string_max = 200 # max size of strings to show (snipped if longer)
shalf = int((string_max - 5) / 2)
-
- if ismagic:
- obj_type_name = 'Magic function'
- elif isalias:
- obj_type_name = 'System alias'
- else:
- obj_type_name = obj_type.__name__
- out['type_name'] = obj_type_name
-
- try:
- bclass = obj.__class__
- out['base_class'] = str(bclass)
- except: pass
-
- # String form, but snip if too long in ? form (full in ??)
- if detail_level >= self.str_detail_level:
- try:
- ostr = str(obj)
- str_head = 'string_form'
- if not detail_level and len(ostr)>string_max:
- ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
- ostr = ("\n" + " " * len(str_head.expandtabs())).\
- join(q.strip() for q in ostr.split("\n"))
- out[str_head] = ostr
- except:
- pass
-
- if ospace:
- out['namespace'] = ospace
-
- # Length (for strings and lists)
- try:
- out['length'] = str(len(obj))
- except: pass
-
- # Filename where object was defined
- binary_file = False
- fname = find_file(obj)
- if fname is None:
- # if anything goes wrong, we don't want to show source, so it's as
- # if the file was binary
- binary_file = True
- else:
- if fname.endswith(('.so', '.dll', '.pyd')):
- binary_file = True
- elif fname.endswith('<string>'):
- fname = 'Dynamically generated function. No source code available.'
- out['file'] = compress_user(fname)
-
- # Original source code for a callable, class or property.
- if detail_level:
- # Flush the source cache because inspect can return out-of-date
- # source
- linecache.checkcache()
- try:
- if isinstance(obj, property) or not binary_file:
- src = getsource(obj, oname)
- if src is not None:
- src = src.rstrip()
- out['source'] = src
-
- except Exception:
- pass
-
- # Add docstring only if no source is to be shown (avoid repetitions).
- if ds and out.get('source', None) is None:
- out['docstring'] = ds
-
- # Constructor docstring for classes
- if inspect.isclass(obj):
- out['isclass'] = True
+
+ if ismagic:
+ obj_type_name = 'Magic function'
+ elif isalias:
+ obj_type_name = 'System alias'
+ else:
+ obj_type_name = obj_type.__name__
+ out['type_name'] = obj_type_name
+
+ try:
+ bclass = obj.__class__
+ out['base_class'] = str(bclass)
+ except: pass
+
+ # String form, but snip if too long in ? form (full in ??)
+ if detail_level >= self.str_detail_level:
+ try:
+ ostr = str(obj)
+ str_head = 'string_form'
+ if not detail_level and len(ostr)>string_max:
+ ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
+ ostr = ("\n" + " " * len(str_head.expandtabs())).\
+ join(q.strip() for q in ostr.split("\n"))
+ out[str_head] = ostr
+ except:
+ pass
+
+ if ospace:
+ out['namespace'] = ospace
+
+ # Length (for strings and lists)
+ try:
+ out['length'] = str(len(obj))
+ except: pass
+
+ # Filename where object was defined
+ binary_file = False
+ fname = find_file(obj)
+ if fname is None:
+ # if anything goes wrong, we don't want to show source, so it's as
+ # if the file was binary
+ binary_file = True
+ else:
+ if fname.endswith(('.so', '.dll', '.pyd')):
+ binary_file = True
+ elif fname.endswith('<string>'):
+ fname = 'Dynamically generated function. No source code available.'
+ out['file'] = compress_user(fname)
+
+ # Original source code for a callable, class or property.
+ if detail_level:
+ # Flush the source cache because inspect can return out-of-date
+ # source
+ linecache.checkcache()
+ try:
+ if isinstance(obj, property) or not binary_file:
+ src = getsource(obj, oname)
+ if src is not None:
+ src = src.rstrip()
+ out['source'] = src
+
+ except Exception:
+ pass
+
+ # Add docstring only if no source is to be shown (avoid repetitions).
+ if ds and out.get('source', None) is None:
+ out['docstring'] = ds
+
+ # Constructor docstring for classes
+ if inspect.isclass(obj):
+ out['isclass'] = True
# get the init signature:
- try:
+ try:
init_def = self._getdef(obj, oname)
- except AttributeError:
+ except AttributeError:
init_def = None
# get the __init__ docstring
@@ -858,7 +858,7 @@ class Inspector(Colorable):
obj_init = obj.__init__
except AttributeError:
init_ds = None
- else:
+ else:
if init_def is None:
# Get signature from init if top-level sig failed.
# Can happen for built-in types (list, etc.).
@@ -867,149 +867,149 @@ class Inspector(Colorable):
except AttributeError:
pass
init_ds = getdoc(obj_init)
- # Skip Python's auto-generated docstrings
- if init_ds == _object_init_docstring:
- init_ds = None
-
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+
if init_def:
out['init_definition'] = init_def
-
+
if init_ds:
out['init_docstring'] = init_ds
- # and class docstring for instances:
- else:
- # reconstruct the function definition and print it:
- defln = self._getdef(obj, oname)
- if defln:
+ # and class docstring for instances:
+ else:
+ # reconstruct the function definition and print it:
+ defln = self._getdef(obj, oname)
+ if defln:
out['definition'] = defln
-
- # First, check whether the instance docstring is identical to the
- # class one, and print it separately if they don't coincide. In
- # most cases they will, but it's nice to print all the info for
- # objects which use instance-customized docstrings.
- if ds:
- try:
- cls = getattr(obj,'__class__')
- except:
- class_ds = None
- else:
- class_ds = getdoc(cls)
- # Skip Python's auto-generated docstrings
- if class_ds in _builtin_type_docstrings:
- class_ds = None
- if class_ds and ds != class_ds:
- out['class_docstring'] = class_ds
-
- # Next, try to show constructor docstrings
- try:
- init_ds = getdoc(obj.__init__)
- # Skip Python's auto-generated docstrings
- if init_ds == _object_init_docstring:
- init_ds = None
- except AttributeError:
- init_ds = None
- if init_ds:
- out['init_docstring'] = init_ds
-
- # Call form docstring for callable instances
- if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
- call_def = self._getdef(obj.__call__, oname)
+
+ # First, check whether the instance docstring is identical to the
+ # class one, and print it separately if they don't coincide. In
+ # most cases they will, but it's nice to print all the info for
+ # objects which use instance-customized docstrings.
+ if ds:
+ try:
+ cls = getattr(obj,'__class__')
+ except:
+ class_ds = None
+ else:
+ class_ds = getdoc(cls)
+ # Skip Python's auto-generated docstrings
+ if class_ds in _builtin_type_docstrings:
+ class_ds = None
+ if class_ds and ds != class_ds:
+ out['class_docstring'] = class_ds
+
+ # Next, try to show constructor docstrings
+ try:
+ init_ds = getdoc(obj.__init__)
+ # Skip Python's auto-generated docstrings
+ if init_ds == _object_init_docstring:
+ init_ds = None
+ except AttributeError:
+ init_ds = None
+ if init_ds:
+ out['init_docstring'] = init_ds
+
+ # Call form docstring for callable instances
+ if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
+ call_def = self._getdef(obj.__call__, oname)
if call_def and (call_def != out.get('definition')):
- # it may never be the case that call def and definition differ,
- # but don't include the same signature twice
+ # it may never be the case that call def and definition differ,
+ # but don't include the same signature twice
out['call_def'] = call_def
- call_ds = getdoc(obj.__call__)
- # Skip Python's auto-generated docstrings
- if call_ds == _func_call_docstring:
- call_ds = None
- if call_ds:
- out['call_docstring'] = call_ds
-
- # Compute the object's argspec as a callable. The key is to decide
- # whether to pull it from the object itself, from its __init__ or
- # from its __call__ method.
-
- if inspect.isclass(obj):
- # Old-style classes need not have an __init__
- callable_obj = getattr(obj, "__init__", None)
- elif callable(obj):
- callable_obj = obj
- else:
- callable_obj = None
-
- if callable_obj is not None:
- try:
- argspec = getargspec(callable_obj)
- except (TypeError, AttributeError):
- # For extensions/builtins we can't retrieve the argspec
- pass
- else:
- # named tuples' _asdict() method returns an OrderedDict, but we
- # want a plain dict here
- out['argspec'] = argspec_dict = dict(argspec._asdict())
- # We called this varkw before argspec became a named tuple.
- # With getfullargspec it's also called varkw.
- if 'varkw' not in argspec_dict:
- argspec_dict['varkw'] = argspec_dict.pop('keywords')
-
- return object_info(**out)
-
- def psearch(self,pattern,ns_table,ns_search=[],
- ignore_case=False,show_all=False):
- """Search namespaces with wildcards for objects.
-
- Arguments:
-
- - pattern: string containing shell-like wildcards to use in namespace
- searches and optionally a type specification to narrow the search to
- objects of that type.
-
- - ns_table: dict of name->namespaces for search.
-
- Optional arguments:
-
- - ns_search: list of namespace names to include in search.
-
- - ignore_case(False): make the search case-insensitive.
-
- - show_all(False): show all names, including those starting with
- underscores.
- """
- #print 'ps pattern:<%r>' % pattern # dbg
-
- # defaults
- type_pattern = 'all'
- filter = ''
-
- cmds = pattern.split()
- len_cmds = len(cmds)
- if len_cmds == 1:
- # Only filter pattern given
- filter = cmds[0]
- elif len_cmds == 2:
- # Both filter and type specified
- filter,type_pattern = cmds
- else:
- raise ValueError('invalid argument string for psearch: <%s>' %
- pattern)
-
- # filter search namespaces
- for name in ns_search:
- if name not in ns_table:
- raise ValueError('invalid namespace <%s>. Valid names: %s' %
- (name,ns_table.keys()))
-
- #print 'type_pattern:',type_pattern # dbg
- search_result, namespaces_seen = set(), set()
- for ns_name in ns_search:
- ns = ns_table[ns_name]
- # Normally, locals and globals are the same, so we just check one.
- if id(ns) in namespaces_seen:
- continue
- namespaces_seen.add(id(ns))
- tmp_res = list_namespace(ns, type_pattern, filter,
- ignore_case=ignore_case, show_all=show_all)
- search_result.update(tmp_res)
-
- page.page('\n'.join(sorted(search_result)))
+ call_ds = getdoc(obj.__call__)
+ # Skip Python's auto-generated docstrings
+ if call_ds == _func_call_docstring:
+ call_ds = None
+ if call_ds:
+ out['call_docstring'] = call_ds
+
+ # Compute the object's argspec as a callable. The key is to decide
+ # whether to pull it from the object itself, from its __init__ or
+ # from its __call__ method.
+
+ if inspect.isclass(obj):
+ # Old-style classes need not have an __init__
+ callable_obj = getattr(obj, "__init__", None)
+ elif callable(obj):
+ callable_obj = obj
+ else:
+ callable_obj = None
+
+ if callable_obj is not None:
+ try:
+ argspec = getargspec(callable_obj)
+ except (TypeError, AttributeError):
+ # For extensions/builtins we can't retrieve the argspec
+ pass
+ else:
+ # named tuples' _asdict() method returns an OrderedDict, but we
+ # want a plain dict here
+ out['argspec'] = argspec_dict = dict(argspec._asdict())
+ # We called this varkw before argspec became a named tuple.
+ # With getfullargspec it's also called varkw.
+ if 'varkw' not in argspec_dict:
+ argspec_dict['varkw'] = argspec_dict.pop('keywords')
+
+ return object_info(**out)
+
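The string_max snipping above keeps the head and tail of an oversized repr, joined by ' <...> '. The arithmetic in isolation:

    string_max = 200                       # max repr size before snipping
    shalf = int((string_max - 5) / 2)      # 97 characters from each end

    ostr = 'x' * 500
    if len(ostr) > string_max:
        ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
    print(len(ostr))                       # 201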
+ def psearch(self,pattern,ns_table,ns_search=[],
+ ignore_case=False,show_all=False):
+ """Search namespaces with wildcards for objects.
+
+ Arguments:
+
+ - pattern: string containing shell-like wildcards to use in namespace
+ searches and optionally a type specification to narrow the search to
+ objects of that type.
+
+ - ns_table: dict of name->namespaces for search.
+
+ Optional arguments:
+
+ - ns_search: list of namespace names to include in search.
+
+ - ignore_case(False): make the search case-insensitive.
+
+ - show_all(False): show all names, including those starting with
+ underscores.
+ """
+ #print 'ps pattern:<%r>' % pattern # dbg
+
+ # defaults
+ type_pattern = 'all'
+ filter = ''
+
+ cmds = pattern.split()
+ len_cmds = len(cmds)
+ if len_cmds == 1:
+ # Only filter pattern given
+ filter = cmds[0]
+ elif len_cmds == 2:
+ # Both filter and type specified
+ filter,type_pattern = cmds
+ else:
+ raise ValueError('invalid argument string for psearch: <%s>' %
+ pattern)
+
+ # filter search namespaces
+ for name in ns_search:
+ if name not in ns_table:
+ raise ValueError('invalid namespace <%s>. Valid names: %s' %
+ (name,ns_table.keys()))
+
+ #print 'type_pattern:',type_pattern # dbg
+ search_result, namespaces_seen = set(), set()
+ for ns_name in ns_search:
+ ns = ns_table[ns_name]
+ # Normally, locals and globals are the same, so we just check one.
+ if id(ns) in namespaces_seen:
+ continue
+ namespaces_seen.add(id(ns))
+ tmp_res = list_namespace(ns, type_pattern, filter,
+ ignore_case=ignore_case, show_all=show_all)
+ search_result.update(tmp_res)
+
+ page.page('\n'.join(sorted(search_result)))
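The argument handling at the top of psearch splits the magic's input into a name filter and an optional type filter. Extracted as a tiny parser (parse_psearch is hypothetical):

    def parse_psearch(pattern):
        """Split %psearch input into (name filter, type filter)."""
        cmds = pattern.split()
        if len(cmds) == 1:
            return cmds[0], 'all'
        if len(cmds) == 2:
            return cmds[0], cmds[1]
        raise ValueError('invalid argument string for psearch: <%s>' % pattern)

    print(parse_psearch('a*'))             # ('a*', 'all')
    print(parse_psearch('a* function'))    # ('a*', 'function')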
diff --git a/contrib/python/ipython/py2/IPython/core/page.py b/contrib/python/ipython/py2/IPython/core/page.py
index ba14901e19..6d213c9f29 100644
--- a/contrib/python/ipython/py2/IPython/core/page.py
+++ b/contrib/python/ipython/py2/IPython/core/page.py
@@ -1,386 +1,386 @@
-# encoding: utf-8
-"""
-Paging capabilities for IPython.core
-
-Notes
------
-
-For now this uses IPython hooks, so it can't be in IPython.utils. If we can get
-rid of that dependency, we could move it there.
------
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import os
-import re
-import sys
-import tempfile
-
-from io import UnsupportedOperation
-
-from IPython import get_ipython
-from IPython.core.display import display
-from IPython.core.error import TryNext
-from IPython.utils.data import chop
-from IPython.utils.process import system
-from IPython.utils.terminal import get_terminal_size
-from IPython.utils import py3compat
-
-
-def display_page(strng, start=0, screen_lines=25):
- """Just display, no paging. screen_lines is ignored."""
- if isinstance(strng, dict):
- data = strng
- else:
- if start:
- strng = u'\n'.join(strng.splitlines()[start:])
+# encoding: utf-8
+"""
+Paging capabilities for IPython.core
+
+Notes
+-----
+
+For now this uses IPython hooks, so it can't be in IPython.utils. If we can get
+rid of that dependency, we could move it there.
+-----
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import os
+import re
+import sys
+import tempfile
+
+from io import UnsupportedOperation
+
+from IPython import get_ipython
+from IPython.core.display import display
+from IPython.core.error import TryNext
+from IPython.utils.data import chop
+from IPython.utils.process import system
+from IPython.utils.terminal import get_terminal_size
+from IPython.utils import py3compat
+
+
+def display_page(strng, start=0, screen_lines=25):
+ """Just display, no paging. screen_lines is ignored."""
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ if start:
+ strng = u'\n'.join(strng.splitlines()[start:])
data = { 'text/plain': strng }
- display(data, raw=True)
-
-
-def as_hook(page_func):
- """Wrap a pager func to strip the `self` arg
-
- so it can be called as a hook.
- """
- return lambda self, *args, **kwargs: page_func(*args, **kwargs)
-
-
-esc_re = re.compile(r"(\x1b[^m]+m)")
-
-def page_dumb(strng, start=0, screen_lines=25):
- """Very dumb 'pager' in Python, for when nothing else works.
-
- Only moves forward, same interface as page(), except for pager_cmd and
+ display(data, raw=True)
+
+
+def as_hook(page_func):
+ """Wrap a pager func to strip the `self` arg
+
+ so it can be called as a hook.
+ """
+ return lambda self, *args, **kwargs: page_func(*args, **kwargs)
+
+
+esc_re = re.compile(r"(\x1b[^m]+m)")
+
+def page_dumb(strng, start=0, screen_lines=25):
+ """Very dumb 'pager' in Python, for when nothing else works.
+
+ Only moves forward, same interface as page(), except for pager_cmd and
mode.
"""
if isinstance(strng, dict):
strng = strng.get('text/plain', '')
- out_ln = strng.splitlines()[start:]
- screens = chop(out_ln,screen_lines-1)
- if len(screens) == 1:
+ out_ln = strng.splitlines()[start:]
+ screens = chop(out_ln,screen_lines-1)
+ if len(screens) == 1:
print(os.linesep.join(screens[0]))
- else:
- last_escape = ""
- for scr in screens[0:-1]:
- hunk = os.linesep.join(scr)
+ else:
+ last_escape = ""
+ for scr in screens[0:-1]:
+ hunk = os.linesep.join(scr)
print(last_escape + hunk)
- if not page_more():
- return
- esc_list = esc_re.findall(hunk)
- if len(esc_list) > 0:
- last_escape = esc_list[-1]
+ if not page_more():
+ return
+ esc_list = esc_re.findall(hunk)
+ if len(esc_list) > 0:
+ last_escape = esc_list[-1]
print(last_escape + os.linesep.join(screens[-1]))
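page_dumb leans on chop (from IPython.utils.data) to slice the output into screenfuls, and re-emits the last ANSI escape before each chunk so colors survive the pauses. The chunking itself, assuming chop is a plain fixed-size slicer:

    def chop(seq, size):
        """Slice seq into consecutive chunks of at most size items."""
        return [seq[i:i + size] for i in range(0, len(seq), size)]

    lines = ['line %d' % i for i in range(7)]
    for screen in chop(lines, 3):
        print('\n'.join(screen))
        # a real pager would call page_more() and wait for a keypress here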
-
-def _detect_screen_size(screen_lines_def):
- """Attempt to work out the number of lines on the screen.
-
- This is called by page(). It can raise an error (e.g. when run in the
- test suite), so it's separated out so it can easily be called in a try block.
- """
- TERM = os.environ.get('TERM',None)
- if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
- # curses causes problems on many terminals other than xterm, and
- # some termios calls lock up on Sun OS5.
- return screen_lines_def
-
- try:
- import termios
- import curses
- except ImportError:
- return screen_lines_def
-
- # There is a bug in curses, where *sometimes* it fails to properly
- # initialize, and then after the endwin() call is made, the
- # terminal is left in an unusable state. Rather than trying to
- # check every time for this (by requesting and comparing termios
- # flags each time), we just save the initial terminal state and
- # unconditionally reset it every time. It's cheaper than making
- # the checks.
- try:
- term_flags = termios.tcgetattr(sys.stdout)
- except termios.error as err:
- # can fail on Linux 2.6, pager_page will catch the TypeError
- raise TypeError('termios error: {0}'.format(err))
-
- # Curses modifies the stdout buffer size by default, which messes
- # up Python's normal stdout buffering. This would manifest itself
- # to IPython users as delayed printing on stdout after having used
- # the pager.
- #
- # We can prevent this by manually setting the NCURSES_NO_SETBUF
- # environment variable. For more details, see:
- # http://bugs.python.org/issue10144
- NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
- os.environ['NCURSES_NO_SETBUF'] = ''
-
- # Proceed with curses initialization
- try:
- scr = curses.initscr()
- except AttributeError:
- # Curses on Solaris may not be complete, so we can't use it there
- return screen_lines_def
-
- screen_lines_real,screen_cols = scr.getmaxyx()
- curses.endwin()
-
- # Restore environment
- if NCURSES_NO_SETBUF is None:
- del os.environ['NCURSES_NO_SETBUF']
- else:
- os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
-
- # Restore terminal state in case endwin() didn't.
- termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
- # Now we have what we needed: the screen size in rows/columns
- return screen_lines_real
- #print '***Screen size:',screen_lines_real,'lines x',\
- #screen_cols,'columns.' # dbg
-
-def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
- """Display a string, piping through a pager after a certain length.
-
- strng can be a mime-bundle dict, supplying multiple representations,
- keyed by mime-type.
-
- The screen_lines parameter specifies the number of *usable* lines of your
- terminal screen (total lines minus lines you need to reserve to show other
- information).
-
- If you set screen_lines to a number <=0, page() will try to auto-determine
- your screen size and will only use up to (screen_size+screen_lines) for
- printing, paging after that. That is, if you want auto-detection but need
- to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
- auto-detection without any lines reserved simply use screen_lines = 0.
-
- If a string won't fit in the allowed lines, it is sent through the
- specified pager command. If none given, look for PAGER in the environment,
- and ultimately default to less.
-
- If no system pager works, the string is sent through a simplistic
- 'dumb pager' written in Python.
- """
-
- # for compatibility with mime-bundle form:
- if isinstance(strng, dict):
- strng = strng['text/plain']
-
- # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
- TERM = os.environ.get('TERM','dumb')
- if TERM in ['dumb','emacs'] and os.name != 'nt':
- print(strng)
- return
- # chop off the topmost part of the string we don't want to see
- str_lines = strng.splitlines()[start:]
- str_toprint = os.linesep.join(str_lines)
- num_newlines = len(str_lines)
- len_str = len(str_toprint)
-
- # Dumb heuristics to guesstimate number of on-screen lines the string
- # takes. Very basic, but good enough for docstrings in reasonable
- # terminals. If someone later feels like refining it, it's not hard.
- numlines = max(num_newlines,int(len_str/80)+1)
-
- screen_lines_def = get_terminal_size()[1]
-
- # auto-determine screen size
- if screen_lines <= 0:
- try:
- screen_lines += _detect_screen_size(screen_lines_def)
- except (TypeError, UnsupportedOperation):
+
+def _detect_screen_size(screen_lines_def):
+ """Attempt to work out the number of lines on the screen.
+
+ This is called by page(). It can raise an error (e.g. when run in the
+ test suite), so it's separated out so it can easily be called in a try block.
+ """
+ TERM = os.environ.get('TERM',None)
+ if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
+ # curses causes problems on many terminals other than xterm, and
+ # some termios calls lock up on Sun OS5.
+ return screen_lines_def
+
+ try:
+ import termios
+ import curses
+ except ImportError:
+ return screen_lines_def
+
+ # There is a bug in curses, where *sometimes* it fails to properly
+ # initialize, and then after the endwin() call is made, the
+ # terminal is left in an unusable state. Rather than trying to
+ # check every time for this (by requesting and comparing termios
+ # flags each time), we just save the initial terminal state and
+ # unconditionally reset it every time. It's cheaper than making
+ # the checks.
+ try:
+ term_flags = termios.tcgetattr(sys.stdout)
+ except termios.error as err:
+ # can fail on Linux 2.6, pager_page will catch the TypeError
+ raise TypeError('termios error: {0}'.format(err))
+
+ # Curses modifies the stdout buffer size by default, which messes
+ # up Python's normal stdout buffering. This would manifest itself
+ # to IPython users as delayed printing on stdout after having used
+ # the pager.
+ #
+ # We can prevent this by manually setting the NCURSES_NO_SETBUF
+ # environment variable. For more details, see:
+ # http://bugs.python.org/issue10144
+ NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
+ os.environ['NCURSES_NO_SETBUF'] = ''
+
+ # Proceed with curses initialization
+ try:
+ scr = curses.initscr()
+ except AttributeError:
+ # Curses on Solaris may not be complete, so we can't use it there
+ return screen_lines_def
+
+ screen_lines_real,screen_cols = scr.getmaxyx()
+ curses.endwin()
+
+ # Restore environment
+ if NCURSES_NO_SETBUF is None:
+ del os.environ['NCURSES_NO_SETBUF']
+ else:
+ os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
+
+ # Restore terminal state in case endwin() didn't.
+ termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
+ # Now we have what we needed: the screen size in rows/columns
+ return screen_lines_real
+ #print '***Screen size:',screen_lines_real,'lines x',\
+ #screen_cols,'columns.' # dbg
+
+def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Display a string, piping through a pager after a certain length.
+
+ strng can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type.
+
+ The screen_lines parameter specifies the number of *usable* lines of your
+ terminal screen (total lines minus lines you need to reserve to show other
+ information).
+
+ If you set screen_lines to a number <=0, page() will try to auto-determine
+ your screen size and will only use up to (screen_size+screen_lines) for
+ printing, paging after that. That is, if you want auto-detection but need
+ to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
+ auto-detection without any lines reserved simply use screen_lines = 0.
+
+ If a string won't fit in the allowed lines, it is sent through the
+ specified pager command. If none given, look for PAGER in the environment,
+ and ultimately default to less.
+
+ If no system pager works, the string is sent through a simplistic
+ 'dumb pager' written in Python.
+ """
+
+ # for compatibility with mime-bundle form:
+ if isinstance(strng, dict):
+ strng = strng['text/plain']
+
+ # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
+ TERM = os.environ.get('TERM','dumb')
+ if TERM in ['dumb','emacs'] and os.name != 'nt':
+ print(strng)
+ return
+ # chop off the topmost part of the string we don't want to see
+ str_lines = strng.splitlines()[start:]
+ str_toprint = os.linesep.join(str_lines)
+ num_newlines = len(str_lines)
+ len_str = len(str_toprint)
+
+ # Dumb heuristics to guesstimate number of on-screen lines the string
+ # takes. Very basic, but good enough for docstrings in reasonable
+ # terminals. If someone later feels like refining it, it's not hard.
+ numlines = max(num_newlines,int(len_str/80)+1)
+
+ screen_lines_def = get_terminal_size()[1]
+
+ # auto-determine screen size
+ if screen_lines <= 0:
+ try:
+ screen_lines += _detect_screen_size(screen_lines_def)
+ except (TypeError, UnsupportedOperation):
print(str_toprint)
- return
-
- #print 'numlines',numlines,'screenlines',screen_lines # dbg
- if numlines <= screen_lines :
- #print '*** normal print' # dbg
+ return
+
+ #print 'numlines',numlines,'screenlines',screen_lines # dbg
+ if numlines <= screen_lines :
+ #print '*** normal print' # dbg
print(str_toprint)
- else:
- # Try to open pager and default to internal one if that fails.
- # All failure modes are tagged as 'retval=1', to match the return
- # value of a failed system command. If any intermediate attempt
- # sets retval to 1, at the end we resort to our own page_dumb() pager.
- pager_cmd = get_pager_cmd(pager_cmd)
- pager_cmd += ' ' + get_pager_start(pager_cmd,start)
- if os.name == 'nt':
- if pager_cmd.startswith('type'):
- # The default WinXP 'type' command fails on complex strings.
- retval = 1
- else:
- fd, tmpname = tempfile.mkstemp('.txt')
- try:
- os.close(fd)
- with open(tmpname, 'wt') as tmpfile:
- tmpfile.write(strng)
- cmd = "%s < %s" % (pager_cmd, tmpname)
- # tmpfile needs to be closed for windows
- if os.system(cmd):
- retval = 1
- else:
- retval = None
- finally:
- os.remove(tmpname)
- else:
- try:
- retval = None
- # if I use popen4, things hang. No idea why.
- #pager,shell_out = os.popen4(pager_cmd)
- pager = os.popen(pager_cmd, 'w')
- try:
- pager_encoding = pager.encoding or sys.stdout.encoding
- pager.write(py3compat.cast_bytes_py2(
- strng, encoding=pager_encoding))
- finally:
- retval = pager.close()
- except IOError as msg: # broken pipe when user quits
- if msg.args == (32, 'Broken pipe'):
- retval = None
- else:
- retval = 1
- except OSError:
- # Other strange problems, sometimes seen in Win2k/cygwin
- retval = 1
- if retval is not None:
- page_dumb(strng,screen_lines=screen_lines)
-
-
-def page(data, start=0, screen_lines=0, pager_cmd=None):
- """Display content in a pager, piping through a pager after a certain length.
-
- data can be a mime-bundle dict, supplying multiple representations,
- keyed by mime-type, or text.
-
- Pager is dispatched via the `show_in_pager` IPython hook.
- If no hook is registered, `pager_page` will be used.
- """
- # Some routines may auto-compute start offsets incorrectly and pass a
- # negative value. Offset to 0 for robustness.
- start = max(0, start)
-
- # first, try the hook
- ip = get_ipython()
- if ip:
- try:
- ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
- return
- except TryNext:
- pass
-
- # fallback on default pager
- return pager_page(data, start, screen_lines, pager_cmd)
-
-
-def page_file(fname, start=0, pager_cmd=None):
- """Page a file, using an optional pager command and starting line.
- """
-
- pager_cmd = get_pager_cmd(pager_cmd)
- pager_cmd += ' ' + get_pager_start(pager_cmd,start)
-
- try:
- if os.environ['TERM'] in ['emacs','dumb']:
- raise EnvironmentError
- system(pager_cmd + ' ' + fname)
- except:
- try:
- if start > 0:
- start -= 1
- page(open(fname).read(),start)
- except:
- print('Unable to show file',repr(fname))
-
-
-def get_pager_cmd(pager_cmd=None):
- """Return a pager command.
-
- Makes some attempts at finding an OS-correct one.
- """
- if os.name == 'posix':
+ else:
+ # Try to open pager and default to internal one if that fails.
+ # All failure modes are tagged as 'retval=1', to match the return
+ # value of a failed system command. If any intermediate attempt
+ # sets retval to 1, at the end we resort to our own page_dumb() pager.
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+ if os.name == 'nt':
+ if pager_cmd.startswith('type'):
+ # The default WinXP 'type' command fails on complex strings.
+ retval = 1
+ else:
+ fd, tmpname = tempfile.mkstemp('.txt')
+ try:
+ os.close(fd)
+ with open(tmpname, 'wt') as tmpfile:
+ tmpfile.write(strng)
+ cmd = "%s < %s" % (pager_cmd, tmpname)
+ # tmpfile needs to be closed for windows
+ if os.system(cmd):
+ retval = 1
+ else:
+ retval = None
+ finally:
+ os.remove(tmpname)
+ else:
+ try:
+ retval = None
+ # if I use popen4, things hang. No idea why.
+ #pager,shell_out = os.popen4(pager_cmd)
+ pager = os.popen(pager_cmd, 'w')
+ try:
+ pager_encoding = pager.encoding or sys.stdout.encoding
+ pager.write(py3compat.cast_bytes_py2(
+ strng, encoding=pager_encoding))
+ finally:
+ retval = pager.close()
+ except IOError as msg: # broken pipe when user quits
+ if msg.args == (32, 'Broken pipe'):
+ retval = None
+ else:
+ retval = 1
+ except OSError:
+ # Other strange problems, sometimes seen in Win2k/cygwin
+ retval = 1
+ if retval is not None:
+ page_dumb(strng,screen_lines=screen_lines)
+
+
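
As the pager_page docstring above notes, a non-positive screen_lines asks for auto-detection, with negative values reserving lines at the bottom of the screen. A minimal usage sketch, assuming a running terminal; the sample text and reserved-line count are illustrative, not from the source:

    from IPython.core import page as core_page

    help_text = '\n'.join('line %d of some long help text' % i for i in range(200))

    # Auto-detect the terminal height but keep the bottom 3 lines free:
    # screen_lines=-3 means "detected height minus 3".
    core_page.pager_page(help_text, screen_lines=-3)

    # A mime-bundle dict also works; only the 'text/plain' entry is paged.
    core_page.pager_page({'text/plain': help_text}, start=10)
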
+def page(data, start=0, screen_lines=0, pager_cmd=None):
+ """Display content in a pager, piping through a pager after a certain length.
+
+ data can be a mime-bundle dict, supplying multiple representations,
+ keyed by mime-type, or text.
+
+ Pager is dispatched via the `show_in_pager` IPython hook.
+ If no hook is registered, `pager_page` will be used.
+ """
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+
+ # first, try the hook
+ ip = get_ipython()
+ if ip:
+ try:
+ ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
+ return
+ except TryNext:
+ pass
+
+ # fallback on default pager
+ return pager_page(data, start, screen_lines, pager_cmd)
+
+
+def page_file(fname, start=0, pager_cmd=None):
+ """Page a file, using an optional pager command and starting line.
+ """
+
+ pager_cmd = get_pager_cmd(pager_cmd)
+ pager_cmd += ' ' + get_pager_start(pager_cmd,start)
+
+ try:
+ if os.environ['TERM'] in ['emacs','dumb']:
+ raise EnvironmentError
+ system(pager_cmd + ' ' + fname)
+ except:
+ try:
+ if start > 0:
+ start -= 1
+ page(open(fname).read(),start)
+ except:
+ print('Unable to show file',repr(fname))
+
+
+def get_pager_cmd(pager_cmd=None):
+ """Return a pager command.
+
+ Makes some attempts at finding an OS-correct one.
+ """
+ if os.name == 'posix':
default_pager_cmd = 'less -R' # -R for color control sequences
- elif os.name in ['nt','dos']:
- default_pager_cmd = 'type'
-
- if pager_cmd is None:
- try:
- pager_cmd = os.environ['PAGER']
- except:
- pager_cmd = default_pager_cmd
+ elif os.name in ['nt','dos']:
+ default_pager_cmd = 'type'
+
+ if pager_cmd is None:
+ try:
+ pager_cmd = os.environ['PAGER']
+ except:
+ pager_cmd = default_pager_cmd
if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower():
pager_cmd += ' -R'
- return pager_cmd
-
-
-def get_pager_start(pager, start):
- """Return the string for paging files with an offset.
-
- This is the '+N' argument which less and more (under Unix) accept.
- """
-
- if pager in ['less','more']:
- if start:
- start_string = '+' + str(start)
- else:
- start_string = ''
- else:
- start_string = ''
- return start_string
-
-
-# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
-if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
- import msvcrt
- def page_more():
- """ Smart pausing between pages
-
- @return: True if more lines should be printed, False to quit
- """
+ return pager_cmd
+
+
+def get_pager_start(pager, start):
+ """Return the string for paging files with an offset.
+
+ This is the '+N' argument which less and more (under Unix) accept.
+ """
+
+ if pager in ['less','more']:
+ if start:
+ start_string = '+' + str(start)
+ else:
+ start_string = ''
+ else:
+ start_string = ''
+ return start_string
+
+
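
get_pager_cmd and get_pager_start together build the command string that pager_page and page_file hand to the OS. A short sketch of the composition; note that, as written above, the '+N' offset is only produced when the pager is exactly 'less' or 'more', so a command that already carries options (such as 'less -R') gets an empty offset:

    from IPython.core.page import get_pager_cmd, get_pager_start

    cmd = get_pager_cmd()               # honours $PAGER; 'less -R' on posix, 'type' on nt
    print(get_pager_start('less', 25))  # -> '+25'
    print(get_pager_start(cmd, 25))     # -> '' for 'less -R', as it is not exactly 'less'
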
+# (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
+if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
+ import msvcrt
+ def page_more():
+ """ Smart pausing between pages
+
+ @return: True if more lines should be printed, False to quit
+ """
sys.stdout.write('---Return to continue, q to quit--- ')
- ans = msvcrt.getwch()
- if ans in ("q", "Q"):
- result = False
- else:
- result = True
+ ans = msvcrt.getwch()
+ if ans in ("q", "Q"):
+ result = False
+ else:
+ result = True
sys.stdout.write("\b"*37 + " "*37 + "\b"*37)
- return result
-else:
- def page_more():
- ans = py3compat.input('---Return to continue, q to quit--- ')
- if ans.lower().startswith('q'):
- return False
- else:
- return True
-
-
-def snip_print(str,width = 75,print_full = 0,header = ''):
- """Print a string snipping the midsection to fit in width.
-
- print_full: mode control:
-
- - 0: only snip long strings
- - 1: send to page() directly.
- - 2: snip long strings and ask for full length viewing with page()
-
- Return 1 if snipping was necessary, 0 otherwise."""
-
- if print_full == 1:
- page(header+str)
- return 0
-
- print(header, end=' ')
- if len(str) < width:
- print(str)
- snip = 0
- else:
- whalf = int((width -5)/2)
- print(str[:whalf] + ' <...> ' + str[-whalf:])
- snip = 1
- if snip and print_full == 2:
- if py3compat.input(header+' Snipped. View (y/n)? [N]').lower() == 'y':
- page(str)
- return snip
+ return result
+else:
+ def page_more():
+ ans = py3compat.input('---Return to continue, q to quit--- ')
+ if ans.lower().startswith('q'):
+ return False
+ else:
+ return True
+
+
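
page_more is the pause primitive that page_dumb drives between screens, and any forward-only pager loop can reuse it. A toy sketch under that assumption; the list slicing below stands in for the chop helper that page_dumb uses:

    from IPython.core.page import page_more

    def show_in_screens(lines, screen_lines=24):
        # Illustrative forward-only pager: print a screenful, then ask
        # page_more whether to continue; it returns False on 'q'.
        screens = [lines[i:i + screen_lines]
                   for i in range(0, len(lines), screen_lines)]
        if not screens:
            return
        for screen in screens[:-1]:
            print('\n'.join(screen))
            if not page_more():
                return
        print('\n'.join(screens[-1]))
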
+def snip_print(str,width = 75,print_full = 0,header = ''):
+ """Print a string snipping the midsection to fit in width.
+
+ print_full: mode control:
+
+ - 0: only snip long strings
+ - 1: send to page() directly.
+ - 2: snip long strings and ask for full length viewing with page()
+
+ Return 1 if snipping was necessary, 0 otherwise."""
+
+ if print_full == 1:
+ page(header+str)
+ return 0
+
+ print(header, end=' ')
+ if len(str) < width:
+ print(str)
+ snip = 0
+ else:
+ whalf = int((width -5)/2)
+ print(str[:whalf] + ' <...> ' + str[-whalf:])
+ snip = 1
+ if snip and print_full == 2:
+ if py3compat.input(header+' Snipped. View (y/n)? [N]').lower() == 'y':
+ page(str)
+ return snip
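
The three print_full modes of snip_print are easiest to see side by side; a small illustrative driver (the sample string is made up):

    from IPython.core.page import snip_print

    long_line = 'x' * 200

    snip_print(long_line)                # mode 0: prints 'xx... <...> ...xx', returns 1
    snip_print(long_line, print_full=1)  # mode 1: hands the whole string to page(), returns 0
    snip_print(long_line, print_full=2)  # mode 2: snips, then offers full viewing via page()
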
diff --git a/contrib/python/ipython/py2/IPython/core/payload.py b/contrib/python/ipython/py2/IPython/core/payload.py
index caa9268fe2..6818be1537 100644
--- a/contrib/python/ipython/py2/IPython/core/payload.py
+++ b/contrib/python/ipython/py2/IPython/core/payload.py
@@ -1,55 +1,55 @@
-# -*- coding: utf-8 -*-
-"""Payload system for IPython.
-
-Authors:
-
-* Fernando Perez
-* Brian Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from traitlets.config.configurable import Configurable
-from traitlets import List
-
-#-----------------------------------------------------------------------------
-# Main payload class
-#-----------------------------------------------------------------------------
-
-class PayloadManager(Configurable):
-
- _payload = List([])
-
- def write_payload(self, data, single=True):
- """Include or update the specified `data` payload in the PayloadManager.
-
- If a previous payload with the same source exists and `single` is True,
- it will be overwritten with the new one.
- """
-
- if not isinstance(data, dict):
- raise TypeError('Each payload write must be a dict, got: %r' % data)
-
- if single and 'source' in data:
- source = data['source']
- for i, pl in enumerate(self._payload):
- if 'source' in pl and pl['source'] == source:
- self._payload[i] = data
- return
-
- self._payload.append(data)
-
- def read_payload(self):
- return self._payload
-
- def clear_payload(self):
- self._payload = []
+# -*- coding: utf-8 -*-
+"""Payload system for IPython.
+
+Authors:
+
+* Fernando Perez
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from traitlets.config.configurable import Configurable
+from traitlets import List
+
+#-----------------------------------------------------------------------------
+# Main payload class
+#-----------------------------------------------------------------------------
+
+class PayloadManager(Configurable):
+
+ _payload = List([])
+
+ def write_payload(self, data, single=True):
+ """Include or update the specified `data` payload in the PayloadManager.
+
+ If a previous payload with the same source exists and `single` is True,
+ it will be overwritten with the new one.
+ """
+
+ if not isinstance(data, dict):
+ raise TypeError('Each payload write must be a dict, got: %r' % data)
+
+ if single and 'source' in data:
+ source = data['source']
+ for i, pl in enumerate(self._payload):
+ if 'source' in pl and pl['source'] == source:
+ self._payload[i] = data
+ return
+
+ self._payload.append(data)
+
+ def read_payload(self):
+ return self._payload
+
+ def clear_payload(self):
+ self._payload = []
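
Because write_payload replaces an existing payload with the same 'source' when single=True, repeated writes from one producer collapse into a single entry. A standalone sketch; in practice the manager is reached through shell.payload_manager rather than constructed directly:

    from IPython.core.payload import PayloadManager

    pm = PayloadManager()
    pm.write_payload({'source': 'page', 'data': {'text/plain': 'first'}})
    pm.write_payload({'source': 'page', 'data': {'text/plain': 'second'}})
    print(len(pm.read_payload()))  # 1: same source, so the first entry was replaced

    pm.write_payload({'source': 'page', 'data': {'text/plain': 'third'}}, single=False)
    print(len(pm.read_payload()))  # 2: single=False appends instead of replacing
    pm.clear_payload()
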
diff --git a/contrib/python/ipython/py2/IPython/core/payloadpage.py b/contrib/python/ipython/py2/IPython/core/payloadpage.py
index 43ac441631..eb613445dd 100644
--- a/contrib/python/ipython/py2/IPython/core/payloadpage.py
+++ b/contrib/python/ipython/py2/IPython/core/payloadpage.py
@@ -1,52 +1,52 @@
-# encoding: utf-8
-"""A payload based version of page."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import warnings
-from IPython.core.getipython import get_ipython
-
-
-def page(strng, start=0, screen_lines=0, pager_cmd=None):
- """Print a string, piping through a pager.
-
- This version ignores the screen_lines and pager_cmd arguments and uses
- IPython's payload system instead.
-
- Parameters
- ----------
- strng : str or mime-dict
- Text to page, or a mime-type keyed dict of already formatted data.
-
- start : int
- Starting line at which to place the display.
- """
-
- # Some routines may auto-compute start offsets incorrectly and pass a
- # negative value. Offset to 0 for robustness.
- start = max(0, start)
- shell = get_ipython()
-
- if isinstance(strng, dict):
- data = strng
- else:
- data = {'text/plain' : strng}
- payload = dict(
- source='page',
- data=data,
- start=start,
- )
- shell.payload_manager.write_payload(payload)
-
-
-def install_payload_page():
- """DEPRECATED, use show_in_pager hook
-
- Install this version of page as IPython.core.page.page.
- """
- warnings.warn("""install_payload_page is deprecated.
- Use `ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))`
- """)
- from IPython.core import page as corepage
- corepage.page = page
+# encoding: utf-8
+"""A payload based version of page."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import warnings
+from IPython.core.getipython import get_ipython
+
+
+def page(strng, start=0, screen_lines=0, pager_cmd=None):
+ """Print a string, piping through a pager.
+
+ This version ignores the screen_lines and pager_cmd arguments and uses
+ IPython's payload system instead.
+
+ Parameters
+ ----------
+ strng : str or mime-dict
+ Text to page, or a mime-type keyed dict of already formatted data.
+
+ start : int
+ Starting line at which to place the display.
+ """
+
+ # Some routines may auto-compute start offsets incorrectly and pass a
+ # negative value. Offset to 0 for robustness.
+ start = max(0, start)
+ shell = get_ipython()
+
+ if isinstance(strng, dict):
+ data = strng
+ else:
+ data = {'text/plain' : strng}
+ payload = dict(
+ source='page',
+ data=data,
+ start=start,
+ )
+ shell.payload_manager.write_payload(payload)
+
+
+def install_payload_page():
+ """DEPRECATED, use show_in_pager hook
+
+ Install this version of page as IPython.core.page.page.
+ """
+ warnings.warn("""install_payload_page is deprecated.
+ Use `ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))`
+ """)
+ from IPython.core import page as corepage
+ corepage.page = page
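
As the deprecation warning above indicates, the supported route is the show_in_pager hook rather than rebinding IPython.core.page.page. A sketch of that registration, which only makes sense inside a running IPython session:

    from IPython.core import page, payloadpage
    from IPython.core.getipython import get_ipython

    ip = get_ipython()  # None outside a live IPython session
    ip.set_hook('show_in_pager', page.as_hook(payloadpage.page))
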
diff --git a/contrib/python/ipython/py2/IPython/core/prefilter.py b/contrib/python/ipython/py2/IPython/core/prefilter.py
index 953b6d2d43..cbed3fd80a 100644
--- a/contrib/python/ipython/py2/IPython/core/prefilter.py
+++ b/contrib/python/ipython/py2/IPython/core/prefilter.py
@@ -1,700 +1,700 @@
-# encoding: utf-8
-"""
-Prefiltering components.
-
-Prefilters transform user input before it is exec'd by Python. These
-transforms are used to implement additional syntax such as !ls and %magic.
-"""
-
+# encoding: utf-8
+"""
+Prefiltering components.
+
+Prefilters transform user input before it is exec'd by Python. These
+transforms are used to implement additional syntax such as !ls and %magic.
+"""
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-from keyword import iskeyword
-import re
-
-from IPython.core.autocall import IPyAutocall
-from traitlets.config.configurable import Configurable
-from IPython.core.inputsplitter import (
- ESC_MAGIC,
- ESC_QUOTE,
- ESC_QUOTE2,
- ESC_PAREN,
-)
-from IPython.core.macro import Macro
-from IPython.core.splitinput import LineInfo
-
-from traitlets import (
+
+from keyword import iskeyword
+import re
+
+from IPython.core.autocall import IPyAutocall
+from traitlets.config.configurable import Configurable
+from IPython.core.inputsplitter import (
+ ESC_MAGIC,
+ ESC_QUOTE,
+ ESC_QUOTE2,
+ ESC_PAREN,
+)
+from IPython.core.macro import Macro
+from IPython.core.splitinput import LineInfo
+
+from traitlets import (
List, Integer, Unicode, Bool, Instance, CRegExp
-)
-
-#-----------------------------------------------------------------------------
-# Global utilities, errors and constants
-#-----------------------------------------------------------------------------
-
-
-class PrefilterError(Exception):
- pass
-
-
-# RegExp to identify potential function names
-re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
-
-# RegExp to exclude strings with this start from autocalling. In
-# particular, all binary operators should be excluded, so that if foo is
-# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
-# characters '!=()' don't need to be checked for, as the checkPythonChars
- # routine explicitly does so, to catch direct calls and rebindings of
-# existing names.
-
-# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
-# it affects the rest of the group in square brackets.
-re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
- r'|^is |^not |^in |^and |^or ')
-
-# try to catch also methods for stuff in lists/tuples/dicts: off
-# (experimental). For this to work, the line_split regexp would need
-# to be modified so it wouldn't break things at '['. That line is
-# nasty enough that I shouldn't change it until I can test it _well_.
-#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
-
-
-# Handler Check Utilities
-def is_shadowed(identifier, ip):
- """Is the given identifier defined in one of the namespaces which shadow
- the alias and magic namespaces? Note that an identifier is different
- from ifun, because it cannot contain a '.' character."""
- # This is much safer than calling ofind, which can change state
- return (identifier in ip.user_ns \
- or identifier in ip.user_global_ns \
- or identifier in ip.ns_table['builtin']\
- or iskeyword(identifier))
-
-
-#-----------------------------------------------------------------------------
-# Main Prefilter manager
-#-----------------------------------------------------------------------------
-
-
-class PrefilterManager(Configurable):
- """Main prefilter component.
-
- The IPython prefilter is run on all user input before it is executed. The
- prefilter consumes lines of input and produces transformed lines of
- input.
-
- The implementation consists of two phases:
-
- 1. Transformers
- 2. Checkers and handlers
-
- Over time, we plan on deprecating the checkers and handlers and doing
- everything in the transformers.
-
- The transformers are instances of :class:`PrefilterTransformer` and have
- a single method :meth:`transform` that takes a line and returns a
- transformed line. The transformation can be accomplished using any
- tool, but our current ones use regular expressions for speed.
-
- After all the transformers have been run, the line is fed to the checkers,
- which are instances of :class:`PrefilterChecker`. The line is passed to
- the :meth:`check` method, which either returns `None` or a
- :class:`PrefilterHandler` instance. If `None` is returned, the other
- checkers are tried. If a :class:`PrefilterHandler` instance is returned,
- the line is passed to the :meth:`handle` method of the returned
- handler and no further checkers are tried.
-
- Both transformers and checkers have a `priority` attribute that determines
- the order in which they are called. Smaller priorities are tried first.
-
- Both transformers and checkers also have an `enabled` attribute, which is
- a boolean that determines if the instance is used.
-
- Users or developers can change the priority or enabled attribute of
- transformers or checkers, but they must call the :meth:`sort_checkers`
- or :meth:`sort_transformers` method after changing the priority.
- """
-
+)
+
+#-----------------------------------------------------------------------------
+# Global utilities, errors and constants
+#-----------------------------------------------------------------------------
+
+
+class PrefilterError(Exception):
+ pass
+
+
+# RegExp to identify potential function names
+re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
+
+# RegExp to exclude strings with this start from autocalling. In
+# particular, all binary operators should be excluded, so that if foo is
+# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
+# characters '!=()' don't need to be checked for, as the checkPythonChars
+ # routine explicitly does so, to catch direct calls and rebindings of
+# existing names.
+
+# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
+# it affects the rest of the group in square brackets.
+re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
+ r'|^is |^not |^in |^and |^or ')
+
+# try to catch also methods for stuff in lists/tuples/dicts: off
+# (experimental). For this to work, the line_split regexp would need
+# to be modified so it wouldn't break things at '['. That line is
+# nasty enough that I shouldn't change it until I can test it _well_.
+#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
+
+
+# Handler Check Utilities
+def is_shadowed(identifier, ip):
+ """Is the given identifier defined in one of the namespaces which shadow
+ the alias and magic namespaces? Note that an identifier is different
+ from ifun, because it cannot contain a '.' character."""
+ # This is much safer than calling ofind, which can change state
+ return (identifier in ip.user_ns \
+ or identifier in ip.user_global_ns \
+ or identifier in ip.ns_table['builtin']\
+ or iskeyword(identifier))
+
+
+#-----------------------------------------------------------------------------
+# Main Prefilter manager
+#-----------------------------------------------------------------------------
+
+
+class PrefilterManager(Configurable):
+ """Main prefilter component.
+
+ The IPython prefilter is run on all user input before it is executed. The
+ prefilter consumes lines of input and produces transformed lines of
+ input.
+
+ The implementation consists of two phases:
+
+ 1. Transformers
+ 2. Checkers and handlers
+
+ Over time, we plan on deprecating the checkers and handlers and doing
+ everything in the transformers.
+
+ The transformers are instances of :class:`PrefilterTransformer` and have
+ a single method :meth:`transform` that takes a line and returns a
+ transformed line. The transformation can be accomplished using any
+ tool, but our current ones use regular expressions for speed.
+
+ After all the transformers have been run, the line is fed to the checkers,
+ which are instances of :class:`PrefilterChecker`. The line is passed to
+ the :meth:`check` method, which either returns `None` or a
+ :class:`PrefilterHandler` instance. If `None` is returned, the other
+ checkers are tried. If a :class:`PrefilterHandler` instance is returned,
+ the line is passed to the :meth:`handle` method of the returned
+ handler and no further checkers are tried.
+
+ Both transformers and checkers have a `priority` attribute that determines
+ the order in which they are called. Smaller priorities are tried first.
+
+ Both transformers and checkers also have an `enabled` attribute, which is
+ a boolean that determines if the instance is used.
+
+ Users or developers can change the priority or enabled attribute of
+ transformers or checkers, but they must call the :meth:`sort_checkers`
+ or :meth:`sort_transformers` method after changing the priority.
+ """
+
multi_line_specials = Bool(True).tag(config=True)
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
-
- def __init__(self, shell=None, **kwargs):
- super(PrefilterManager, self).__init__(shell=shell, **kwargs)
- self.shell = shell
- self.init_transformers()
- self.init_handlers()
- self.init_checkers()
-
- #-------------------------------------------------------------------------
- # API for managing transformers
- #-------------------------------------------------------------------------
-
- def init_transformers(self):
- """Create the default transformers."""
- self._transformers = []
- for transformer_cls in _default_transformers:
- transformer_cls(
- shell=self.shell, prefilter_manager=self, parent=self
- )
-
- def sort_transformers(self):
- """Sort the transformers by priority.
-
- This must be called after the priority of a transformer is changed.
- The :meth:`register_transformer` method calls this automatically.
- """
- self._transformers.sort(key=lambda x: x.priority)
-
- @property
- def transformers(self):
- """Return a list of checkers, sorted by priority."""
- return self._transformers
-
- def register_transformer(self, transformer):
- """Register a transformer instance."""
- if transformer not in self._transformers:
- self._transformers.append(transformer)
- self.sort_transformers()
-
- def unregister_transformer(self, transformer):
- """Unregister a transformer instance."""
- if transformer in self._transformers:
- self._transformers.remove(transformer)
-
- #-------------------------------------------------------------------------
- # API for managing checkers
- #-------------------------------------------------------------------------
-
- def init_checkers(self):
- """Create the default checkers."""
- self._checkers = []
- for checker in _default_checkers:
- checker(
- shell=self.shell, prefilter_manager=self, parent=self
- )
-
- def sort_checkers(self):
- """Sort the checkers by priority.
-
- This must be called after the priority of a checker is changed.
- The :meth:`register_checker` method calls this automatically.
- """
- self._checkers.sort(key=lambda x: x.priority)
-
- @property
- def checkers(self):
- """Return a list of checkers, sorted by priority."""
- return self._checkers
-
- def register_checker(self, checker):
- """Register a checker instance."""
- if checker not in self._checkers:
- self._checkers.append(checker)
- self.sort_checkers()
-
- def unregister_checker(self, checker):
- """Unregister a checker instance."""
- if checker in self._checkers:
- self._checkers.remove(checker)
-
- #-------------------------------------------------------------------------
- # API for managing handlers
- #-------------------------------------------------------------------------
-
- def init_handlers(self):
- """Create the default handlers."""
- self._handlers = {}
- self._esc_handlers = {}
- for handler in _default_handlers:
- handler(
- shell=self.shell, prefilter_manager=self, parent=self
- )
-
- @property
- def handlers(self):
- """Return a dict of all the handlers."""
- return self._handlers
-
- def register_handler(self, name, handler, esc_strings):
- """Register a handler instance by name with esc_strings."""
- self._handlers[name] = handler
- for esc_str in esc_strings:
- self._esc_handlers[esc_str] = handler
-
- def unregister_handler(self, name, handler, esc_strings):
- """Unregister a handler instance by name with esc_strings."""
- try:
- del self._handlers[name]
- except KeyError:
- pass
- for esc_str in esc_strings:
- h = self._esc_handlers.get(esc_str)
- if h is handler:
- del self._esc_handlers[esc_str]
-
- def get_handler_by_name(self, name):
- """Get a handler by its name."""
- return self._handlers.get(name)
-
- def get_handler_by_esc(self, esc_str):
- """Get a handler by its escape string."""
- return self._esc_handlers.get(esc_str)
-
- #-------------------------------------------------------------------------
- # Main prefiltering API
- #-------------------------------------------------------------------------
-
- def prefilter_line_info(self, line_info):
- """Prefilter a line that has been converted to a LineInfo object.
-
- This implements the checker/handler part of the prefilter pipe.
- """
- # print "prefilter_line_info: ", line_info
- handler = self.find_handler(line_info)
- return handler.handle(line_info)
-
- def find_handler(self, line_info):
- """Find a handler for the line_info by trying checkers."""
- for checker in self.checkers:
- if checker.enabled:
- handler = checker.check(line_info)
- if handler:
- return handler
- return self.get_handler_by_name('normal')
-
- def transform_line(self, line, continue_prompt):
- """Calls the enabled transformers in order of increasing priority."""
- for transformer in self.transformers:
- if transformer.enabled:
- line = transformer.transform(line, continue_prompt)
- return line
-
- def prefilter_line(self, line, continue_prompt=False):
- """Prefilter a single input line as text.
-
- This method prefilters a single line of text by calling the
- transformers and then the checkers/handlers.
- """
-
- # print "prefilter_line: ", line, continue_prompt
- # All handlers *must* return a value, even if it's blank ('').
-
- # save the line away in case we crash, so the post-mortem handler can
- # record it
- self.shell._last_input_line = line
-
- if not line:
- # Return immediately on purely empty lines, so that if the user
- # previously typed some whitespace that started a continuation
- # prompt, he can break out of that loop with just an empty line.
- # This is how the default python prompt works.
- return ''
-
- # At this point, we invoke our transformers.
- if not continue_prompt or (continue_prompt and self.multi_line_specials):
- line = self.transform_line(line, continue_prompt)
-
- # Now we compute line_info for the checkers and handlers
- line_info = LineInfo(line, continue_prompt)
-
- # the input history needs to track even empty lines
- stripped = line.strip()
-
- normal_handler = self.get_handler_by_name('normal')
- if not stripped:
- return normal_handler.handle(line_info)
-
- # special handlers are only allowed for single line statements
- if continue_prompt and not self.multi_line_specials:
- return normal_handler.handle(line_info)
-
- prefiltered = self.prefilter_line_info(line_info)
- # print "prefiltered line: %r" % prefiltered
- return prefiltered
-
- def prefilter_lines(self, lines, continue_prompt=False):
- """Prefilter multiple input lines of text.
-
- This is the main entry point for prefiltering multiple lines of
- input. This simply calls :meth:`prefilter_line` for each line of
- input.
-
- This covers cases where there are multiple lines in the user entry,
- which is the case when the user goes back to a multiline history
- entry and presses enter.
- """
- llines = lines.rstrip('\n').split('\n')
- # We can get multiple lines in one shot, where multiline input 'blends'
- # into one line, in cases like recalling from the readline history
- # buffer. We need to make sure that in such cases, we correctly
- # communicate downstream which line is first and which are continuation
- # ones.
- if len(llines) > 1:
- out = '\n'.join([self.prefilter_line(line, lnum>0)
- for lnum, line in enumerate(llines) ])
- else:
- out = self.prefilter_line(llines[0], continue_prompt)
-
- return out
-
-#-----------------------------------------------------------------------------
-# Prefilter transformers
-#-----------------------------------------------------------------------------
-
-
-class PrefilterTransformer(Configurable):
- """Transform a line of user input."""
-
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+
+ def __init__(self, shell=None, **kwargs):
+ super(PrefilterManager, self).__init__(shell=shell, **kwargs)
+ self.shell = shell
+ self.init_transformers()
+ self.init_handlers()
+ self.init_checkers()
+
+ #-------------------------------------------------------------------------
+ # API for managing transformers
+ #-------------------------------------------------------------------------
+
+ def init_transformers(self):
+ """Create the default transformers."""
+ self._transformers = []
+ for transformer_cls in _default_transformers:
+ transformer_cls(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ def sort_transformers(self):
+ """Sort the transformers by priority.
+
+ This must be called after the priority of a transformer is changed.
+ The :meth:`register_transformer` method calls this automatically.
+ """
+ self._transformers.sort(key=lambda x: x.priority)
+
+ @property
+ def transformers(self):
+ """Return a list of checkers, sorted by priority."""
+ return self._transformers
+
+ def register_transformer(self, transformer):
+ """Register a transformer instance."""
+ if transformer not in self._transformers:
+ self._transformers.append(transformer)
+ self.sort_transformers()
+
+ def unregister_transformer(self, transformer):
+ """Unregister a transformer instance."""
+ if transformer in self._transformers:
+ self._transformers.remove(transformer)
+
+ #-------------------------------------------------------------------------
+ # API for managing checkers
+ #-------------------------------------------------------------------------
+
+ def init_checkers(self):
+ """Create the default checkers."""
+ self._checkers = []
+ for checker in _default_checkers:
+ checker(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ def sort_checkers(self):
+ """Sort the checkers by priority.
+
+ This must be called after the priority of a checker is changed.
+ The :meth:`register_checker` method calls this automatically.
+ """
+ self._checkers.sort(key=lambda x: x.priority)
+
+ @property
+ def checkers(self):
+ """Return a list of checkers, sorted by priority."""
+ return self._checkers
+
+ def register_checker(self, checker):
+ """Register a checker instance."""
+ if checker not in self._checkers:
+ self._checkers.append(checker)
+ self.sort_checkers()
+
+ def unregister_checker(self, checker):
+ """Unregister a checker instance."""
+ if checker in self._checkers:
+ self._checkers.remove(checker)
+
+ #-------------------------------------------------------------------------
+ # API for managing handlers
+ #-------------------------------------------------------------------------
+
+ def init_handlers(self):
+ """Create the default handlers."""
+ self._handlers = {}
+ self._esc_handlers = {}
+ for handler in _default_handlers:
+ handler(
+ shell=self.shell, prefilter_manager=self, parent=self
+ )
+
+ @property
+ def handlers(self):
+ """Return a dict of all the handlers."""
+ return self._handlers
+
+ def register_handler(self, name, handler, esc_strings):
+ """Register a handler instance by name with esc_strings."""
+ self._handlers[name] = handler
+ for esc_str in esc_strings:
+ self._esc_handlers[esc_str] = handler
+
+ def unregister_handler(self, name, handler, esc_strings):
+ """Unregister a handler instance by name with esc_strings."""
+ try:
+ del self._handlers[name]
+ except KeyError:
+ pass
+ for esc_str in esc_strings:
+ h = self._esc_handlers.get(esc_str)
+ if h is handler:
+ del self._esc_handlers[esc_str]
+
+ def get_handler_by_name(self, name):
+ """Get a handler by its name."""
+ return self._handlers.get(name)
+
+ def get_handler_by_esc(self, esc_str):
+ """Get a handler by its escape string."""
+ return self._esc_handlers.get(esc_str)
+
+ #-------------------------------------------------------------------------
+ # Main prefiltering API
+ #-------------------------------------------------------------------------
+
+ def prefilter_line_info(self, line_info):
+ """Prefilter a line that has been converted to a LineInfo object.
+
+ This implements the checker/handler part of the prefilter pipe.
+ """
+ # print "prefilter_line_info: ", line_info
+ handler = self.find_handler(line_info)
+ return handler.handle(line_info)
+
+ def find_handler(self, line_info):
+ """Find a handler for the line_info by trying checkers."""
+ for checker in self.checkers:
+ if checker.enabled:
+ handler = checker.check(line_info)
+ if handler:
+ return handler
+ return self.get_handler_by_name('normal')
+
+ def transform_line(self, line, continue_prompt):
+ """Calls the enabled transformers in order of increasing priority."""
+ for transformer in self.transformers:
+ if transformer.enabled:
+ line = transformer.transform(line, continue_prompt)
+ return line
+
+ def prefilter_line(self, line, continue_prompt=False):
+ """Prefilter a single input line as text.
+
+ This method prefilters a single line of text by calling the
+ transformers and then the checkers/handlers.
+ """
+
+ # print "prefilter_line: ", line, continue_prompt
+ # All handlers *must* return a value, even if it's blank ('').
+
+ # save the line away in case we crash, so the post-mortem handler can
+ # record it
+ self.shell._last_input_line = line
+
+ if not line:
+ # Return immediately on purely empty lines, so that if the user
+ # previously typed some whitespace that started a continuation
+ # prompt, he can break out of that loop with just an empty line.
+ # This is how the default python prompt works.
+ return ''
+
+ # At this point, we invoke our transformers.
+ if not continue_prompt or (continue_prompt and self.multi_line_specials):
+ line = self.transform_line(line, continue_prompt)
+
+ # Now we compute line_info for the checkers and handlers
+ line_info = LineInfo(line, continue_prompt)
+
+ # the input history needs to track even empty lines
+ stripped = line.strip()
+
+ normal_handler = self.get_handler_by_name('normal')
+ if not stripped:
+ return normal_handler.handle(line_info)
+
+ # special handlers are only allowed for single line statements
+ if continue_prompt and not self.multi_line_specials:
+ return normal_handler.handle(line_info)
+
+ prefiltered = self.prefilter_line_info(line_info)
+ # print "prefiltered line: %r" % prefiltered
+ return prefiltered
+
+ def prefilter_lines(self, lines, continue_prompt=False):
+ """Prefilter multiple input lines of text.
+
+ This is the main entry point for prefiltering multiple lines of
+ input. This simply calls :meth:`prefilter_line` for each line of
+ input.
+
+ This covers cases where there are multiple lines in the user entry,
+ which is the case when the user goes back to a multiline history
+ entry and presses enter.
+ """
+ llines = lines.rstrip('\n').split('\n')
+ # We can get multiple lines in one shot, where multiline input 'blends'
+ # into one line, in cases like recalling from the readline history
+ # buffer. We need to make sure that in such cases, we correctly
+ # communicate downstream which line is first and which are continuation
+ # ones.
+ if len(llines) > 1:
+ out = '\n'.join([self.prefilter_line(line, lnum>0)
+ for lnum, line in enumerate(llines) ])
+ else:
+ out = self.prefilter_line(llines[0], continue_prompt)
+
+ return out
+
+#-----------------------------------------------------------------------------
+# Prefilter transformers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterTransformer(Configurable):
+ """Transform a line of user input."""
+
priority = Integer(100).tag(config=True)
- # Transformers don't currently use shell or prefilter_manager, but as we
- # move away from checkers and handlers, they will need them.
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
- prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ # Transformers don't currently use shell or prefilter_manager, but as we
+ # move away from checkers and handlers, they will need them.
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
-
- def __init__(self, shell=None, prefilter_manager=None, **kwargs):
- super(PrefilterTransformer, self).__init__(
- shell=shell, prefilter_manager=prefilter_manager, **kwargs
- )
- self.prefilter_manager.register_transformer(self)
-
- def transform(self, line, continue_prompt):
- """Transform a line, returning the new one."""
- return None
-
- def __repr__(self):
- return "<%s(priority=%r, enabled=%r)>" % (
- self.__class__.__name__, self.priority, self.enabled)
-
-
-#-----------------------------------------------------------------------------
-# Prefilter checkers
-#-----------------------------------------------------------------------------
-
-
-class PrefilterChecker(Configurable):
- """Inspect an input line and return a handler for that line."""
-
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterTransformer, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_transformer(self)
+
+ def transform(self, line, continue_prompt):
+ """Transform a line, returning the new one."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
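
Since the PrefilterTransformer constructor registers the instance with the prefilter manager, adding a transformer is just subclassing and overriding transform. A hedged sketch for a live session; the whitespace-stripping rule is invented for illustration:

    from IPython.core.getipython import get_ipython
    from IPython.core.prefilter import PrefilterTransformer

    class StripTrailingWhitespaceTransformer(PrefilterTransformer):
        # Illustrative only: normalize each line before the checkers see it.
        def transform(self, line, continue_prompt):
            return line.rstrip()

    ip = get_ipython()
    # Instantiating is enough: __init__ calls register_transformer on the manager.
    StripTrailingWhitespaceTransformer(shell=ip,
                                       prefilter_manager=ip.prefilter_manager,
                                       parent=ip.prefilter_manager)
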
+
+#-----------------------------------------------------------------------------
+# Prefilter checkers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterChecker(Configurable):
+ """Inspect an input line and return a handler for that line."""
+
priority = Integer(100).tag(config=True)
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
- prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
-
- def __init__(self, shell=None, prefilter_manager=None, **kwargs):
- super(PrefilterChecker, self).__init__(
- shell=shell, prefilter_manager=prefilter_manager, **kwargs
- )
- self.prefilter_manager.register_checker(self)
-
- def check(self, line_info):
- """Inspect line_info and return a handler instance or None."""
- return None
-
- def __repr__(self):
- return "<%s(priority=%r, enabled=%r)>" % (
- self.__class__.__name__, self.priority, self.enabled)
-
-
-class EmacsChecker(PrefilterChecker):
-
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterChecker, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_checker(self)
+
+ def check(self, line_info):
+ """Inspect line_info and return a handler instance or None."""
+ return None
+
+ def __repr__(self):
+ return "<%s(priority=%r, enabled=%r)>" % (
+ self.__class__.__name__, self.priority, self.enabled)
+
+
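
Checkers follow the same registration pattern: check either returns None to let later checkers inspect the line, or a handler instance to claim it, exactly as the built-in checkers below do. A minimal sketch with an invented trigger string:

    from IPython.core.getipython import get_ipython
    from IPython.core.prefilter import PrefilterChecker
    from traitlets import Integer

    class PlainLineChecker(PrefilterChecker):
        # Illustrative only: force the normal handler for lines ending in '# PLAIN'.
        priority = Integer(50).tag(config=True)  # smaller priority runs earlier

        def check(self, line_info):
            if line_info.line.endswith('# PLAIN'):
                return self.prefilter_manager.get_handler_by_name('normal')
            return None

    ip = get_ipython()
    # As with transformers, constructing an instance registers it with the manager.
    PlainLineChecker(shell=ip, prefilter_manager=ip.prefilter_manager,
                     parent=ip.prefilter_manager)
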
+class EmacsChecker(PrefilterChecker):
+
priority = Integer(100).tag(config=True)
enabled = Bool(False).tag(config=True)
-
- def check(self, line_info):
- "Emacs ipython-mode tags certain input lines."
- if line_info.line.endswith('# PYTHON-MODE'):
- return self.prefilter_manager.get_handler_by_name('emacs')
- else:
- return None
-
-
-class MacroChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ "Emacs ipython-mode tags certain input lines."
+ if line_info.line.endswith('# PYTHON-MODE'):
+ return self.prefilter_manager.get_handler_by_name('emacs')
+ else:
+ return None
+
+
+class MacroChecker(PrefilterChecker):
+
priority = Integer(250).tag(config=True)
-
- def check(self, line_info):
- obj = self.shell.user_ns.get(line_info.ifun)
- if isinstance(obj, Macro):
- return self.prefilter_manager.get_handler_by_name('macro')
- else:
- return None
-
-
-class IPyAutocallChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ if isinstance(obj, Macro):
+ return self.prefilter_manager.get_handler_by_name('macro')
+ else:
+ return None
+
+
+class IPyAutocallChecker(PrefilterChecker):
+
priority = Integer(300).tag(config=True)
-
- def check(self, line_info):
- "Instances of IPyAutocall in user_ns get autocalled immediately"
- obj = self.shell.user_ns.get(line_info.ifun, None)
- if isinstance(obj, IPyAutocall):
- obj.set_ip(self.shell)
- return self.prefilter_manager.get_handler_by_name('auto')
- else:
- return None
-
-
-class AssignmentChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ "Instances of IPyAutocall in user_ns get autocalled immediately"
+ obj = self.shell.user_ns.get(line_info.ifun, None)
+ if isinstance(obj, IPyAutocall):
+ obj.set_ip(self.shell)
+ return self.prefilter_manager.get_handler_by_name('auto')
+ else:
+ return None
+
+
+class AssignmentChecker(PrefilterChecker):
+
priority = Integer(600).tag(config=True)
-
- def check(self, line_info):
- """Check to see if user is assigning to a var for the first time, in
- which case we want to avoid any sort of automagic / autocall games.
-
- This allows users to assign true python variables to names that would
- otherwise be treated as aliases or magics (the magic/alias systems always
- take second seat to true python code). E.g. ls='hi', or ls,that=1,2"""
- if line_info.the_rest:
- if line_info.the_rest[0] in '=,':
- return self.prefilter_manager.get_handler_by_name('normal')
- else:
- return None
-
-
-class AutoMagicChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ """Check to see if user is assigning to a var for the first time, in
+ which case we want to avoid any sort of automagic / autocall games.
+
+ This allows users to assign true python variables to names that would
+ otherwise be treated as aliases or magics (the magic/alias systems always
+ take second seat to true python code). E.g. ls='hi', or ls,that=1,2"""
+ if line_info.the_rest:
+ if line_info.the_rest[0] in '=,':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutoMagicChecker(PrefilterChecker):
+
priority = Integer(700).tag(config=True)
-
- def check(self, line_info):
- """If the ifun is magic, and automagic is on, run it. Note: normal,
- non-auto magic would already have been triggered via '%' in
- check_esc_chars. This just checks for automagic. Also, before
- triggering the magic handler, make sure that there is nothing in the
- user namespace which could shadow it."""
- if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
- return None
-
- # We have a likely magic method. Make sure we should actually call it.
- if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
- return None
-
- head = line_info.ifun.split('.',1)[0]
- if is_shadowed(head, self.shell):
- return None
-
- return self.prefilter_manager.get_handler_by_name('magic')
-
-
-class PythonOpsChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ """If the ifun is magic, and automagic is on, run it. Note: normal,
+ non-auto magic would already have been triggered via '%' in
+ check_esc_chars. This just checks for automagic. Also, before
+ triggering the magic handler, make sure that there is nothing in the
+ user namespace which could shadow it."""
+ if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
+ return None
+
+ # We have a likely magic method. Make sure we should actually call it.
+ if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
+ return None
+
+ head = line_info.ifun.split('.',1)[0]
+ if is_shadowed(head, self.shell):
+ return None
+
+ return self.prefilter_manager.get_handler_by_name('magic')
+
+
+class PythonOpsChecker(PrefilterChecker):
+
priority = Integer(900).tag(config=True)
-
- def check(self, line_info):
- """If the 'rest' of the line begins with a function call or pretty much
- any python operator, we should simply execute the line (regardless of
- whether or not there's a possible autocall expansion). This avoids
- spurious (and very confusing) getattr() accesses."""
- if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
- return self.prefilter_manager.get_handler_by_name('normal')
- else:
- return None
-
-
-class AutocallChecker(PrefilterChecker):
-
+
+ def check(self, line_info):
+ """If the 'rest' of the line begins with a function call or pretty much
+ any python operator, we should simply execute the line (regardless of
+ whether or not there's a possible autocall expansion). This avoids
+ spurious (and very confusing) getattr() accesses."""
+ if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
+ return self.prefilter_manager.get_handler_by_name('normal')
+ else:
+ return None
+
+
+class AutocallChecker(PrefilterChecker):
+
priority = Integer(1000).tag(config=True)
-
+
function_name_regexp = CRegExp(re_fun_name,
help="RegExp to identify potential function names."
).tag(config=True)
exclude_regexp = CRegExp(re_exclude_auto,
help="RegExp to exclude strings with this start from autocalling."
).tag(config=True)
-
- def check(self, line_info):
- "Check if the initial word/function is callable and autocall is on."
- if not self.shell.autocall:
- return None
-
- oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
- if not oinfo['found']:
- return None
-
- if callable(oinfo['obj']) \
- and (not self.exclude_regexp.match(line_info.the_rest)) \
- and self.function_name_regexp.match(line_info.ifun):
- return self.prefilter_manager.get_handler_by_name('auto')
- else:
- return None
-
-
-#-----------------------------------------------------------------------------
-# Prefilter handlers
-#-----------------------------------------------------------------------------
-
-
-class PrefilterHandler(Configurable):
-
- handler_name = Unicode('normal')
- esc_strings = List([])
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
- prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
-
- def __init__(self, shell=None, prefilter_manager=None, **kwargs):
- super(PrefilterHandler, self).__init__(
- shell=shell, prefilter_manager=prefilter_manager, **kwargs
- )
- self.prefilter_manager.register_handler(
- self.handler_name,
- self,
- self.esc_strings
- )
-
- def handle(self, line_info):
- # print "normal: ", line_info
- """Handle normal input lines. Use as a template for handlers."""
-
- # With autoindent on, we need some way to exit the input loop, and I
- # don't want to force the user to have to backspace all the way to
- # clear the line. The rule will be in this case, that either two
- # lines of pure whitespace in a row, or a line of pure whitespace but
- # of a size different to the indent level, will exit the input loop.
- line = line_info.line
- continue_prompt = line_info.continue_prompt
-
- if (continue_prompt and
- self.shell.autoindent and
- line.isspace() and
- 0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
- line = ''
-
- return line
-
- def __str__(self):
- return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
-
-
-class MacroHandler(PrefilterHandler):
- handler_name = Unicode("macro")
-
- def handle(self, line_info):
- obj = self.shell.user_ns.get(line_info.ifun)
- pre_space = line_info.pre_whitespace
- line_sep = "\n" + pre_space
- return pre_space + line_sep.join(obj.value.splitlines())
-
-
-class MagicHandler(PrefilterHandler):
-
- handler_name = Unicode('magic')
- esc_strings = List([ESC_MAGIC])
-
- def handle(self, line_info):
- """Execute magic functions."""
- ifun = line_info.ifun
- the_rest = line_info.the_rest
- cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace,
- (ifun + " " + the_rest))
- return cmd
-
-
-class AutoHandler(PrefilterHandler):
-
- handler_name = Unicode('auto')
- esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
-
- def handle(self, line_info):
- """Handle lines which can be auto-executed, quoting if requested."""
- line = line_info.line
- ifun = line_info.ifun
- the_rest = line_info.the_rest
- esc = line_info.esc
- continue_prompt = line_info.continue_prompt
- obj = line_info.ofind(self.shell)['obj']
-
- # This should only be active for single-line input!
- if continue_prompt:
- return line
-
- force_auto = isinstance(obj, IPyAutocall)
-
- # User objects sometimes raise exceptions on attribute access other
- # than AttributeError (we've seen it in the past), so it's safest to be
- # ultra-conservative here and catch all.
- try:
- auto_rewrite = obj.rewrite
- except Exception:
- auto_rewrite = True
-
- if esc == ESC_QUOTE:
- # Auto-quote splitting on whitespace
- newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
- elif esc == ESC_QUOTE2:
- # Auto-quote whole string
- newcmd = '%s("%s")' % (ifun,the_rest)
- elif esc == ESC_PAREN:
- newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
- else:
- # Auto-paren.
- if force_auto:
- # Don't rewrite if it is already a call.
- do_rewrite = not the_rest.startswith('(')
- else:
- if not the_rest:
- # We only apply it to argument-less calls if the autocall
- # parameter is set to 2.
- do_rewrite = (self.shell.autocall >= 2)
- elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
- # Don't autocall in this case: item access for an object
- # which is BOTH callable and implements __getitem__.
- do_rewrite = False
- else:
- do_rewrite = True
-
- # Figure out the rewritten command
- if do_rewrite:
- if the_rest.endswith(';'):
- newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
- else:
- newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
- else:
- normal_handler = self.prefilter_manager.get_handler_by_name('normal')
- return normal_handler.handle(line_info)
-
- # Display the rewritten call
- if auto_rewrite:
- self.shell.auto_rewrite_input(newcmd)
-
- return newcmd
-
-
-class EmacsHandler(PrefilterHandler):
-
- handler_name = Unicode('emacs')
- esc_strings = List([])
-
- def handle(self, line_info):
- """Handle input lines marked by python-mode."""
-
- # Currently, nothing is done. Later more functionality can be added
- # here if needed.
-
- # The input cache shouldn't be updated
- return line_info.line
-
-
-#-----------------------------------------------------------------------------
-# Defaults
-#-----------------------------------------------------------------------------
-
-
-_default_transformers = [
-]
-
-_default_checkers = [
- EmacsChecker,
- MacroChecker,
- IPyAutocallChecker,
- AssignmentChecker,
- AutoMagicChecker,
- PythonOpsChecker,
- AutocallChecker
-]
-
-_default_handlers = [
- PrefilterHandler,
- MacroHandler,
- MagicHandler,
- AutoHandler,
- EmacsHandler
-]
+
+ def check(self, line_info):
+ "Check if the initial word/function is callable and autocall is on."
+ if not self.shell.autocall:
+ return None
+
+ oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
+ if not oinfo['found']:
+ return None
+
+ if callable(oinfo['obj']) \
+ and (not self.exclude_regexp.match(line_info.the_rest)) \
+ and self.function_name_regexp.match(line_info.ifun):
+ return self.prefilter_manager.get_handler_by_name('auto')
+ else:
+ return None
+
+
+#-----------------------------------------------------------------------------
+# Prefilter handlers
+#-----------------------------------------------------------------------------
+
+
+class PrefilterHandler(Configurable):
+
+ handler_name = Unicode('normal')
+ esc_strings = List([])
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
+ prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
+
+ def __init__(self, shell=None, prefilter_manager=None, **kwargs):
+ super(PrefilterHandler, self).__init__(
+ shell=shell, prefilter_manager=prefilter_manager, **kwargs
+ )
+ self.prefilter_manager.register_handler(
+ self.handler_name,
+ self,
+ self.esc_strings
+ )
+
+ def handle(self, line_info):
+ # print "normal: ", line_info
+ """Handle normal input lines. Use as a template for handlers."""
+
+ # With autoindent on, we need some way to exit the input loop, and I
+ # don't want to force the user to have to backspace all the way to
+ # clear the line. The rule will be in this case, that either two
+ # lines of pure whitespace in a row, or a line of pure whitespace but
+ # of a size different to the indent level, will exit the input loop.
+ line = line_info.line
+ continue_prompt = line_info.continue_prompt
+
+ if (continue_prompt and
+ self.shell.autoindent and
+ line.isspace() and
+ 0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
+ line = ''
+
+ return line
+
+ def __str__(self):
+ return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
+
+
+class MacroHandler(PrefilterHandler):
+ handler_name = Unicode("macro")
+
+ def handle(self, line_info):
+ obj = self.shell.user_ns.get(line_info.ifun)
+ pre_space = line_info.pre_whitespace
+ line_sep = "\n" + pre_space
+ return pre_space + line_sep.join(obj.value.splitlines())
+
+
+class MagicHandler(PrefilterHandler):
+
+ handler_name = Unicode('magic')
+ esc_strings = List([ESC_MAGIC])
+
+ def handle(self, line_info):
+ """Execute magic functions."""
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+ cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace,
+ (ifun + " " + the_rest))
+ return cmd
+
+
+class AutoHandler(PrefilterHandler):
+
+ handler_name = Unicode('auto')
+ esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
+
+ def handle(self, line_info):
+ """Handle lines which can be auto-executed, quoting if requested."""
+ line = line_info.line
+ ifun = line_info.ifun
+ the_rest = line_info.the_rest
+ esc = line_info.esc
+ continue_prompt = line_info.continue_prompt
+ obj = line_info.ofind(self.shell)['obj']
+
+ # This should only be active for single-line input!
+ if continue_prompt:
+ return line
+
+ force_auto = isinstance(obj, IPyAutocall)
+
+ # User objects sometimes raise exceptions on attribute access other
+ # than AttributeError (we've seen it in the past), so it's safest to be
+ # ultra-conservative here and catch all.
+ try:
+ auto_rewrite = obj.rewrite
+ except Exception:
+ auto_rewrite = True
+
+ if esc == ESC_QUOTE:
+ # Auto-quote splitting on whitespace
+ newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
+ elif esc == ESC_QUOTE2:
+ # Auto-quote whole string
+ newcmd = '%s("%s")' % (ifun,the_rest)
+ elif esc == ESC_PAREN:
+ newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
+ else:
+ # Auto-paren.
+ if force_auto:
+ # Don't rewrite if it is already a call.
+ do_rewrite = not the_rest.startswith('(')
+ else:
+ if not the_rest:
+ # We only apply it to argument-less calls if the autocall
+ # parameter is set to 2.
+ do_rewrite = (self.shell.autocall >= 2)
+ elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
+ # Don't autocall in this case: item access for an object
+ # which is BOTH callable and implements __getitem__.
+ do_rewrite = False
+ else:
+ do_rewrite = True
+
+ # Figure out the rewritten command
+ if do_rewrite:
+ if the_rest.endswith(';'):
+ newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
+ else:
+ newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
+ else:
+ normal_handler = self.prefilter_manager.get_handler_by_name('normal')
+ return normal_handler.handle(line_info)
+
+ # Display the rewritten call
+ if auto_rewrite:
+ self.shell.auto_rewrite_input(newcmd)
+
+ return newcmd
+
+
+class EmacsHandler(PrefilterHandler):
+
+ handler_name = Unicode('emacs')
+ esc_strings = List([])
+
+ def handle(self, line_info):
+ """Handle input lines marked by python-mode."""
+
+ # Currently, nothing is done. Later more functionality can be added
+ # here if needed.
+
+ # The input cache shouldn't be updated
+ return line_info.line
+
+
+#-----------------------------------------------------------------------------
+# Defaults
+#-----------------------------------------------------------------------------
+
+
+_default_transformers = [
+]
+
+_default_checkers = [
+ EmacsChecker,
+ MacroChecker,
+ IPyAutocallChecker,
+ AssignmentChecker,
+ AutoMagicChecker,
+ PythonOpsChecker,
+ AutocallChecker
+]
+
+_default_handlers = [
+ PrefilterHandler,
+ MacroHandler,
+ MagicHandler,
+ AutoHandler,
+ EmacsHandler
+]
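
The prefilter machinery above is a priority-ordered chain: each registered
checker inspects the line, and the first one to return a handler wins. A
minimal standalone sketch of that dispatch (toy classes with the same shape,
not IPython's actual API) may make the ordering easier to see::

    class Checker(object):
        priority = 100
        def check(self, line):
            return None          # return a handler name, or None to pass

    class MagicChecker(Checker):
        priority = 700           # lower number, so it runs first
        def check(self, line):
            return 'magic' if line.startswith('%') else None

    class PythonOpsChecker(Checker):
        priority = 900
        def check(self, line):
            return 'normal' if line and line[0] in '!=()<>,+*/%^&|' else None

    def prefilter(line, checkers, handlers):
        # Checkers run in ascending priority order; first non-None wins.
        for checker in sorted(checkers, key=lambda c: c.priority):
            name = checker.check(line)
            if name is not None:
                return handlers[name](line)
        return handlers['normal'](line)

    handlers = {'normal': lambda l: l,
                'magic': lambda l: 'get_ipython().magic(%r)' % l[1:]}
    print(prefilter('%time x', [PythonOpsChecker(), MagicChecker()], handlers))
    # -> get_ipython().magic('time x')
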
diff --git a/contrib/python/ipython/py2/IPython/core/profile/README_STARTUP b/contrib/python/ipython/py2/IPython/core/profile/README_STARTUP
index 051134cfc3..61d4700042 100644
--- a/contrib/python/ipython/py2/IPython/core/profile/README_STARTUP
+++ b/contrib/python/ipython/py2/IPython/core/profile/README_STARTUP
@@ -1,11 +1,11 @@
-This is the IPython startup directory
-
-.py and .ipy files in this directory will be run *prior* to any code or files specified
-via the exec_lines or exec_files configurables whenever you load this profile.
-
-Files will be run in lexicographical order, so you can control the execution order of files
-with a prefix, e.g.::
-
- 00-first.py
- 50-middle.py
- 99-last.ipy
+This is the IPython startup directory
+
+.py and .ipy files in this directory will be run *prior* to any code or files specified
+via the exec_lines or exec_files configurables whenever you load this profile.
+
+Files will be run in lexicographical order, so you can control the execution order of files
+with a prefix, e.g.::
+
+ 00-first.py
+ 50-middle.py
+ 99-last.ipy
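
The ordering rule described in this README is plain lexicographic sorting of
file names, so a numeric prefix is enough to control it. A short sketch (the
default profile path is an assumption; adjust as needed) that lists startup
files in the order they would run::

    import glob
    import os

    startup_dir = os.path.expanduser('~/.ipython/profile_default/startup')
    names = glob.glob(os.path.join(startup_dir, '*.py')) + \
            glob.glob(os.path.join(startup_dir, '*.ipy'))
    # sorted() reproduces the lexicographical order used at startup,
    # so '00-first.py' runs before '50-middle.py' and '99-last.ipy'.
    for path in sorted(names):
        print(path)
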
diff --git a/contrib/python/ipython/py2/IPython/core/profileapp.py b/contrib/python/ipython/py2/IPython/core/profileapp.py
index 54e4f3b94c..b8e5fd26ac 100644
--- a/contrib/python/ipython/py2/IPython/core/profileapp.py
+++ b/contrib/python/ipython/py2/IPython/core/profileapp.py
@@ -1,314 +1,314 @@
-# encoding: utf-8
-"""
-An application for managing IPython profiles.
-
-To be invoked as the `ipython profile` subcommand.
-
-Authors:
-
-* Min RK
-
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-
-from traitlets.config.application import Application
-from IPython.core.application import (
- BaseIPythonApplication, base_flags
-)
-from IPython.core.profiledir import ProfileDir
-from IPython.utils.importstring import import_item
-from IPython.paths import get_ipython_dir, get_ipython_package_dir
-from IPython.utils import py3compat
+# encoding: utf-8
+"""
+An application for managing IPython profiles.
+
+To be invoked as the `ipython profile` subcommand.
+
+Authors:
+
+* Min RK
+
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+
+from traitlets.config.application import Application
+from IPython.core.application import (
+ BaseIPythonApplication, base_flags
+)
+from IPython.core.profiledir import ProfileDir
+from IPython.utils.importstring import import_item
+from IPython.paths import get_ipython_dir, get_ipython_package_dir
+from IPython.utils import py3compat
from traitlets import Unicode, Bool, Dict, observe
-
-#-----------------------------------------------------------------------------
-# Constants
-#-----------------------------------------------------------------------------
-
-create_help = """Create an IPython profile by name
-
-Create an ipython profile directory by its name or
-profile directory path. Profile directories contain
-configuration, log and security related files and are named
-using the convention 'profile_<name>'. By default they are
-located in your ipython directory. Once created, you can
-edit the configuration files in the profile
-directory to configure IPython. Most users will create a
-profile directory by name,
-`ipython profile create myprofile`, which will put the directory
-in `<ipython_dir>/profile_myprofile`.
-"""
-list_help = """List available IPython profiles
-
-List all available profiles, by profile location, that can
-be found in the current working directory or in the ipython
-directory. Profile directories are named using the convention
-'profile_<profile>'.
-"""
-profile_help = """Manage IPython profiles
-
-Profile directories contain
-configuration, log and security related files and are named
-using the convention 'profile_<name>'. By default they are
-located in your ipython directory. You can create profiles
-with `ipython profile create <name>`, or see the profiles you
-already have with `ipython profile list`.
-
-To get started configuring IPython, simply do:
-
-$> ipython profile create
-
-and IPython will create the default profile in <ipython_dir>/profile_default,
-where you can edit ipython_config.py to start configuring IPython.
-
-"""
-
-_list_examples = "ipython profile list # list all profiles"
-
-_create_examples = """
-ipython profile create foo # create profile foo w/ default config files
-ipython profile create foo --reset # restage default config files over current
-ipython profile create foo --parallel # also stage parallel config files
-"""
-
-_main_examples = """
-ipython profile create -h # show the help string for the create subcommand
-ipython profile list -h # show the help string for the list subcommand
-
-ipython locate profile foo # print the path to the directory for profile 'foo'
-"""
-
-#-----------------------------------------------------------------------------
-# Profile Application Class (for `ipython profile` subcommand)
-#-----------------------------------------------------------------------------
-
-
-def list_profiles_in(path):
- """list profiles in a given root directory"""
- files = os.listdir(path)
- profiles = []
- for f in files:
- try:
- full_path = os.path.join(path, f)
- except UnicodeError:
- continue
- if os.path.isdir(full_path) and f.startswith('profile_'):
- profiles.append(f.split('_',1)[-1])
- return profiles
-
-
-def list_bundled_profiles():
- """list profiles that are bundled with IPython."""
- path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
- files = os.listdir(path)
- profiles = []
- for profile in files:
- full_path = os.path.join(path, profile)
- if os.path.isdir(full_path) and profile != "__pycache__":
- profiles.append(profile)
- return profiles
-
-
-class ProfileLocate(BaseIPythonApplication):
- description = """print the path to an IPython profile dir"""
-
- def parse_command_line(self, argv=None):
- super(ProfileLocate, self).parse_command_line(argv)
- if self.extra_args:
- self.profile = self.extra_args[0]
-
- def start(self):
- print(self.profile_dir.location)
-
-
-class ProfileList(Application):
- name = u'ipython-profile'
- description = list_help
- examples = _list_examples
-
- aliases = Dict({
- 'ipython-dir' : 'ProfileList.ipython_dir',
- 'log-level' : 'Application.log_level',
- })
- flags = Dict(dict(
- debug = ({'Application' : {'log_level' : 0}},
- "Set Application.log_level to 0, maximizing log output."
- )
- ))
-
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+create_help = """Create an IPython profile by name
+
+Create an ipython profile directory by its name or
+profile directory path. Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. Once created, you can
+edit the configuration files in the profile
+directory to configure IPython. Most users will create a
+profile directory by name,
+`ipython profile create myprofile`, which will put the directory
+in `<ipython_dir>/profile_myprofile`.
+"""
+list_help = """List available IPython profiles
+
+List all available profiles, by profile location, that can
+be found in the current working directory or in the ipython
+directory. Profile directories are named using the convention
+'profile_<profile>'.
+"""
+profile_help = """Manage IPython profiles
+
+Profile directories contain
+configuration, log and security related files and are named
+using the convention 'profile_<name>'. By default they are
+located in your ipython directory. You can create profiles
+with `ipython profile create <name>`, or see the profiles you
+already have with `ipython profile list`.
+
+To get started configuring IPython, simply do:
+
+$> ipython profile create
+
+and IPython will create the default profile in <ipython_dir>/profile_default,
+where you can edit ipython_config.py to start configuring IPython.
+
+"""
+
+_list_examples = "ipython profile list # list all profiles"
+
+_create_examples = """
+ipython profile create foo # create profile foo w/ default config files
+ipython profile create foo --reset # restage default config files over current
+ipython profile create foo --parallel # also stage parallel config files
+"""
+
+_main_examples = """
+ipython profile create -h # show the help string for the create subcommand
+ipython profile list -h # show the help string for the list subcommand
+
+ipython locate profile foo # print the path to the directory for profile 'foo'
+"""
+
+#-----------------------------------------------------------------------------
+# Profile Application Class (for `ipython profile` subcommand)
+#-----------------------------------------------------------------------------
+
+
+def list_profiles_in(path):
+ """list profiles in a given root directory"""
+ files = os.listdir(path)
+ profiles = []
+ for f in files:
+ try:
+ full_path = os.path.join(path, f)
+ except UnicodeError:
+ continue
+ if os.path.isdir(full_path) and f.startswith('profile_'):
+ profiles.append(f.split('_',1)[-1])
+ return profiles
+
+
+def list_bundled_profiles():
+ """list profiles that are bundled with IPython."""
+ path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
+ files = os.listdir(path)
+ profiles = []
+ for profile in files:
+ full_path = os.path.join(path, profile)
+ if os.path.isdir(full_path) and profile != "__pycache__":
+ profiles.append(profile)
+ return profiles
+
+
+class ProfileLocate(BaseIPythonApplication):
+ description = """print the path to an IPython profile dir"""
+
+ def parse_command_line(self, argv=None):
+ super(ProfileLocate, self).parse_command_line(argv)
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ def start(self):
+ print(self.profile_dir.location)
+
+
+class ProfileList(Application):
+ name = u'ipython-profile'
+ description = list_help
+ examples = _list_examples
+
+ aliases = Dict({
+ 'ipython-dir' : 'ProfileList.ipython_dir',
+ 'log-level' : 'Application.log_level',
+ })
+ flags = Dict(dict(
+ debug = ({'Application' : {'log_level' : 0}},
+ "Set Application.log_level to 0, maximizing log output."
+ )
+ ))
+
ipython_dir = Unicode(get_ipython_dir(),
- help="""
- The name of the IPython directory. This directory is used for logging
- configuration (through profiles), history storage, etc. The default
-    is usually $HOME/.ipython. This option can also be specified through
- the environment variable IPYTHONDIR.
- """
+ help="""
+ The name of the IPython directory. This directory is used for logging
+ configuration (through profiles), history storage, etc. The default
+    is usually $HOME/.ipython. This option can also be specified through
+ the environment variable IPYTHONDIR.
+ """
).tag(config=True)
-
-
- def _print_profiles(self, profiles):
- """print list of profiles, indented."""
- for profile in profiles:
- print(' %s' % profile)
-
- def list_profile_dirs(self):
- profiles = list_bundled_profiles()
- if profiles:
- print()
- print("Available profiles in IPython:")
- self._print_profiles(profiles)
- print()
- print(" The first request for a bundled profile will copy it")
- print(" into your IPython directory (%s)," % self.ipython_dir)
- print(" where you can customize it.")
-
- profiles = list_profiles_in(self.ipython_dir)
- if profiles:
- print()
- print("Available profiles in %s:" % self.ipython_dir)
- self._print_profiles(profiles)
-
- profiles = list_profiles_in(py3compat.getcwd())
- if profiles:
- print()
- print("Available profiles in current directory (%s):" % py3compat.getcwd())
- self._print_profiles(profiles)
-
- print()
- print("To use any of the above profiles, start IPython with:")
- print(" ipython --profile=<name>")
- print()
-
- def start(self):
- self.list_profile_dirs()
-
-
-create_flags = {}
-create_flags.update(base_flags)
-# don't include '--init' flag, which implies running profile create in other apps
-create_flags.pop('init')
-create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
- "reset config files in this profile to the defaults.")
-create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
- "Include the config files for parallel "
- "computing apps (ipengine, ipcontroller, etc.)")
-
-
-class ProfileCreate(BaseIPythonApplication):
- name = u'ipython-profile'
- description = create_help
- examples = _create_examples
+
+
+ def _print_profiles(self, profiles):
+ """print list of profiles, indented."""
+ for profile in profiles:
+ print(' %s' % profile)
+
+ def list_profile_dirs(self):
+ profiles = list_bundled_profiles()
+ if profiles:
+ print()
+ print("Available profiles in IPython:")
+ self._print_profiles(profiles)
+ print()
+ print(" The first request for a bundled profile will copy it")
+ print(" into your IPython directory (%s)," % self.ipython_dir)
+ print(" where you can customize it.")
+
+ profiles = list_profiles_in(self.ipython_dir)
+ if profiles:
+ print()
+ print("Available profiles in %s:" % self.ipython_dir)
+ self._print_profiles(profiles)
+
+ profiles = list_profiles_in(py3compat.getcwd())
+ if profiles:
+ print()
+ print("Available profiles in current directory (%s):" % py3compat.getcwd())
+ self._print_profiles(profiles)
+
+ print()
+ print("To use any of the above profiles, start IPython with:")
+ print(" ipython --profile=<name>")
+ print()
+
+ def start(self):
+ self.list_profile_dirs()
+
+
+create_flags = {}
+create_flags.update(base_flags)
+# don't include '--init' flag, which implies running profile create in other apps
+create_flags.pop('init')
+create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
+ "reset config files in this profile to the defaults.")
+create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
+ "Include the config files for parallel "
+ "computing apps (ipengine, ipcontroller, etc.)")
+
+
+class ProfileCreate(BaseIPythonApplication):
+ name = u'ipython-profile'
+ description = create_help
+ examples = _create_examples
auto_create = Bool(True)
- def _log_format_default(self):
- return "[%(name)s] %(message)s"
-
- def _copy_config_files_default(self):
- return True
-
+ def _log_format_default(self):
+ return "[%(name)s] %(message)s"
+
+ def _copy_config_files_default(self):
+ return True
+
parallel = Bool(False,
help="whether to include parallel computing config files"
).tag(config=True)
@observe('parallel')
def _parallel_changed(self, change):
- parallel_files = [ 'ipcontroller_config.py',
- 'ipengine_config.py',
- 'ipcluster_config.py'
- ]
+ parallel_files = [ 'ipcontroller_config.py',
+ 'ipengine_config.py',
+ 'ipcluster_config.py'
+ ]
if change['new']:
- for cf in parallel_files:
- self.config_files.append(cf)
- else:
- for cf in parallel_files:
- if cf in self.config_files:
- self.config_files.remove(cf)
-
- def parse_command_line(self, argv):
- super(ProfileCreate, self).parse_command_line(argv)
- # accept positional arg as profile name
- if self.extra_args:
- self.profile = self.extra_args[0]
-
- flags = Dict(create_flags)
-
- classes = [ProfileDir]
-
- def _import_app(self, app_path):
- """import an app class"""
- app = None
- name = app_path.rsplit('.', 1)[-1]
- try:
- app = import_item(app_path)
- except ImportError:
- self.log.info("Couldn't import %s, config file will be excluded", name)
- except Exception:
- self.log.warning('Unexpected error importing %s', name, exc_info=True)
- return app
-
- def init_config_files(self):
- super(ProfileCreate, self).init_config_files()
- # use local imports, since these classes may import from here
- from IPython.terminal.ipapp import TerminalIPythonApp
- apps = [TerminalIPythonApp]
- for app_path in (
- 'ipykernel.kernelapp.IPKernelApp',
- ):
- app = self._import_app(app_path)
- if app is not None:
- apps.append(app)
- if self.parallel:
- from ipyparallel.apps.ipcontrollerapp import IPControllerApp
- from ipyparallel.apps.ipengineapp import IPEngineApp
- from ipyparallel.apps.ipclusterapp import IPClusterStart
- apps.extend([
- IPControllerApp,
- IPEngineApp,
- IPClusterStart,
- ])
- for App in apps:
- app = App()
- app.config.update(self.config)
- app.log = self.log
- app.overwrite = self.overwrite
- app.copy_config_files=True
- app.ipython_dir=self.ipython_dir
- app.profile_dir=self.profile_dir
- app.init_config_files()
-
- def stage_default_config_file(self):
- pass
-
-
-class ProfileApp(Application):
- name = u'ipython profile'
- description = profile_help
- examples = _main_examples
-
- subcommands = Dict(dict(
- create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
- list = (ProfileList, ProfileList.description.splitlines()[0]),
- locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
- ))
-
- def start(self):
- if self.subapp is None:
- print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
- print()
- self.print_description()
- self.print_subcommands()
- self.exit(1)
- else:
- return self.subapp.start()
+ for cf in parallel_files:
+ self.config_files.append(cf)
+ else:
+ for cf in parallel_files:
+ if cf in self.config_files:
+ self.config_files.remove(cf)
+
+ def parse_command_line(self, argv):
+ super(ProfileCreate, self).parse_command_line(argv)
+ # accept positional arg as profile name
+ if self.extra_args:
+ self.profile = self.extra_args[0]
+
+ flags = Dict(create_flags)
+
+ classes = [ProfileDir]
+
+ def _import_app(self, app_path):
+ """import an app class"""
+ app = None
+ name = app_path.rsplit('.', 1)[-1]
+ try:
+ app = import_item(app_path)
+ except ImportError:
+ self.log.info("Couldn't import %s, config file will be excluded", name)
+ except Exception:
+ self.log.warning('Unexpected error importing %s', name, exc_info=True)
+ return app
+
+ def init_config_files(self):
+ super(ProfileCreate, self).init_config_files()
+ # use local imports, since these classes may import from here
+ from IPython.terminal.ipapp import TerminalIPythonApp
+ apps = [TerminalIPythonApp]
+ for app_path in (
+ 'ipykernel.kernelapp.IPKernelApp',
+ ):
+ app = self._import_app(app_path)
+ if app is not None:
+ apps.append(app)
+ if self.parallel:
+ from ipyparallel.apps.ipcontrollerapp import IPControllerApp
+ from ipyparallel.apps.ipengineapp import IPEngineApp
+ from ipyparallel.apps.ipclusterapp import IPClusterStart
+ apps.extend([
+ IPControllerApp,
+ IPEngineApp,
+ IPClusterStart,
+ ])
+ for App in apps:
+ app = App()
+ app.config.update(self.config)
+ app.log = self.log
+ app.overwrite = self.overwrite
+ app.copy_config_files=True
+ app.ipython_dir=self.ipython_dir
+ app.profile_dir=self.profile_dir
+ app.init_config_files()
+
+ def stage_default_config_file(self):
+ pass
+
+
+class ProfileApp(Application):
+ name = u'ipython profile'
+ description = profile_help
+ examples = _main_examples
+
+ subcommands = Dict(dict(
+ create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
+ list = (ProfileList, ProfileList.description.splitlines()[0]),
+ locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
+ ))
+
+ def start(self):
+ if self.subapp is None:
+ print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
+ print()
+ self.print_description()
+ self.print_subcommands()
+ self.exit(1)
+ else:
+ return self.subapp.start()
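
`list_profiles_in` above treats any subdirectory named `profile_<name>` as a
profile and reports the part after the prefix; a self-contained sketch of the
same scan, run against a throwaway directory::

    import os
    import tempfile

    root = tempfile.mkdtemp()
    for d in ('profile_default', 'profile_math', 'not_a_profile'):
        os.mkdir(os.path.join(root, d))

    # Same filter as list_profiles_in: directories starting with
    # 'profile_', reported by the part after the first underscore.
    profiles = [f.split('_', 1)[-1]
                for f in os.listdir(root)
                if f.startswith('profile_')
                and os.path.isdir(os.path.join(root, f))]
    print(sorted(profiles))   # ['default', 'math']
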
diff --git a/contrib/python/ipython/py2/IPython/core/profiledir.py b/contrib/python/ipython/py2/IPython/core/profiledir.py
index 4e54f8c68c..b777f13da0 100644
--- a/contrib/python/ipython/py2/IPython/core/profiledir.py
+++ b/contrib/python/ipython/py2/IPython/core/profiledir.py
@@ -1,222 +1,222 @@
-# encoding: utf-8
-"""An object for managing IPython profile directories."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import os
-import shutil
-import errno
-
-from traitlets.config.configurable import LoggingConfigurable
-from IPython.paths import get_ipython_package_dir
-from IPython.utils.path import expand_path, ensure_dir_exists
-from IPython.utils import py3compat
+# encoding: utf-8
+"""An object for managing IPython profile directories."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import shutil
+import errno
+
+from traitlets.config.configurable import LoggingConfigurable
+from IPython.paths import get_ipython_package_dir
+from IPython.utils.path import expand_path, ensure_dir_exists
+from IPython.utils import py3compat
from traitlets import Unicode, Bool, observe
-
-#-----------------------------------------------------------------------------
-# Module errors
-#-----------------------------------------------------------------------------
-
-class ProfileDirError(Exception):
- pass
-
-
-#-----------------------------------------------------------------------------
-# Class for managing profile directories
-#-----------------------------------------------------------------------------
-
-class ProfileDir(LoggingConfigurable):
- """An object to manage the profile directory and its resources.
-
-    The profile directory is used by all IPython applications to manage
- configuration, logging and security.
-
- This object knows how to find, create and manage these directories. This
- should be used by any code that wants to handle profiles.
- """
-
- security_dir_name = Unicode('security')
- log_dir_name = Unicode('log')
- startup_dir_name = Unicode('startup')
- pid_dir_name = Unicode('pid')
- static_dir_name = Unicode('static')
- security_dir = Unicode(u'')
- log_dir = Unicode(u'')
- startup_dir = Unicode(u'')
- pid_dir = Unicode(u'')
- static_dir = Unicode(u'')
-
+
+#-----------------------------------------------------------------------------
+# Module errors
+#-----------------------------------------------------------------------------
+
+class ProfileDirError(Exception):
+ pass
+
+
+#-----------------------------------------------------------------------------
+# Class for managing profile directories
+#-----------------------------------------------------------------------------
+
+class ProfileDir(LoggingConfigurable):
+ """An object to manage the profile directory and its resources.
+
+    The profile directory is used by all IPython applications to manage
+ configuration, logging and security.
+
+ This object knows how to find, create and manage these directories. This
+ should be used by any code that wants to handle profiles.
+ """
+
+ security_dir_name = Unicode('security')
+ log_dir_name = Unicode('log')
+ startup_dir_name = Unicode('startup')
+ pid_dir_name = Unicode('pid')
+ static_dir_name = Unicode('static')
+ security_dir = Unicode(u'')
+ log_dir = Unicode(u'')
+ startup_dir = Unicode(u'')
+ pid_dir = Unicode(u'')
+ static_dir = Unicode(u'')
+
location = Unicode(u'',
- help="""Set the profile location directly. This overrides the logic used by the
- `profile` option.""",
+ help="""Set the profile location directly. This overrides the logic used by the
+ `profile` option.""",
).tag(config=True)
-
- _location_isset = Bool(False) # flag for detecting multiply set location
+
+ _location_isset = Bool(False) # flag for detecting multiply set location
@observe('location')
def _location_changed(self, change):
- if self._location_isset:
- raise RuntimeError("Cannot set profile location more than once.")
- self._location_isset = True
+ if self._location_isset:
+ raise RuntimeError("Cannot set profile location more than once.")
+ self._location_isset = True
new = change['new']
- ensure_dir_exists(new)
-
- # ensure config files exist:
- self.security_dir = os.path.join(new, self.security_dir_name)
- self.log_dir = os.path.join(new, self.log_dir_name)
- self.startup_dir = os.path.join(new, self.startup_dir_name)
- self.pid_dir = os.path.join(new, self.pid_dir_name)
- self.static_dir = os.path.join(new, self.static_dir_name)
- self.check_dirs()
+ ensure_dir_exists(new)
+
+ # ensure config files exist:
+ self.security_dir = os.path.join(new, self.security_dir_name)
+ self.log_dir = os.path.join(new, self.log_dir_name)
+ self.startup_dir = os.path.join(new, self.startup_dir_name)
+ self.pid_dir = os.path.join(new, self.pid_dir_name)
+ self.static_dir = os.path.join(new, self.static_dir_name)
+ self.check_dirs()
- def _mkdir(self, path, mode=None):
- """ensure a directory exists at a given path
-
- This is a version of os.mkdir, with the following differences:
-
- - returns True if it created the directory, False otherwise
- - ignores EEXIST, protecting against race conditions where
- the dir may have been created in between the check and
- the creation
- - sets permissions if requested and the dir already exists
- """
- if os.path.exists(path):
- if mode and os.stat(path).st_mode != mode:
- try:
- os.chmod(path, mode)
- except OSError:
- self.log.warning(
- "Could not set permissions on %s",
- path
- )
- return False
- try:
- if mode:
- os.mkdir(path, mode)
- else:
- os.mkdir(path)
- except OSError as e:
- if e.errno == errno.EEXIST:
- return False
- else:
- raise
-
- return True
+ def _mkdir(self, path, mode=None):
+ """ensure a directory exists at a given path
+
+ This is a version of os.mkdir, with the following differences:
+
+ - returns True if it created the directory, False otherwise
+ - ignores EEXIST, protecting against race conditions where
+ the dir may have been created in between the check and
+ the creation
+ - sets permissions if requested and the dir already exists
+ """
+ if os.path.exists(path):
+ if mode and os.stat(path).st_mode != mode:
+ try:
+ os.chmod(path, mode)
+ except OSError:
+ self.log.warning(
+ "Could not set permissions on %s",
+ path
+ )
+ return False
+ try:
+ if mode:
+ os.mkdir(path, mode)
+ else:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ return False
+ else:
+ raise
+
+ return True
@observe('log_dir')
def check_log_dir(self, change=None):
- self._mkdir(self.log_dir)
+ self._mkdir(self.log_dir)
@observe('startup_dir')
def check_startup_dir(self, change=None):
- self._mkdir(self.startup_dir)
-
- readme = os.path.join(self.startup_dir, 'README')
-
+ self._mkdir(self.startup_dir)
+
+ readme = os.path.join(self.startup_dir, 'README')
+
if not os.path.exists(readme):
import pkgutil
with open(readme, 'wb') as f:
f.write(pkgutil.get_data(__name__, 'profile/README_STARTUP'))
-
+
@observe('security_dir')
def check_security_dir(self, change=None):
- self._mkdir(self.security_dir, 0o40700)
-
+ self._mkdir(self.security_dir, 0o40700)
+
@observe('pid_dir')
def check_pid_dir(self, change=None):
- self._mkdir(self.pid_dir, 0o40700)
-
- def check_dirs(self):
- self.check_security_dir()
- self.check_log_dir()
- self.check_pid_dir()
- self.check_startup_dir()
-
- def copy_config_file(self, config_file, path=None, overwrite=False):
- """Copy a default config file into the active profile directory.
-
- Default configuration files are kept in :mod:`IPython.core.profile`.
-        This function copies these from that location to the working profile
- directory.
- """
- dst = os.path.join(self.location, config_file)
- if os.path.isfile(dst) and not overwrite:
- return False
- if path is None:
- path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
- src = os.path.join(path, config_file)
- shutil.copy(src, dst)
- return True
-
- @classmethod
- def create_profile_dir(cls, profile_dir, config=None):
- """Create a new profile directory given a full path.
-
- Parameters
- ----------
- profile_dir : str
-        The full path to the profile directory. If it exists, it will
-        be used; if not, it will be created.
- """
- return cls(location=profile_dir, config=config)
-
- @classmethod
- def create_profile_dir_by_name(cls, path, name=u'default', config=None):
- """Create a profile dir by profile name and path.
-
- Parameters
- ----------
- path : unicode
- The path (directory) to put the profile directory in.
- name : unicode
- The name of the profile. The name of the profile directory will
- be "profile_<profile>".
- """
- if not os.path.isdir(path):
- raise ProfileDirError('Directory not found: %s' % path)
- profile_dir = os.path.join(path, u'profile_' + name)
- return cls(location=profile_dir, config=config)
-
- @classmethod
- def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
- """Find an existing profile dir by profile name, return its ProfileDir.
-
- This searches through a sequence of paths for a profile dir. If it
- is not found, a :class:`ProfileDirError` exception will be raised.
-
- The search path algorithm is:
- 1. ``py3compat.getcwd()``
- 2. ``ipython_dir``
-
- Parameters
- ----------
- ipython_dir : unicode or str
- The IPython directory to use.
- name : unicode or str
- The name of the profile. The name of the profile directory
- will be "profile_<profile>".
- """
- dirname = u'profile_' + name
- paths = [py3compat.getcwd(), ipython_dir]
- for p in paths:
- profile_dir = os.path.join(p, dirname)
- if os.path.isdir(profile_dir):
- return cls(location=profile_dir, config=config)
- else:
- raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
-
- @classmethod
- def find_profile_dir(cls, profile_dir, config=None):
- """Find/create a profile dir and return its ProfileDir.
-
- This will create the profile directory if it doesn't exist.
-
- Parameters
- ----------
- profile_dir : unicode or str
- The path of the profile directory.
- """
- profile_dir = expand_path(profile_dir)
- if not os.path.isdir(profile_dir):
- raise ProfileDirError('Profile directory not found: %s' % profile_dir)
- return cls(location=profile_dir, config=config)
+ self._mkdir(self.pid_dir, 0o40700)
+
+ def check_dirs(self):
+ self.check_security_dir()
+ self.check_log_dir()
+ self.check_pid_dir()
+ self.check_startup_dir()
+
+ def copy_config_file(self, config_file, path=None, overwrite=False):
+ """Copy a default config file into the active profile directory.
+
+ Default configuration files are kept in :mod:`IPython.core.profile`.
+        This function copies these from that location to the working profile
+ directory.
+ """
+ dst = os.path.join(self.location, config_file)
+ if os.path.isfile(dst) and not overwrite:
+ return False
+ if path is None:
+ path = os.path.join(get_ipython_package_dir(), u'core', u'profile', u'default')
+ src = os.path.join(path, config_file)
+ shutil.copy(src, dst)
+ return True
+
+ @classmethod
+ def create_profile_dir(cls, profile_dir, config=None):
+ """Create a new profile directory given a full path.
+
+ Parameters
+ ----------
+ profile_dir : str
+        The full path to the profile directory. If it exists, it will
+        be used; if not, it will be created.
+ """
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def create_profile_dir_by_name(cls, path, name=u'default', config=None):
+ """Create a profile dir by profile name and path.
+
+ Parameters
+ ----------
+ path : unicode
+ The path (directory) to put the profile directory in.
+ name : unicode
+ The name of the profile. The name of the profile directory will
+ be "profile_<profile>".
+ """
+ if not os.path.isdir(path):
+ raise ProfileDirError('Directory not found: %s' % path)
+ profile_dir = os.path.join(path, u'profile_' + name)
+ return cls(location=profile_dir, config=config)
+
+ @classmethod
+ def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
+ """Find an existing profile dir by profile name, return its ProfileDir.
+
+ This searches through a sequence of paths for a profile dir. If it
+ is not found, a :class:`ProfileDirError` exception will be raised.
+
+ The search path algorithm is:
+ 1. ``py3compat.getcwd()``
+ 2. ``ipython_dir``
+
+ Parameters
+ ----------
+ ipython_dir : unicode or str
+ The IPython directory to use.
+ name : unicode or str
+ The name of the profile. The name of the profile directory
+ will be "profile_<profile>".
+ """
+ dirname = u'profile_' + name
+ paths = [py3compat.getcwd(), ipython_dir]
+ for p in paths:
+ profile_dir = os.path.join(p, dirname)
+ if os.path.isdir(profile_dir):
+ return cls(location=profile_dir, config=config)
+ else:
+ raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
+
+ @classmethod
+ def find_profile_dir(cls, profile_dir, config=None):
+ """Find/create a profile dir and return its ProfileDir.
+
+ This will create the profile directory if it doesn't exist.
+
+ Parameters
+ ----------
+ profile_dir : unicode or str
+ The path of the profile directory.
+ """
+ profile_dir = expand_path(profile_dir)
+ if not os.path.isdir(profile_dir):
+ raise ProfileDirError('Profile directory not found: %s' % profile_dir)
+ return cls(location=profile_dir, config=config)
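
`_mkdir` above exists mainly to close the check-then-create race: another
process may create the directory between `os.path.exists` and `os.mkdir`, so
`EEXIST` is treated as "already done" rather than an error. The same pattern
in isolation (logging and mode handling trimmed)::

    import errno
    import os

    def mkdir_racefree(path):
        """Create path; return True only if this call created it."""
        try:
            os.mkdir(path)
        except OSError as e:
            # A concurrent creator raises EEXIST; swallow it, as
            # ProfileDir._mkdir does, and report nothing was created.
            if e.errno == errno.EEXIST:
                return False
            raise
        return True
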
diff --git a/contrib/python/ipython/py2/IPython/core/prompts.py b/contrib/python/ipython/py2/IPython/core/prompts.py
index 126e84fdfd..7802bc5363 100644
--- a/contrib/python/ipython/py2/IPython/core/prompts.py
+++ b/contrib/python/ipython/py2/IPython/core/prompts.py
@@ -1,26 +1,26 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
"""Being removed
-"""
-
+"""
+
from IPython.utils import py3compat
-
-class LazyEvaluate(object):
- """This is used for formatting strings with values that need to be updated
- at that time, such as the current time or working directory."""
- def __init__(self, func, *args, **kwargs):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- def __call__(self, **kwargs):
- self.kwargs.update(kwargs)
- return self.func(*self.args, **self.kwargs)
-
- def __str__(self):
- return str(self())
-
- def __unicode__(self):
- return py3compat.unicode_type(self())
-
- def __format__(self, format_spec):
- return format(self(), format_spec)
+
+class LazyEvaluate(object):
+ """This is used for formatting strings with values that need to be updated
+ at that time, such as the current time or working directory."""
+ def __init__(self, func, *args, **kwargs):
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ def __call__(self, **kwargs):
+ self.kwargs.update(kwargs)
+ return self.func(*self.args, **self.kwargs)
+
+ def __str__(self):
+ return str(self())
+
+ def __unicode__(self):
+ return py3compat.unicode_type(self())
+
+ def __format__(self, format_spec):
+ return format(self(), format_spec)
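
`LazyEvaluate` defers the wrapped call until the object is actually
stringified or formatted, which is what keeps per-prompt values such as the
clock fresh. A trimmed, self-contained copy of the idea (keeping only
`__format__`) with a usage example::

    import time

    class LazyClock(object):
        # Minimal stand-in for LazyEvaluate above.
        def __init__(self, func, *args):
            self.func, self.args = func, args
        def __format__(self, format_spec):
            return format(self.func(*self.args), format_spec)

    now = LazyClock(time.strftime, '%H:%M:%S')
    print('first:  {0}'.format(now))   # clock read at format time
    time.sleep(1)
    print('second: {0}'.format(now))   # one second later, re-evaluated
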
diff --git a/contrib/python/ipython/py2/IPython/core/pylabtools.py b/contrib/python/ipython/py2/IPython/core/pylabtools.py
index 79072b41a9..a1932d8c48 100644
--- a/contrib/python/ipython/py2/IPython/core/pylabtools.py
+++ b/contrib/python/ipython/py2/IPython/core/pylabtools.py
@@ -1,28 +1,28 @@
-# -*- coding: utf-8 -*-
-"""Pylab (matplotlib) support utilities."""
-from __future__ import print_function
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from io import BytesIO
-
-from IPython.core.display import _pngxy
-from IPython.utils.decorators import flag_calls
-from IPython.utils import py3compat
-
-# If user specifies a GUI, that dictates the backend, otherwise we read the
-# user's mpl default from the mpl rc structure
-backends = {'tk': 'TkAgg',
- 'gtk': 'GTKAgg',
- 'gtk3': 'GTK3Agg',
- 'wx': 'WXAgg',
- 'qt4': 'Qt4Agg',
- 'qt5': 'Qt5Agg',
+# -*- coding: utf-8 -*-
+"""Pylab (matplotlib) support utilities."""
+from __future__ import print_function
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from io import BytesIO
+
+from IPython.core.display import _pngxy
+from IPython.utils.decorators import flag_calls
+from IPython.utils import py3compat
+
+# If user specifies a GUI, that dictates the backend, otherwise we read the
+# user's mpl default from the mpl rc structure
+backends = {'tk': 'TkAgg',
+ 'gtk': 'GTKAgg',
+ 'gtk3': 'GTK3Agg',
+ 'wx': 'WXAgg',
+ 'qt4': 'Qt4Agg',
+ 'qt5': 'Qt5Agg',
'qt': 'Qt5Agg',
- 'osx': 'MacOSX',
- 'nbagg': 'nbAgg',
- 'notebook': 'nbAgg',
+ 'osx': 'MacOSX',
+ 'nbagg': 'nbAgg',
+ 'notebook': 'nbAgg',
'agg': 'agg',
'svg': 'svg',
'pdf': 'pdf',
@@ -31,20 +31,20 @@ backends = {'tk': 'TkAgg',
'ipympl': 'module://ipympl.backend_nbagg',
'widget': 'module://ipympl.backend_nbagg',
}
-
-# We also need a reverse backend2gui mapping that will properly choose which
-# GUI support to activate based on the desired matplotlib backend. For the
-# most part it's just a reverse of the above dict, but we also need to add a
-# few others that map to the same GUI manually:
-backend2gui = dict(zip(backends.values(), backends.keys()))
-# Our tests expect backend2gui to just return 'qt'
-backend2gui['Qt4Agg'] = 'qt'
-# In the reverse mapping, there are a few extra valid matplotlib backends that
-# map to the same GUI support
-backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
-backend2gui['GTK3Cairo'] = 'gtk3'
-backend2gui['WX'] = 'wx'
-backend2gui['CocoaAgg'] = 'osx'
+
+# We also need a reverse backend2gui mapping that will properly choose which
+# GUI support to activate based on the desired matplotlib backend. For the
+# most part it's just a reverse of the above dict, but we also need to add a
+# few others that map to the same GUI manually:
+backend2gui = dict(zip(backends.values(), backends.keys()))
+# Our tests expect backend2gui to just return 'qt'
+backend2gui['Qt4Agg'] = 'qt'
+# In the reverse mapping, there are a few extra valid matplotlib backends that
+# map to the same GUI support
+backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
+backend2gui['GTK3Cairo'] = 'gtk3'
+backend2gui['WX'] = 'wx'
+backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
@@ -52,138 +52,138 @@ del backend2gui['svg']
del backend2gui['pdf']
del backend2gui['ps']
del backend2gui['module://ipykernel.pylab.backend_inline']
-
-#-----------------------------------------------------------------------------
-# Matplotlib utilities
-#-----------------------------------------------------------------------------
-
-
-def getfigs(*fig_nums):
- """Get a list of matplotlib figures by figure numbers.
-
- If no arguments are given, all available figures are returned. If the
- argument list contains references to invalid figures, a warning is printed
-    but the function continues processing further figures.
-
- Parameters
- ----------
-    fig_nums : tuple
- A tuple of ints giving the figure numbers of the figures to return.
- """
- from matplotlib._pylab_helpers import Gcf
- if not fig_nums:
- fig_managers = Gcf.get_all_fig_managers()
- return [fm.canvas.figure for fm in fig_managers]
- else:
- figs = []
- for num in fig_nums:
- f = Gcf.figs.get(num)
- if f is None:
- print('Warning: figure %s not available.' % num)
- else:
- figs.append(f.canvas.figure)
- return figs
-
-
-def figsize(sizex, sizey):
- """Set the default figure size to be [sizex, sizey].
-
-    This is just an easy-to-remember convenience wrapper that sets::
-
- matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
- """
- import matplotlib
- matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
-
-
-def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
- """Print a figure to an image, and return the resulting file data
-
- Returned data will be bytes unless ``fmt='svg'``,
- in which case it will be unicode.
-
- Any keyword args are passed to fig.canvas.print_figure,
- such as ``quality`` or ``bbox_inches``.
- """
- from matplotlib import rcParams
- # When there's an empty figure, we shouldn't return anything, otherwise we
- # get big blank areas in the qt console.
- if not fig.axes and not fig.lines:
- return
-
+
+#-----------------------------------------------------------------------------
+# Matplotlib utilities
+#-----------------------------------------------------------------------------
+
+
+def getfigs(*fig_nums):
+ """Get a list of matplotlib figures by figure numbers.
+
+ If no arguments are given, all available figures are returned. If the
+ argument list contains references to invalid figures, a warning is printed
+    but the function continues processing further figures.
+
+ Parameters
+ ----------
+    fig_nums : tuple
+ A tuple of ints giving the figure numbers of the figures to return.
+ """
+ from matplotlib._pylab_helpers import Gcf
+ if not fig_nums:
+ fig_managers = Gcf.get_all_fig_managers()
+ return [fm.canvas.figure for fm in fig_managers]
+ else:
+ figs = []
+ for num in fig_nums:
+ f = Gcf.figs.get(num)
+ if f is None:
+ print('Warning: figure %s not available.' % num)
+ else:
+ figs.append(f.canvas.figure)
+ return figs
+
+
+def figsize(sizex, sizey):
+ """Set the default figure size to be [sizex, sizey].
+
+    This is just an easy-to-remember convenience wrapper that sets::
+
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+ """
+ import matplotlib
+ matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
+
+
+def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
+ """Print a figure to an image, and return the resulting file data
+
+ Returned data will be bytes unless ``fmt='svg'``,
+ in which case it will be unicode.
+
+ Any keyword args are passed to fig.canvas.print_figure,
+ such as ``quality`` or ``bbox_inches``.
+ """
+ from matplotlib import rcParams
+ # When there's an empty figure, we shouldn't return anything, otherwise we
+ # get big blank areas in the qt console.
+ if not fig.axes and not fig.lines:
+ return
+
dpi = fig.dpi
- if fmt == 'retina':
- dpi = dpi * 2
- fmt = 'png'
-
- # build keyword args
- kw = dict(
- format=fmt,
- facecolor=fig.get_facecolor(),
- edgecolor=fig.get_edgecolor(),
- dpi=dpi,
- bbox_inches=bbox_inches,
- )
- # **kwargs get higher priority
- kw.update(kwargs)
-
- bytes_io = BytesIO()
- fig.canvas.print_figure(bytes_io, **kw)
- data = bytes_io.getvalue()
- if fmt == 'svg':
- data = data.decode('utf-8')
- return data
-
-def retina_figure(fig, **kwargs):
- """format a figure as a pixel-doubled (retina) PNG"""
- pngdata = print_figure(fig, fmt='retina', **kwargs)
- # Make sure that retina_figure acts just like print_figure and returns
- # None when the figure is empty.
- if pngdata is None:
- return
- w, h = _pngxy(pngdata)
- metadata = dict(width=w//2, height=h//2)
- return pngdata, metadata
-
-# We need a little factory function here to create the closure where
-# safe_execfile can live.
-def mpl_runner(safe_execfile):
- """Factory to return a matplotlib-enabled runner for %run.
-
- Parameters
- ----------
- safe_execfile : function
- This must be a function with the same interface as the
- :meth:`safe_execfile` method of IPython.
-
- Returns
- -------
- A function suitable for use as the ``runner`` argument of the %run magic
- function.
- """
-
- def mpl_execfile(fname,*where,**kw):
- """matplotlib-aware wrapper around safe_execfile.
-
- Its interface is identical to that of the :func:`execfile` builtin.
-
- This is ultimately a call to execfile(), but wrapped in safeties to
- properly handle interactive rendering."""
-
- import matplotlib
+ if fmt == 'retina':
+ dpi = dpi * 2
+ fmt = 'png'
+
+ # build keyword args
+ kw = dict(
+ format=fmt,
+ facecolor=fig.get_facecolor(),
+ edgecolor=fig.get_edgecolor(),
+ dpi=dpi,
+ bbox_inches=bbox_inches,
+ )
+ # **kwargs get higher priority
+ kw.update(kwargs)
+
+ bytes_io = BytesIO()
+ fig.canvas.print_figure(bytes_io, **kw)
+ data = bytes_io.getvalue()
+ if fmt == 'svg':
+ data = data.decode('utf-8')
+ return data
+
+def retina_figure(fig, **kwargs):
+ """format a figure as a pixel-doubled (retina) PNG"""
+ pngdata = print_figure(fig, fmt='retina', **kwargs)
+ # Make sure that retina_figure acts just like print_figure and returns
+ # None when the figure is empty.
+ if pngdata is None:
+ return
+ w, h = _pngxy(pngdata)
+ metadata = dict(width=w//2, height=h//2)
+ return pngdata, metadata
+
+# We need a little factory function here to create the closure where
+# safe_execfile can live.
+def mpl_runner(safe_execfile):
+ """Factory to return a matplotlib-enabled runner for %run.
+
+ Parameters
+ ----------
+ safe_execfile : function
+ This must be a function with the same interface as the
+ :meth:`safe_execfile` method of IPython.
+
+ Returns
+ -------
+ A function suitable for use as the ``runner`` argument of the %run magic
+ function.
+ """
+
+ def mpl_execfile(fname,*where,**kw):
+ """matplotlib-aware wrapper around safe_execfile.
+
+ Its interface is identical to that of the :func:`execfile` builtin.
+
+ This is ultimately a call to execfile(), but wrapped in safeties to
+ properly handle interactive rendering."""
+
+ import matplotlib
import matplotlib.pyplot as plt
-
- #print '*** Matplotlib runner ***' # dbg
- # turn off rendering until end of script
- is_interactive = matplotlib.rcParams['interactive']
- matplotlib.interactive(False)
- safe_execfile(fname,*where,**kw)
- matplotlib.interactive(is_interactive)
- # make rendering call now, if the user tried to do it
+
+ #print '*** Matplotlib runner ***' # dbg
+ # turn off rendering until end of script
+ is_interactive = matplotlib.rcParams['interactive']
+ matplotlib.interactive(False)
+ safe_execfile(fname,*where,**kw)
+ matplotlib.interactive(is_interactive)
+ # make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
-
+
# re-draw everything that is stale
try:
da = plt.draw_all
@@ -192,225 +192,225 @@ def mpl_runner(safe_execfile):
else:
da()
- return mpl_execfile
-
-
-def _reshow_nbagg_figure(fig):
- """reshow an nbagg figure"""
- try:
- reshow = fig.canvas.manager.reshow
- except AttributeError:
- raise NotImplementedError()
- else:
- reshow()
-
-
-def select_figure_formats(shell, formats, **kwargs):
- """Select figure formats for the inline backend.
-
- Parameters
-    ----------
- shell : InteractiveShell
- The main IPython instance.
- formats : str or set
- One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
- **kwargs : any
- Extra keyword arguments to be passed to fig.canvas.print_figure.
- """
- import matplotlib
- from matplotlib.figure import Figure
-
- svg_formatter = shell.display_formatter.formatters['image/svg+xml']
- png_formatter = shell.display_formatter.formatters['image/png']
- jpg_formatter = shell.display_formatter.formatters['image/jpeg']
- pdf_formatter = shell.display_formatter.formatters['application/pdf']
-
- if isinstance(formats, py3compat.string_types):
- formats = {formats}
- # cast in case of list / tuple
- formats = set(formats)
-
- [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
+ return mpl_execfile
+
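# The runner above depends on a `called` flag that `flag_calls` (an
# IPython.utils helper) attaches to the wrapped function; a minimal
# self-contained sketch of that pattern, using hypothetical names:
def _flag_calls_sketch(func):
    def wrapper(*args, **kw):
        wrapper.called = True
        return func(*args, **kw)
    wrapper.called = False
    return wrapper

_draw = _flag_calls_sketch(lambda: None)
assert _draw.called is False
_draw()                       # the user's script asked for a draw...
assert _draw.called is True   # ...so the runner knows to render afterwards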
+
+def _reshow_nbagg_figure(fig):
+ """reshow an nbagg figure"""
+ try:
+ reshow = fig.canvas.manager.reshow
+ except AttributeError:
+ raise NotImplementedError()
+ else:
+ reshow()
+
+
+def select_figure_formats(shell, formats, **kwargs):
+ """Select figure formats for the inline backend.
+
+ Parameters
+    ----------
+ shell : InteractiveShell
+ The main IPython instance.
+ formats : str or set
+ One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
+ **kwargs : any
+ Extra keyword arguments to be passed to fig.canvas.print_figure.
+ """
+ import matplotlib
+ from matplotlib.figure import Figure
+
+ svg_formatter = shell.display_formatter.formatters['image/svg+xml']
+ png_formatter = shell.display_formatter.formatters['image/png']
+ jpg_formatter = shell.display_formatter.formatters['image/jpeg']
+ pdf_formatter = shell.display_formatter.formatters['application/pdf']
+
+ if isinstance(formats, py3compat.string_types):
+ formats = {formats}
+ # cast in case of list / tuple
+ formats = set(formats)
+
+ [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
- formatter = shell.display_formatter.ipython_display_formatter
- formatter.for_type(Figure, _reshow_nbagg_figure)
-
- supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
- bad = formats.difference(supported)
- if bad:
- bs = "%s" % ','.join([repr(f) for f in bad])
- gs = "%s" % ','.join([repr(f) for f in supported])
- raise ValueError("supported formats are: %s not %s" % (gs, bs))
-
- if 'png' in formats:
- png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
- if 'retina' in formats or 'png2x' in formats:
- png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
- if 'jpg' in formats or 'jpeg' in formats:
- jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
- if 'svg' in formats:
- svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
- if 'pdf' in formats:
- pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
-
-#-----------------------------------------------------------------------------
-# Code for initializing matplotlib and importing pylab
-#-----------------------------------------------------------------------------
-
-
-def find_gui_and_backend(gui=None, gui_select=None):
- """Given a gui string return the gui and mpl backend.
-
- Parameters
- ----------
- gui : str
- Can be one of ('tk','gtk','wx','qt','qt4','inline').
- gui_select : str
- Can be one of ('tk','gtk','wx','qt','qt4','inline').
- This is any gui already selected by the shell.
-
- Returns
- -------
- A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
- 'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
- """
-
- import matplotlib
-
- if gui and gui != 'auto':
- # select backend based on requested gui
- backend = backends[gui]
- else:
- # We need to read the backend from the original data structure, *not*
- # from mpl.rcParams, since a prior invocation of %matplotlib may have
- # overwritten that.
- # WARNING: this assumes matplotlib 1.1 or newer!!
- backend = matplotlib.rcParamsOrig['backend']
- # In this case, we need to find what the appropriate gui selection call
- # should be for IPython, so we can activate inputhook accordingly
- gui = backend2gui.get(backend, None)
-
- # If we have already had a gui active, we need it and inline are the
- # ones allowed.
- if gui_select and gui != gui_select:
- gui = gui_select
- backend = backends[gui]
-
- return gui, backend
-
-
-def activate_matplotlib(backend):
- """Activate the given backend and set interactive to True."""
-
- import matplotlib
- matplotlib.interactive(True)
-
- # Matplotlib had a bug where even switch_backend could not force
- # the rcParam to update. This needs to be set *before* the module
- # magic of switch_backend().
- matplotlib.rcParams['backend'] = backend
-
- import matplotlib.pyplot
- matplotlib.pyplot.switch_backend(backend)
-
- # This must be imported last in the matplotlib series, after
- # backend/interactivity choices have been made
+ formatter = shell.display_formatter.ipython_display_formatter
+ formatter.for_type(Figure, _reshow_nbagg_figure)
+
+ supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
+ bad = formats.difference(supported)
+ if bad:
+ bs = "%s" % ','.join([repr(f) for f in bad])
+ gs = "%s" % ','.join([repr(f) for f in supported])
+ raise ValueError("supported formats are: %s not %s" % (gs, bs))
+
+ if 'png' in formats:
+ png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
+ if 'retina' in formats or 'png2x' in formats:
+ png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
+ if 'jpg' in formats or 'jpeg' in formats:
+ jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
+ if 'svg' in formats:
+ svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
+ if 'pdf' in formats:
+ pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
+
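# Self-contained sketch of the format validation above, with a deliberately
# unsupported entry to show which branch raises:
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
requested = {'png', 'webp'}            # 'webp' is not a supported format
bad = requested.difference(supported)
assert bad == {'webp'}                 # non-empty, so ValueError is raised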
+#-----------------------------------------------------------------------------
+# Code for initializing matplotlib and importing pylab
+#-----------------------------------------------------------------------------
+
+
+def find_gui_and_backend(gui=None, gui_select=None):
+ """Given a gui string return the gui and mpl backend.
+
+ Parameters
+ ----------
+ gui : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline').
+ gui_select : str
+ Can be one of ('tk','gtk','wx','qt','qt4','inline').
+ This is any gui already selected by the shell.
+
+ Returns
+ -------
+ A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
+ 'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
+ """
+
+ import matplotlib
+
+ if gui and gui != 'auto':
+ # select backend based on requested gui
+ backend = backends[gui]
+ else:
+ # We need to read the backend from the original data structure, *not*
+ # from mpl.rcParams, since a prior invocation of %matplotlib may have
+ # overwritten that.
+ # WARNING: this assumes matplotlib 1.1 or newer!!
+ backend = matplotlib.rcParamsOrig['backend']
+ # In this case, we need to find what the appropriate gui selection call
+ # should be for IPython, so we can activate inputhook accordingly
+ gui = backend2gui.get(backend, None)
+
+ # If we have already had a gui active, we need it and inline are the
+ # ones allowed.
+ if gui_select and gui != gui_select:
+ gui = gui_select
+ backend = backends[gui]
+
+ return gui, backend
+
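# Illustrative entries from the lookup tables consulted above (example
# values only; the real `backends`/`backend2gui` dicts are defined earlier
# in pylabtools and cover more GUIs):
#
#     backends = {'tk': 'TkAgg', 'qt4': 'Qt4Agg', 'wx': 'WXAgg',
#                 'inline': 'module://ipykernel.pylab.backend_inline', ...}
#     backend2gui = inverse mapping, backend name -> gui key
#
#     find_gui_and_backend('qt4')       -> ('qt4', 'Qt4Agg')
#     find_gui_and_backend(None)        -> gui inferred from rcParamsOrig
#     find_gui_and_backend('tk', 'qt4') -> ('qt4', 'Qt4Agg')  # prior gui wins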
+
+def activate_matplotlib(backend):
+ """Activate the given backend and set interactive to True."""
+
+ import matplotlib
+ matplotlib.interactive(True)
+
+ # Matplotlib had a bug where even switch_backend could not force
+ # the rcParam to update. This needs to be set *before* the module
+ # magic of switch_backend().
+ matplotlib.rcParams['backend'] = backend
+
+ import matplotlib.pyplot
+ matplotlib.pyplot.switch_backend(backend)
+
+ # This must be imported last in the matplotlib series, after
+ # backend/interactivity choices have been made
import matplotlib.pyplot as plt
-
+
plt.show._needmain = False
- # We need to detect at runtime whether show() is called by the user.
- # For this, we wrap it into a decorator which adds a 'called' flag.
+ # We need to detect at runtime whether show() is called by the user.
+ # For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
-
-
-def import_pylab(user_ns, import_all=True):
- """Populate the namespace with pylab-related values.
-
- Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
-
- Also imports a few names from IPython (figsize, display, getfigs)
-
- """
-
-    # Importing numpy as np and pyplot as plt is the convention we're trying to
- # somewhat standardize on. Making them available to users by default
- # will greatly help this.
- s = ("import numpy\n"
- "import matplotlib\n"
- "from matplotlib import pylab, mlab, pyplot\n"
- "np = numpy\n"
- "plt = pyplot\n"
- )
- exec(s, user_ns)
-
- if import_all:
- s = ("from matplotlib.pylab import *\n"
- "from numpy import *\n")
- exec(s, user_ns)
-
- # IPython symbols to add
- user_ns['figsize'] = figsize
- from IPython.core.display import display
- # Add display and getfigs to the user's namespace
- user_ns['display'] = display
- user_ns['getfigs'] = getfigs
-
-
-def configure_inline_support(shell, backend):
- """Configure an IPython shell object for matplotlib use.
-
- Parameters
- ----------
- shell : InteractiveShell instance
-
- backend : matplotlib backend
- """
- # If using our svg payload backend, register the post-execution
- # function that will pick up the results for display. This can only be
- # done with access to the real shell object.
-
- # Note: if we can't load the inline backend, then there's no point
- # continuing (such as in terminal-only shells in environments without
- # zeromq available).
- try:
- from ipykernel.pylab.backend_inline import InlineBackend
- except ImportError:
- return
+
+
+def import_pylab(user_ns, import_all=True):
+ """Populate the namespace with pylab-related values.
+
+ Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
+
+ Also imports a few names from IPython (figsize, display, getfigs)
+
+ """
+
+    # Importing numpy as np and pyplot as plt is the convention we're trying to
+ # somewhat standardize on. Making them available to users by default
+ # will greatly help this.
+ s = ("import numpy\n"
+ "import matplotlib\n"
+ "from matplotlib import pylab, mlab, pyplot\n"
+ "np = numpy\n"
+ "plt = pyplot\n"
+ )
+ exec(s, user_ns)
+
+ if import_all:
+ s = ("from matplotlib.pylab import *\n"
+ "from numpy import *\n")
+ exec(s, user_ns)
+
+ # IPython symbols to add
+ user_ns['figsize'] = figsize
+ from IPython.core.display import display
+ # Add display and getfigs to the user's namespace
+ user_ns['display'] = display
+ user_ns['getfigs'] = getfigs
+
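# Self-contained sketch of the exec-into-namespace technique used above:
# the executed source leaves its bindings in the target dict.
_ns = {}
exec("import math\ntau = 2 * math.pi\n", _ns)
assert abs(_ns['tau'] - 6.283185307179586) < 1e-12
assert _ns['math'].pi == _ns['tau'] / 2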
+
+def configure_inline_support(shell, backend):
+ """Configure an IPython shell object for matplotlib use.
+
+ Parameters
+ ----------
+ shell : InteractiveShell instance
+
+ backend : matplotlib backend
+ """
+ # If using our svg payload backend, register the post-execution
+ # function that will pick up the results for display. This can only be
+ # done with access to the real shell object.
+
+ # Note: if we can't load the inline backend, then there's no point
+ # continuing (such as in terminal-only shells in environments without
+ # zeromq available).
+ try:
+ from ipykernel.pylab.backend_inline import InlineBackend
+ except ImportError:
+ return
import matplotlib
-
- cfg = InlineBackend.instance(parent=shell)
- cfg.shell = shell
- if cfg not in shell.configurables:
- shell.configurables.append(cfg)
-
- if backend == backends['inline']:
- from ipykernel.pylab.backend_inline import flush_figures
- shell.events.register('post_execute', flush_figures)
-
-        # Save rcParams that will be overwritten
- shell._saved_rcParams = dict()
- for k in cfg.rc:
+
+ cfg = InlineBackend.instance(parent=shell)
+ cfg.shell = shell
+ if cfg not in shell.configurables:
+ shell.configurables.append(cfg)
+
+ if backend == backends['inline']:
+ from ipykernel.pylab.backend_inline import flush_figures
+ shell.events.register('post_execute', flush_figures)
+
+        # Save rcParams that will be overwritten
+ shell._saved_rcParams = dict()
+ for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
- # load inline_rc
+ # load inline_rc
matplotlib.rcParams.update(cfg.rc)
- new_backend_name = "inline"
- else:
- from ipykernel.pylab.backend_inline import flush_figures
- try:
- shell.events.unregister('post_execute', flush_figures)
- except ValueError:
- pass
- if hasattr(shell, '_saved_rcParams'):
+ new_backend_name = "inline"
+ else:
+ from ipykernel.pylab.backend_inline import flush_figures
+ try:
+ shell.events.unregister('post_execute', flush_figures)
+ except ValueError:
+ pass
+ if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
- del shell._saved_rcParams
- new_backend_name = "other"
-
- # only enable the formats once -> don't change the enabled formats (which the user may
-    # have changed) when getting another "%matplotlib inline" call.
- # See https://github.com/ipython/ipykernel/issues/29
- cur_backend = getattr(configure_inline_support, "current_backend", "unset")
- if new_backend_name != cur_backend:
- # Setup the default figure format
- select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
- configure_inline_support.current_backend = new_backend_name
+ del shell._saved_rcParams
+ new_backend_name = "other"
+
+ # only enable the formats once -> don't change the enabled formats (which the user may
+    # have changed) when getting another "%matplotlib inline" call.
+ # See https://github.com/ipython/ipykernel/issues/29
+ cur_backend = getattr(configure_inline_support, "current_backend", "unset")
+ if new_backend_name != cur_backend:
+ # Setup the default figure format
+ select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
+ configure_inline_support.current_backend = new_backend_name
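# The backend-change guard above is plain function-attribute memoisation; a
# self-contained sketch of the same pattern, with hypothetical names:
def _configure_once(name):
    if getattr(_configure_once, 'current', 'unset') != name:
        _configure_once.current = name
        return True    # backend changed: (re)select the figure formats
    return False       # repeated "%matplotlib inline": keep user settings

assert _configure_once('inline') is True
assert _configure_once('inline') is False   # no-op on the second call
assert _configure_once('other') is True     # backend actually changed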
diff --git a/contrib/python/ipython/py2/IPython/core/release.py b/contrib/python/ipython/py2/IPython/core/release.py
index 94d54bb828..94dea1073b 100644
--- a/contrib/python/ipython/py2/IPython/core/release.py
+++ b/contrib/python/ipython/py2/IPython/core/release.py
@@ -1,123 +1,123 @@
-# -*- coding: utf-8 -*-
-"""Release data for the IPython project."""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2008, IPython Development Team.
-# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
-# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
-# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-# Name of the package for release purposes. This is the name which labels
-# the tarballs and RPMs made by distutils, so it's best to lowercase it.
-name = 'ipython'
-
-# IPython version information. An empty _version_extra corresponds to a full
-# release. 'dev' as a _version_extra string means this is a development
-# version
+# -*- coding: utf-8 -*-
+"""Release data for the IPython project."""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2008, IPython Development Team.
+# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
+# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
+# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# Name of the package for release purposes. This is the name which labels
+# the tarballs and RPMs made by distutils, so it's best to lowercase it.
+name = 'ipython'
+
+# IPython version information. An empty _version_extra corresponds to a full
+# release. 'dev' as a _version_extra string means this is a development
+# version
_version_major = 5
_version_minor = 9
_version_patch = 0
-_version_extra = '.dev'
+_version_extra = '.dev'
# _version_extra = 'rc1'
-_version_extra = '' # Uncomment this for full releases
-
-# release.codename is deprecated in 2.0, will be removed in 3.0
-codename = ''
-
-# Construct full version string from these.
-_ver = [_version_major, _version_minor, _version_patch]
-
-__version__ = '.'.join(map(str, _ver))
-if _version_extra:
- __version__ = __version__ + _version_extra
-
-version = __version__ # backwards compatibility name
-version_info = (_version_major, _version_minor, _version_patch, _version_extra)
-
-# Change this when incrementing the kernel protocol version
-kernel_protocol_version_info = (5, 0)
-kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
-
-description = "IPython: Productive Interactive Computing"
-
-long_description = \
-"""
-IPython provides a rich toolkit to help you make the most out of using Python
-interactively. Its main components are:
-
-* A powerful interactive Python shell
-* A `Jupyter <http://jupyter.org/>`_ kernel to work with Python code in Jupyter
- notebooks and other interactive frontends.
-
-The enhanced interactive Python shells have the following main features:
-
-* Comprehensive object introspection.
-
-* Input history, persistent across sessions.
-
-* Caching of output results during a session with automatically generated
- references.
-
-* Extensible tab completion, with support by default for completion of python
- variables and keywords, filenames and function keywords.
-
-* Extensible system of 'magic' commands for controlling the environment and
- performing many tasks related either to IPython or the operating system.
-
-* A rich configuration system with easy switching between different setups
- (simpler than changing $PYTHONSTARTUP environment variables every time).
-
-* Session logging and reloading.
-
-* Extensible syntax processing for special purpose situations.
-
-* Access to the system shell with user-extensible alias system.
-
-* Easily embeddable in other Python programs and GUIs.
-
-* Integrated access to the pdb debugger and the Python profiler.
-
-The latest development version is always available from IPython's `GitHub
-site <http://github.com/ipython>`_.
-"""
-
-license = 'BSD'
-
-authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
- 'Janko' : ('Janko Hauser','jhauser@zscout.de'),
- 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
- 'Ville' : ('Ville Vainio','vivainio@gmail.com'),
- 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
- 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
- 'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
- 'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
- 'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
- }
-
-author = 'The IPython Development Team'
-
+_version_extra = '' # Uncomment this for full releases
+
+# release.codename is deprecated in 2.0, will be removed in 3.0
+codename = ''
+
+# Construct full version string from these.
+_ver = [_version_major, _version_minor, _version_patch]
+
+__version__ = '.'.join(map(str, _ver))
+if _version_extra:
+ __version__ = __version__ + _version_extra
+
+version = __version__ # backwards compatibility name
+version_info = (_version_major, _version_minor, _version_patch, _version_extra)
+
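# Worked example of the assembly above with this file's values:
# _ver = [5, 9, 0], so '.'.join(map(str, _ver)) == '5.9.0'; _version_extra
# is '' for a full release, hence __version__ == '5.9.0' and
# version_info == (5, 9, 0, ''). With _version_extra = '.dev' the result
# would be '5.9.0.dev'.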
+# Change this when incrementing the kernel protocol version
+kernel_protocol_version_info = (5, 0)
+kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
+
+description = "IPython: Productive Interactive Computing"
+
+long_description = \
+"""
+IPython provides a rich toolkit to help you make the most out of using Python
+interactively. Its main components are:
+
+* A powerful interactive Python shell
+* A `Jupyter <http://jupyter.org/>`_ kernel to work with Python code in Jupyter
+ notebooks and other interactive frontends.
+
+The enhanced interactive Python shells have the following main features:
+
+* Comprehensive object introspection.
+
+* Input history, persistent across sessions.
+
+* Caching of output results during a session with automatically generated
+ references.
+
+* Extensible tab completion, with support by default for completion of python
+ variables and keywords, filenames and function keywords.
+
+* Extensible system of 'magic' commands for controlling the environment and
+ performing many tasks related either to IPython or the operating system.
+
+* A rich configuration system with easy switching between different setups
+ (simpler than changing $PYTHONSTARTUP environment variables every time).
+
+* Session logging and reloading.
+
+* Extensible syntax processing for special purpose situations.
+
+* Access to the system shell with user-extensible alias system.
+
+* Easily embeddable in other Python programs and GUIs.
+
+* Integrated access to the pdb debugger and the Python profiler.
+
+The latest development version is always available from IPython's `GitHub
+site <http://github.com/ipython>`_.
+"""
+
+license = 'BSD'
+
+authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
+ 'Janko' : ('Janko Hauser','jhauser@zscout.de'),
+ 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
+ 'Ville' : ('Ville Vainio','vivainio@gmail.com'),
+ 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
+ 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
+ 'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
+ 'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
+ 'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
+ }
+
+author = 'The IPython Development Team'
+
author_email = 'ipython-dev@python.org'
-
+
url = 'https://ipython.org'
-
-
-platforms = ['Linux','Mac OSX','Windows']
-
-keywords = ['Interactive','Interpreter','Shell', 'Embedding']
-
-classifiers = [
- 'Framework :: IPython',
- 'Intended Audience :: Developers',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Topic :: System :: Shells'
- ]
+
+
+platforms = ['Linux','Mac OSX','Windows']
+
+keywords = ['Interactive','Interpreter','Shell', 'Embedding']
+
+classifiers = [
+ 'Framework :: IPython',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: BSD License',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: System :: Shells'
+ ]
diff --git a/contrib/python/ipython/py2/IPython/core/shadowns.py b/contrib/python/ipython/py2/IPython/core/shadowns.py
index c9868ea569..d2d93b61bd 100644
--- a/contrib/python/ipython/py2/IPython/core/shadowns.py
+++ b/contrib/python/ipython/py2/IPython/core/shadowns.py
@@ -1 +1 @@
-""" Shadow namespace """ \ No newline at end of file
+""" Shadow namespace """ \ No newline at end of file
diff --git a/contrib/python/ipython/py2/IPython/core/shellapp.py b/contrib/python/ipython/py2/IPython/core/shellapp.py
index 18ef594527..213648246e 100644
--- a/contrib/python/ipython/py2/IPython/core/shellapp.py
+++ b/contrib/python/ipython/py2/IPython/core/shellapp.py
@@ -1,415 +1,415 @@
-# encoding: utf-8
-"""
-A mixin for :class:`~IPython.core.application.Application` classes that
-launch InteractiveShell instances, load extensions, etc.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import glob
+# encoding: utf-8
+"""
+A mixin for :class:`~IPython.core.application.Application` classes that
+launch InteractiveShell instances, load extensions, etc.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import glob
from itertools import chain
-import os
-import sys
-
-from traitlets.config.application import boolean_flag
-from traitlets.config.configurable import Configurable
-from traitlets.config.loader import Config
+import os
+import sys
+
+from traitlets.config.application import boolean_flag
+from traitlets.config.configurable import Configurable
+from traitlets.config.loader import Config
from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
-from IPython.core import pylabtools
-from IPython.utils import py3compat
-from IPython.utils.contexts import preserve_keys
-from IPython.utils.path import filefind
-from traitlets import (
+from IPython.core import pylabtools
+from IPython.utils import py3compat
+from IPython.utils.contexts import preserve_keys
+from IPython.utils.path import filefind
+from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
-)
+)
from IPython.terminal import pt_inputhooks
-
-#-----------------------------------------------------------------------------
-# Aliases and Flags
-#-----------------------------------------------------------------------------
-
+
+#-----------------------------------------------------------------------------
+# Aliases and Flags
+#-----------------------------------------------------------------------------
+
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
-
-backend_keys = sorted(pylabtools.backends.keys())
-backend_keys.insert(0, 'auto')
-
-shell_flags = {}
-
-addflag = lambda *args: shell_flags.update(boolean_flag(*args))
-addflag('autoindent', 'InteractiveShell.autoindent',
- 'Turn on autoindenting.', 'Turn off autoindenting.'
-)
-addflag('automagic', 'InteractiveShell.automagic',
- """Turn on the auto calling of magic commands. Type %%magic at the
- IPython prompt for more information.""",
- 'Turn off the auto calling of magic commands.'
-)
-addflag('pdb', 'InteractiveShell.pdb',
- "Enable auto calling the pdb debugger after every exception.",
- "Disable auto calling the pdb debugger after every exception."
-)
-addflag('pprint', 'PlainTextFormatter.pprint',
- "Enable auto pretty printing of results.",
- "Disable auto pretty printing of results."
-)
-addflag('color-info', 'InteractiveShell.color_info',
- """IPython can display information about objects via a set of functions,
- and optionally can use colors for this, syntax highlighting
- source code and various other elements. This is on by default, but can cause
- problems with some pagers. If you see such problems, you can disable the
- colours.""",
- "Disable using colors for info related things."
-)
-nosep_config = Config()
-nosep_config.InteractiveShell.separate_in = ''
-nosep_config.InteractiveShell.separate_out = ''
-nosep_config.InteractiveShell.separate_out2 = ''
-
-shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
-shell_flags['pylab'] = (
- {'InteractiveShellApp' : {'pylab' : 'auto'}},
- """Pre-load matplotlib and numpy for interactive use with
- the default matplotlib backend."""
-)
-shell_flags['matplotlib'] = (
- {'InteractiveShellApp' : {'matplotlib' : 'auto'}},
- """Configure matplotlib for interactive use with
- the default matplotlib backend."""
-)
-
-# it's possible we don't want short aliases for *all* of these:
-shell_aliases = dict(
- autocall='InteractiveShell.autocall',
- colors='InteractiveShell.colors',
- logfile='InteractiveShell.logfile',
- logappend='InteractiveShell.logappend',
- c='InteractiveShellApp.code_to_run',
- m='InteractiveShellApp.module_to_run',
- ext='InteractiveShellApp.extra_extension',
- gui='InteractiveShellApp.gui',
- pylab='InteractiveShellApp.pylab',
- matplotlib='InteractiveShellApp.matplotlib',
-)
-shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
-
-#-----------------------------------------------------------------------------
-# Main classes and functions
-#-----------------------------------------------------------------------------
-
-class InteractiveShellApp(Configurable):
- """A Mixin for applications that start InteractiveShell instances.
-
- Provides configurables for loading extensions and executing files
- as part of configuring a Shell environment.
-
- The following methods should be called by the :meth:`initialize` method
- of the subclass:
-
- - :meth:`init_path`
- - :meth:`init_shell` (to be implemented by the subclass)
- - :meth:`init_gui_pylab`
- - :meth:`init_extensions`
- - :meth:`init_code`
- """
+
+backend_keys = sorted(pylabtools.backends.keys())
+backend_keys.insert(0, 'auto')
+
+shell_flags = {}
+
+addflag = lambda *args: shell_flags.update(boolean_flag(*args))
+addflag('autoindent', 'InteractiveShell.autoindent',
+ 'Turn on autoindenting.', 'Turn off autoindenting.'
+)
+addflag('automagic', 'InteractiveShell.automagic',
+ """Turn on the auto calling of magic commands. Type %%magic at the
+ IPython prompt for more information.""",
+ 'Turn off the auto calling of magic commands.'
+)
+addflag('pdb', 'InteractiveShell.pdb',
+ "Enable auto calling the pdb debugger after every exception.",
+ "Disable auto calling the pdb debugger after every exception."
+)
+addflag('pprint', 'PlainTextFormatter.pprint',
+ "Enable auto pretty printing of results.",
+ "Disable auto pretty printing of results."
+)
+addflag('color-info', 'InteractiveShell.color_info',
+ """IPython can display information about objects via a set of functions,
+ and optionally can use colors for this, syntax highlighting
+ source code and various other elements. This is on by default, but can cause
+ problems with some pagers. If you see such problems, you can disable the
+ colours.""",
+ "Disable using colors for info related things."
+)
+nosep_config = Config()
+nosep_config.InteractiveShell.separate_in = ''
+nosep_config.InteractiveShell.separate_out = ''
+nosep_config.InteractiveShell.separate_out2 = ''
+
+shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
+shell_flags['pylab'] = (
+ {'InteractiveShellApp' : {'pylab' : 'auto'}},
+ """Pre-load matplotlib and numpy for interactive use with
+ the default matplotlib backend."""
+)
+shell_flags['matplotlib'] = (
+ {'InteractiveShellApp' : {'matplotlib' : 'auto'}},
+ """Configure matplotlib for interactive use with
+ the default matplotlib backend."""
+)
+
+# it's possible we don't want short aliases for *all* of these:
+shell_aliases = dict(
+ autocall='InteractiveShell.autocall',
+ colors='InteractiveShell.colors',
+ logfile='InteractiveShell.logfile',
+ logappend='InteractiveShell.logappend',
+ c='InteractiveShellApp.code_to_run',
+ m='InteractiveShellApp.module_to_run',
+ ext='InteractiveShellApp.extra_extension',
+ gui='InteractiveShellApp.gui',
+ pylab='InteractiveShellApp.pylab',
+ matplotlib='InteractiveShellApp.matplotlib',
+)
+shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
+
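# Sketch of what each addflag(...) call above contributes (assumed shape of
# the traitlets `boolean_flag` helper): one '--name'/'--no-name' pair bound
# to a single Bool trait, so `ipython --no-autoindent` ends up as
# {'InteractiveShell': {'autoindent': False}}:
#
#     boolean_flag('autoindent', 'InteractiveShell.autoindent', on, off) ~=
#     {'autoindent':    ({'InteractiveShell': {'autoindent': True}},  on),
#      'no-autoindent': ({'InteractiveShell': {'autoindent': False}}, off)}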
+#-----------------------------------------------------------------------------
+# Main classes and functions
+#-----------------------------------------------------------------------------
+
+class InteractiveShellApp(Configurable):
+ """A Mixin for applications that start InteractiveShell instances.
+
+ Provides configurables for loading extensions and executing files
+ as part of configuring a Shell environment.
+
+ The following methods should be called by the :meth:`initialize` method
+ of the subclass:
+
+ - :meth:`init_path`
+ - :meth:`init_shell` (to be implemented by the subclass)
+ - :meth:`init_gui_pylab`
+ - :meth:`init_extensions`
+ - :meth:`init_code`
+ """
extensions = List(Unicode(),
- help="A list of dotted module names of IPython extensions to load."
+ help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
- help="dotted module name of an IPython extension to load."
+ help="dotted module name of an IPython extension to load."
).tag(config=True)
-
+
reraise_ipython_extension_failures = Bool(False,
- help="Reraise exceptions encountered loading IPython extensions?",
+ help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
-
- # Extensions that are always loaded (not configurable)
+
+ # Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
- help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
- be hidden from tools like %who?"""
+ help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
+ be hidden from tools like %who?"""
).tag(config=True)
-
+
exec_files = List(Unicode(),
- help="""List of files to run at IPython startup."""
+ help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
- help="""Run the file referenced by the PYTHONSTARTUP environment
- variable at IPython startup."""
+ help="""Run the file referenced by the PYTHONSTARTUP environment
+ variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
-
+
exec_lines = List(Unicode(),
- help="""lines of code to run at IPython startup."""
+ help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
- help="Execute the given command string."
+ help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
- help="Run the module as a script."
+ help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
- help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
+ help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
- matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
- help="""Configure matplotlib for interactive use with
- the default matplotlib backend."""
+ matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Configure matplotlib for interactive use with
+ the default matplotlib backend."""
).tag(config=True)
- pylab = CaselessStrEnum(backend_keys, allow_none=True,
- help="""Pre-load matplotlib and numpy for interactive use,
- selecting a particular matplotlib backend and loop integration.
- """
+ pylab = CaselessStrEnum(backend_keys, allow_none=True,
+ help="""Pre-load matplotlib and numpy for interactive use,
+ selecting a particular matplotlib backend and loop integration.
+ """
).tag(config=True)
pylab_import_all = Bool(True,
- help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
- and an ``import *`` is done from numpy and pylab, when using pylab mode.
+ help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
+ and an ``import *`` is done from numpy and pylab, when using pylab mode.
- When False, pylab mode should not import any names into the user namespace.
- """
+ When False, pylab mode should not import any names into the user namespace.
+ """
).tag(config=True)
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
- allow_none=True)
+ shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
+ allow_none=True)
# whether interact-loop should start
interact = Bool(True)
- user_ns = Instance(dict, args=None, allow_none=True)
+ user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
- if self.shell is not None:
+ if self.shell is not None:
self.shell.user_ns = change['new']
- self.shell.init_user_ns()
-
- def init_path(self):
- """Add current working directory, '', to sys.path"""
- if sys.path[0] != '':
- sys.path.insert(0, '')
-
- def init_shell(self):
- raise NotImplementedError("Override in subclasses")
-
- def init_gui_pylab(self):
- """Enable GUI event loop integration, taking pylab into account."""
- enable = False
- shell = self.shell
- if self.pylab:
- enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
- key = self.pylab
- elif self.matplotlib:
- enable = shell.enable_matplotlib
- key = self.matplotlib
- elif self.gui:
- enable = shell.enable_gui
- key = self.gui
-
- if not enable:
- return
-
- try:
- r = enable(key)
- except ImportError:
- self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
- self.shell.showtraceback()
- return
- except Exception:
- self.log.warning("GUI event loop or pylab initialization failed")
- self.shell.showtraceback()
- return
-
- if isinstance(r, tuple):
- gui, backend = r[:2]
- self.log.info("Enabling GUI event loop integration, "
- "eventloop=%s, matplotlib=%s", gui, backend)
- if key == "auto":
- print("Using matplotlib backend: %s" % backend)
- else:
- gui = r
- self.log.info("Enabling GUI event loop integration, "
- "eventloop=%s", gui)
-
- def init_extensions(self):
- """Load all IPython extensions in IPythonApp.extensions.
-
- This uses the :meth:`ExtensionManager.load_extensions` to load all
- the extensions listed in ``self.extensions``.
- """
- try:
- self.log.debug("Loading IPython extensions...")
- extensions = self.default_extensions + self.extensions
- if self.extra_extension:
- extensions.append(self.extra_extension)
- for ext in extensions:
- try:
- self.log.info("Loading IPython extension: %s" % ext)
- self.shell.extension_manager.load_extension(ext)
- except:
- if self.reraise_ipython_extension_failures:
- raise
- msg = ("Error in loading extension: {ext}\n"
- "Check your config files in {location}".format(
- ext=ext,
- location=self.profile_dir.location
- ))
- self.log.warning(msg, exc_info=True)
- except:
- if self.reraise_ipython_extension_failures:
- raise
- self.log.warning("Unknown error in loading extensions:", exc_info=True)
-
- def init_code(self):
- """run the pre-flight code, specified via exec_lines"""
- self._run_startup_files()
- self._run_exec_lines()
- self._run_exec_files()
-
- # Hide variables defined here from %who etc.
- if self.hide_initial_ns:
- self.shell.user_ns_hidden.update(self.shell.user_ns)
-
- # command-line execution (ipython -i script.py, ipython -m module)
- # should *not* be excluded from %whos
- self._run_cmd_line_code()
- self._run_module()
-
-        # flush output, so it won't be attached to the first cell
- sys.stdout.flush()
- sys.stderr.flush()
-
- def _run_exec_lines(self):
- """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
- if not self.exec_lines:
- return
- try:
- self.log.debug("Running code from IPythonApp.exec_lines...")
- for line in self.exec_lines:
- try:
- self.log.info("Running code in user namespace: %s" %
- line)
- self.shell.run_cell(line, store_history=False)
- except:
- self.log.warning("Error in executing line in user "
- "namespace: %s" % line)
- self.shell.showtraceback()
- except:
- self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
- self.shell.showtraceback()
-
- def _exec_file(self, fname, shell_futures=False):
- try:
- full_filename = filefind(fname, [u'.', self.ipython_dir])
+ self.shell.init_user_ns()
+
+ def init_path(self):
+ """Add current working directory, '', to sys.path"""
+ if sys.path[0] != '':
+ sys.path.insert(0, '')
+
+ def init_shell(self):
+ raise NotImplementedError("Override in subclasses")
+
+ def init_gui_pylab(self):
+ """Enable GUI event loop integration, taking pylab into account."""
+ enable = False
+ shell = self.shell
+ if self.pylab:
+ enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
+ key = self.pylab
+ elif self.matplotlib:
+ enable = shell.enable_matplotlib
+ key = self.matplotlib
+ elif self.gui:
+ enable = shell.enable_gui
+ key = self.gui
+
+ if not enable:
+ return
+
+ try:
+ r = enable(key)
+ except ImportError:
+ self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
+ self.shell.showtraceback()
+ return
+ except Exception:
+ self.log.warning("GUI event loop or pylab initialization failed")
+ self.shell.showtraceback()
+ return
+
+ if isinstance(r, tuple):
+ gui, backend = r[:2]
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s, matplotlib=%s", gui, backend)
+ if key == "auto":
+ print("Using matplotlib backend: %s" % backend)
+ else:
+ gui = r
+ self.log.info("Enabling GUI event loop integration, "
+ "eventloop=%s", gui)
+
+ def init_extensions(self):
+ """Load all IPython extensions in IPythonApp.extensions.
+
+ This uses the :meth:`ExtensionManager.load_extensions` to load all
+ the extensions listed in ``self.extensions``.
+ """
+ try:
+ self.log.debug("Loading IPython extensions...")
+ extensions = self.default_extensions + self.extensions
+ if self.extra_extension:
+ extensions.append(self.extra_extension)
+ for ext in extensions:
+ try:
+ self.log.info("Loading IPython extension: %s" % ext)
+ self.shell.extension_manager.load_extension(ext)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ msg = ("Error in loading extension: {ext}\n"
+ "Check your config files in {location}".format(
+ ext=ext,
+ location=self.profile_dir.location
+ ))
+ self.log.warning(msg, exc_info=True)
+ except:
+ if self.reraise_ipython_extension_failures:
+ raise
+ self.log.warning("Unknown error in loading extensions:", exc_info=True)
+
+ def init_code(self):
+ """run the pre-flight code, specified via exec_lines"""
+ self._run_startup_files()
+ self._run_exec_lines()
+ self._run_exec_files()
+
+ # Hide variables defined here from %who etc.
+ if self.hide_initial_ns:
+ self.shell.user_ns_hidden.update(self.shell.user_ns)
+
+ # command-line execution (ipython -i script.py, ipython -m module)
+ # should *not* be excluded from %whos
+ self._run_cmd_line_code()
+ self._run_module()
+
+        # flush output, so it won't be attached to the first cell
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ def _run_exec_lines(self):
+ """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
+ if not self.exec_lines:
+ return
+ try:
+ self.log.debug("Running code from IPythonApp.exec_lines...")
+ for line in self.exec_lines:
+ try:
+ self.log.info("Running code in user namespace: %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user "
+ "namespace: %s" % line)
+ self.shell.showtraceback()
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
+ self.shell.showtraceback()
+
+ def _exec_file(self, fname, shell_futures=False):
+ try:
+ full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
- self.log.warning("File not found: %r"%fname)
- return
- # Make sure that the running script gets a proper sys.argv as if it
- # were run from a system shell.
- save_argv = sys.argv
- sys.argv = [full_filename] + self.extra_args[1:]
- # protect sys.argv from potential unicode strings on Python 2:
- if not py3compat.PY3:
- sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
- try:
- if os.path.isfile(full_filename):
- self.log.info("Running file in user namespace: %s" %
- full_filename)
- # Ensure that __file__ is always defined to match Python
- # behavior.
- with preserve_keys(self.shell.user_ns, '__file__'):
- self.shell.user_ns['__file__'] = fname
- if full_filename.endswith('.ipy'):
- self.shell.safe_execfile_ipy(full_filename,
- shell_futures=shell_futures)
- else:
- # default to python, even without extension
- self.shell.safe_execfile(full_filename,
- self.shell.user_ns,
- shell_futures=shell_futures,
- raise_exceptions=True)
- finally:
- sys.argv = save_argv
-
- def _run_startup_files(self):
- """Run files from profile startup directory"""
+ self.log.warning("File not found: %r"%fname)
+ return
+ # Make sure that the running script gets a proper sys.argv as if it
+ # were run from a system shell.
+ save_argv = sys.argv
+ sys.argv = [full_filename] + self.extra_args[1:]
+ # protect sys.argv from potential unicode strings on Python 2:
+ if not py3compat.PY3:
+ sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
+ try:
+ if os.path.isfile(full_filename):
+ self.log.info("Running file in user namespace: %s" %
+ full_filename)
+ # Ensure that __file__ is always defined to match Python
+ # behavior.
+ with preserve_keys(self.shell.user_ns, '__file__'):
+ self.shell.user_ns['__file__'] = fname
+ if full_filename.endswith('.ipy'):
+ self.shell.safe_execfile_ipy(full_filename,
+ shell_futures=shell_futures)
+ else:
+ # default to python, even without extension
+ self.shell.safe_execfile(full_filename,
+ self.shell.user_ns,
+ shell_futures=shell_futures,
+ raise_exceptions=True)
+ finally:
+ sys.argv = save_argv
+
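# The argv handling in _exec_file above is a try/finally save-and-restore;
# a self-contained sketch with a hypothetical script name:
import sys

def _run_with_argv(fname, extra_args, execute):
    save_argv = sys.argv
    sys.argv = [fname] + list(extra_args)  # what the executed script sees
    try:
        execute(fname)
    finally:
        sys.argv = save_argv               # restored even if execute() raises

# _run_with_argv('script.py', ['--opt'], lambda f: None)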
+ def _run_startup_files(self):
+ """Run files from profile startup directory"""
startup_dirs = [self.profile_dir.startup_dir] + [
os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
]
- startup_files = []
-
- if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
- not (self.file_to_run or self.code_to_run or self.module_to_run):
- python_startup = os.environ['PYTHONSTARTUP']
- self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
- try:
- self._exec_file(python_startup)
- except:
- self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
- self.shell.showtraceback()
+ startup_files = []
+
+ if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
+ not (self.file_to_run or self.code_to_run or self.module_to_run):
+ python_startup = os.environ['PYTHONSTARTUP']
+ self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
+ try:
+ self._exec_file(python_startup)
+ except:
+ self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
+ self.shell.showtraceback()
for startup_dir in startup_dirs[::-1]:
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
- if not startup_files:
- return
-
- self.log.debug("Running startup files from %s...", startup_dir)
- try:
- for fname in sorted(startup_files):
- self._exec_file(fname)
- except:
- self.log.warning("Unknown error in handling startup files:")
- self.shell.showtraceback()
-
- def _run_exec_files(self):
- """Run files from IPythonApp.exec_files"""
- if not self.exec_files:
- return
-
- self.log.debug("Running files in IPythonApp.exec_files...")
- try:
- for fname in self.exec_files:
- self._exec_file(fname)
- except:
- self.log.warning("Unknown error in handling IPythonApp.exec_files:")
- self.shell.showtraceback()
-
- def _run_cmd_line_code(self):
- """Run code or file specified at the command-line"""
- if self.code_to_run:
- line = self.code_to_run
- try:
- self.log.info("Running code given at command line (c=): %s" %
- line)
- self.shell.run_cell(line, store_history=False)
- except:
- self.log.warning("Error in executing line in user namespace: %s" %
- line)
- self.shell.showtraceback()
+ if not startup_files:
+ return
+
+ self.log.debug("Running startup files from %s...", startup_dir)
+ try:
+ for fname in sorted(startup_files):
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling startup files:")
+ self.shell.showtraceback()
+
+ def _run_exec_files(self):
+ """Run files from IPythonApp.exec_files"""
+ if not self.exec_files:
+ return
+
+ self.log.debug("Running files in IPythonApp.exec_files...")
+ try:
+ for fname in self.exec_files:
+ self._exec_file(fname)
+ except:
+ self.log.warning("Unknown error in handling IPythonApp.exec_files:")
+ self.shell.showtraceback()
+
+ def _run_cmd_line_code(self):
+ """Run code or file specified at the command-line"""
+ if self.code_to_run:
+ line = self.code_to_run
+ try:
+ self.log.info("Running code given at command line (c=): %s" %
+ line)
+ self.shell.run_cell(line, store_history=False)
+ except:
+ self.log.warning("Error in executing line in user namespace: %s" %
+ line)
+ self.shell.showtraceback()
if not self.interact:
self.exit(1)
-
- # Like Python itself, ignore the second if the first of these is present
- elif self.file_to_run:
- fname = self.file_to_run
+
+ # Like Python itself, ignore the second if the first of these is present
+ elif self.file_to_run:
+ fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
- try:
- self._exec_file(fname, shell_futures=True)
- except:
- self.shell.showtraceback(tb_offset=4)
+ try:
+ self._exec_file(fname, shell_futures=True)
+ except:
+ self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
-
- def _run_module(self):
- """Run module specified at the command-line."""
- if self.module_to_run:
- # Make sure that the module gets a proper sys.argv as if it were
- # run using `python -m`.
- save_argv = sys.argv
- sys.argv = [sys.executable] + self.extra_args
- try:
- self.shell.safe_run_module(self.module_to_run,
- self.shell.user_ns)
- finally:
- sys.argv = save_argv
+
+ def _run_module(self):
+ """Run module specified at the command-line."""
+ if self.module_to_run:
+ # Make sure that the module gets a proper sys.argv as if it were
+ # run using `python -m`.
+ save_argv = sys.argv
+ sys.argv = [sys.executable] + self.extra_args
+ try:
+ self.shell.safe_run_module(self.module_to_run,
+ self.shell.user_ns)
+ finally:
+ sys.argv = save_argv
diff --git a/contrib/python/ipython/py2/IPython/core/splitinput.py b/contrib/python/ipython/py2/IPython/core/splitinput.py
index 2c54687cb1..7b957726fb 100644
--- a/contrib/python/ipython/py2/IPython/core/splitinput.py
+++ b/contrib/python/ipython/py2/IPython/core/splitinput.py
@@ -1,137 +1,137 @@
-# encoding: utf-8
-"""
-Simple utility for splitting user input. This is used by both inputsplitter and
-prefilter.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import re
-import sys
-
-from IPython.utils import py3compat
-from IPython.utils.encoding import get_stream_enc
-
-#-----------------------------------------------------------------------------
-# Main function
-#-----------------------------------------------------------------------------
-
-# RegExp for splitting line contents into pre-char//first word-method//rest.
-# For clarity, each group is on one line.
-
-# WARNING: update the regexp if the escapes in interactiveshell are changed, as
-# they are hardwired in.
-
-# Although it's not solely driven by the regex, note that:
-# ,;/% only trigger if they are the first character on the line
-# ! and !! trigger if they are first char(s) *or* follow an indent
-# ? triggers as first or last char.
-
-line_split = re.compile("""
- ^(\s*) # any leading space
- ([,;/%]|!!?|\?\??)? # escape character or characters
- \s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
- # to correctly treat things like '?%magic'
- (.*?$|$) # rest of line
- """, re.VERBOSE)
-
-
-def split_user_input(line, pattern=None):
- """Split user input into initial whitespace, escape character, function part
- and the rest.
- """
- # We need to ensure that the rest of this routine deals only with unicode
- encoding = get_stream_enc(sys.stdin, 'utf-8')
- line = py3compat.cast_unicode(line, encoding)
-
- if pattern is None:
- pattern = line_split
- match = pattern.match(line)
- if not match:
- # print "match failed for line '%s'" % line
- try:
- ifun, the_rest = line.split(None,1)
- except ValueError:
- # print "split failed for line '%s'" % line
- ifun, the_rest = line, u''
- pre = re.match('^(\s*)(.*)',line).groups()[0]
- esc = ""
- else:
- pre, esc, ifun, the_rest = match.groups()
-
- #print 'line:<%s>' % line # dbg
- #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg
- return pre, esc or '', ifun.strip(), the_rest.lstrip()
-
-
-class LineInfo(object):
- """A single line of input and associated info.
-
- Includes the following as properties:
-
- line
- The original, raw line
-
- continue_prompt
- Is this line a continuation in a sequence of multiline input?
-
- pre
- Any leading whitespace.
-
- esc
- The escape character(s) in pre or the empty string if there isn't one.
- Note that '!!' and '??' are possible values for esc. Otherwise it will
- always be a single character.
-
- ifun
- The 'function part', which is basically the maximal initial sequence
- of valid python identifiers and the '.' character. This is what is
- checked for alias and magic transformations, used for auto-calling,
- etc. In contrast to Python identifiers, it may start with "%" and contain
- "*".
-
- the_rest
- Everything else on the line.
- """
- def __init__(self, line, continue_prompt=False):
- self.line = line
- self.continue_prompt = continue_prompt
- self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)
-
- self.pre_char = self.pre.strip()
- if self.pre_char:
-            self.pre_whitespace = '' # No whitespace allowed before esc chars
- else:
- self.pre_whitespace = self.pre
-
- def ofind(self, ip):
- """Do a full, attribute-walking lookup of the ifun in the various
- namespaces for the given IPython InteractiveShell instance.
-
- Return a dict with keys: {found, obj, ospace, ismagic}
-
- Note: can cause state changes because of calling getattr, but should
- only be run if autocall is on and if the line hasn't matched any
- other, less dangerous handlers.
-
- Does cache the results of the call, so can be called multiple times
- without worrying about *further* damaging state.
- """
- return ip._ofind(self.ifun)
-
- def __str__(self):
- return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
+# encoding: utf-8
+"""
+Simple utility for splitting user input. This is used by both inputsplitter and
+prefilter.
+
+Authors:
+
+* Brian Granger
+* Fernando Perez
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import re
+import sys
+
+from IPython.utils import py3compat
+from IPython.utils.encoding import get_stream_enc
+
+#-----------------------------------------------------------------------------
+# Main function
+#-----------------------------------------------------------------------------
+
+# RegExp for splitting line contents into pre-char//first word-method//rest.
+# For clarity, each group is on one line.
+
+# WARNING: update the regexp if the escapes in interactiveshell are changed, as
+# they are hardwired in.
+
+# Although it's not solely driven by the regex, note that:
+# ,;/% only trigger if they are the first character on the line
+# ! and !! trigger if they are first char(s) *or* follow an indent
+# ? triggers as first or last char.
+
+line_split = re.compile("""
+ ^(\s*) # any leading space
+ ([,;/%]|!!?|\?\??)? # escape character or characters
+ \s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading %
+ # to correctly treat things like '?%magic'
+ (.*?$|$) # rest of line
+ """, re.VERBOSE)
+
+
+def split_user_input(line, pattern=None):
+ """Split user input into initial whitespace, escape character, function part
+ and the rest.
+ """
+ # We need to ensure that the rest of this routine deals only with unicode
+ encoding = get_stream_enc(sys.stdin, 'utf-8')
+ line = py3compat.cast_unicode(line, encoding)
+
+ if pattern is None:
+ pattern = line_split
+ match = pattern.match(line)
+ if not match:
+ # print "match failed for line '%s'" % line
+ try:
+ ifun, the_rest = line.split(None,1)
+ except ValueError:
+ # print "split failed for line '%s'" % line
+ ifun, the_rest = line, u''
+ pre = re.match('^(\s*)(.*)',line).groups()[0]
+ esc = ""
+ else:
+ pre, esc, ifun, the_rest = match.groups()
+
+ #print 'line:<%s>' % line # dbg
+ #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg
+ return pre, esc or '', ifun.strip(), the_rest.lstrip()
+
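For orientation, a minimal sketch of what the splitter returns for a few
representative inputs. The expected tuples assume the regex above behaves as
documented: a bare leading '%' is captured as the escape character, while
'?%magic' keeps the '%' attached to ifun::

    from IPython.core.splitinput import split_user_input

    split_user_input(u'%timeit x = 1')  # (u'', u'%', u'timeit', u'x = 1')
    split_user_input(u'?%timeit')       # (u'', u'?', u'%timeit', u'')
    split_user_input(u'  !ls -l')       # (u'  ', u'!', u'ls', u'-l')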
+
+class LineInfo(object):
+ """A single line of input and associated info.
+
+ Includes the following as properties:
+
+ line
+ The original, raw line
+
+ continue_prompt
+ Is this line a continuation in a sequence of multiline input?
+
+ pre
+ Any leading whitespace.
+
+ esc
+ The escape character(s) in pre or the empty string if there isn't one.
+ Note that '!!' and '??' are possible values for esc. Otherwise it will
+ always be a single character.
+
+ ifun
+ The 'function part', which is basically the maximal initial sequence
+ of valid python identifiers and the '.' character. This is what is
+ checked for alias and magic transformations, used for auto-calling,
+ etc. In contrast to Python identifiers, it may start with "%" and contain
+ "*".
+
+ the_rest
+ Everything else on the line.
+ """
+ def __init__(self, line, continue_prompt=False):
+ self.line = line
+ self.continue_prompt = continue_prompt
+ self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line)
+
+ self.pre_char = self.pre.strip()
+ if self.pre_char:
+ self.pre_whitespace = '' # No whitespace allowed before esc chars
+ else:
+ self.pre_whitespace = self.pre
+
+ def ofind(self, ip):
+ """Do a full, attribute-walking lookup of the ifun in the various
+ namespaces for the given IPython InteractiveShell instance.
+
+ Return a dict with keys: {found, obj, ospace, ismagic}
+
+ Note: can cause state changes because of calling getattr, but should
+ only be run if autocall is on and if the line hasn't matched any
+ other, less dangerous handlers.
+
+ It caches the results of the call, so it can be called multiple times
+ without worrying about *further* damaging state.
+ """
+ return ip._ofind(self.ifun)
+
+ def __str__(self):
+ return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest)
diff --git a/contrib/python/ipython/py2/IPython/core/ultratb.py b/contrib/python/ipython/py2/IPython/core/ultratb.py
index 4fc1e9574b..a855145825 100644
--- a/contrib/python/ipython/py2/IPython/core/ultratb.py
+++ b/contrib/python/ipython/py2/IPython/core/ultratb.py
@@ -1,1499 +1,1499 @@
-# -*- coding: utf-8 -*-
-"""
-Verbose and colourful traceback formatting.
-
-**ColorTB**
-
-I've always found it a bit hard to visually parse tracebacks in Python. The
-ColorTB class is a solution to that problem. It colors the different parts of a
-traceback in a manner similar to what you would expect from a syntax-highlighting
-text editor.
-
-Installation instructions for ColorTB::
-
- import sys,ultratb
- sys.excepthook = ultratb.ColorTB()
-
-**VerboseTB**
-
-I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
-of useful info when a traceback occurs. Ping originally had it spit out HTML
-and intended it for CGI programmers, but why should they have all the fun? I
-altered it to spit out colored text to the terminal. It's a bit overwhelming,
-but kind of neat, and maybe useful for long-running programs that you believe
-are bug-free. If a crash *does* occur in that type of program you want details.
-Give it a shot--you'll love it or you'll hate it.
-
-.. note::
-
- The Verbose mode prints the variables currently visible where the exception
- happened (shortening their strings if too long). This can potentially be
- very slow, if you happen to have a huge data structure whose string
- representation is complex to compute. Your computer may appear to freeze for
- a while with cpu usage at 100%. If this occurs, you can cancel the traceback
- with Ctrl-C (maybe hitting it more than once).
-
- If you encounter this kind of situation often, you may want to use the
- Verbose_novars mode instead of the regular Verbose, which avoids formatting
- variables (but otherwise includes the information and context given by
- Verbose).
-
+# -*- coding: utf-8 -*-
+"""
+Verbose and colourful traceback formatting.
+
+**ColorTB**
+
+I've always found it a bit hard to visually parse tracebacks in Python. The
+ColorTB class is a solution to that problem. It colors the different parts of a
+traceback in a manner similar to what you would expect from a syntax-highlighting
+text editor.
+
+Installation instructions for ColorTB::
+
+ import sys,ultratb
+ sys.excepthook = ultratb.ColorTB()
+
+**VerboseTB**
+
+I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
+of useful info when a traceback occurs. Ping originally had it spit out HTML
+and intended it for CGI programmers, but why should they have all the fun? I
+altered it to spit out colored text to the terminal. It's a bit overwhelming,
+but kind of neat, and maybe useful for long-running programs that you believe
+are bug-free. If a crash *does* occur in that type of program you want details.
+Give it a shot--you'll love it or you'll hate it.
+
+.. note::
+
+ The Verbose mode prints the variables currently visible where the exception
+ happened (shortening their strings if too long). This can potentially be
+ very slow, if you happen to have a huge data structure whose string
+ representation is complex to compute. Your computer may appear to freeze for
+ a while with cpu usage at 100%. If this occurs, you can cancel the traceback
+ with Ctrl-C (maybe hitting it more than once).
+
+ If you encounter this kind of situation often, you may want to use the
+ Verbose_novars mode instead of the regular Verbose, which avoids formatting
+ variables (but otherwise includes the information and context given by
+ Verbose).
+
.. note::
-
+
    The verbose mode prints all variables in the stack, which means it can
    potentially leak sensitive information like access keys or unencrypted
    passwords.
-Installation instructions for VerboseTB::
-
- import sys,ultratb
- sys.excepthook = ultratb.VerboseTB()
-
-Note: Much of the code in this module was lifted verbatim from the standard
-library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
-
-Color schemes
--------------
-
-The colors are defined in the class TBTools through the use of the
-ColorSchemeTable class. Currently the following exist:
-
- - NoColor: allows all of this module to be used in any terminal (the color
- escapes are just dummy blank strings).
-
- - Linux: is meant to look good in a terminal like the Linux console (black
- or very dark background).
-
- - LightBG: similar to Linux but swaps dark/light colors to be more readable
- in light background terminals.
-
+Installation instructions for VerboseTB::
+
+ import sys,ultratb
+ sys.excepthook = ultratb.VerboseTB()
+
+Note: Much of the code in this module was lifted verbatim from the standard
+library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
+
+Color schemes
+-------------
+
+The colors are defined in the class TBTools through the use of the
+ColorSchemeTable class. Currently the following exist:
+
+ - NoColor: allows all of this module to be used in any terminal (the color
+ escapes are just dummy blank strings).
+
+ - Linux: is meant to look good in a terminal like the Linux console (black
+ or very dark background).
+
+ - LightBG: similar to Linux but swaps dark/light colors to be more readable
+ in light background terminals.
+
- Neutral: a neutral color scheme that should be readable on both light and
    dark backgrounds.
-You can implement other color schemes easily, the syntax is fairly
-self-explanatory. Please send back new schemes you develop to the author for
-possible inclusion in future releases.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.core.ultratb
- :parts: 3
-"""
-
-#*****************************************************************************
-# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
-# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
+You can implement other color schemes easily, the syntax is fairly
+self-explanatory. Please send back new schemes you develop to the author for
+possible inclusion in future releases.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.core.ultratb
+ :parts: 3
+"""
+
+#*****************************************************************************
+# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
+# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
from __future__ import absolute_import
-from __future__ import unicode_literals
-from __future__ import print_function
-
-import dis
-import inspect
-import keyword
-import linecache
-import os
-import pydoc
-import re
-import sys
-import time
-import tokenize
-import traceback
-import types
-
-try: # Python 2
- generate_tokens = tokenize.generate_tokens
-except AttributeError: # Python 3
- generate_tokens = tokenize.tokenize
-
-# For purposes of monkeypatching inspect to fix a bug in it.
-from inspect import getsourcefile, getfile, getmodule, \
- ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
-
-# IPython's own modules
-from IPython import get_ipython
-from IPython.core import debugger
-from IPython.core.display_trap import DisplayTrap
-from IPython.core.excolors import exception_colors
-from IPython.utils import PyColorize
-from IPython.utils import openpy
-from IPython.utils import path as util_path
-from IPython.utils import py3compat
-from IPython.utils import ulinecache
-from IPython.utils.data import uniq_stable
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import dis
+import inspect
+import keyword
+import linecache
+import os
+import pydoc
+import re
+import sys
+import time
+import tokenize
+import traceback
+import types
+
+try: # Python 2
+ generate_tokens = tokenize.generate_tokens
+except AttributeError: # Python 3
+ generate_tokens = tokenize.tokenize
+
+# For purposes of monkeypatching inspect to fix a bug in it.
+from inspect import getsourcefile, getfile, getmodule, \
+ ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
+
+# IPython's own modules
+from IPython import get_ipython
+from IPython.core import debugger
+from IPython.core.display_trap import DisplayTrap
+from IPython.core.excolors import exception_colors
+from IPython.utils import PyColorize
+from IPython.utils import openpy
+from IPython.utils import path as util_path
+from IPython.utils import py3compat
+from IPython.utils import ulinecache
+from IPython.utils.data import uniq_stable
from IPython.utils.terminal import get_terminal_size
from logging import info, error
-
+
import IPython.utils.colorable as colorable
-# Globals
-# amount of space to put line numbers before verbose tracebacks
-INDENT_SIZE = 8
-
-# Default color scheme. This is used, for example, by the traceback
-# formatter. When running in an actual IPython instance, the user's rc.colors
-# value is used, but having a module global makes this functionality available
-# to users of ultratb who are NOT running inside ipython.
-DEFAULT_SCHEME = 'NoColor'
-
-# ---------------------------------------------------------------------------
-# Code begins
-
-# Utility functions
-def inspect_error():
- """Print a message about internal inspect errors.
-
- These are unfortunately quite common."""
-
- error('Internal Python error in the inspect module.\n'
- 'Below is the traceback from this internal error.\n')
-
-
-# This function is a monkeypatch we apply to the Python inspect module. We have
-# now found when it's needed (see discussion on issue gh-1456), and we have a
-# test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if
-# the monkeypatch is not applied. TK, Aug 2012.
-def findsource(object):
- """Return the entire source file and starting line number for an object.
-
- The argument may be a module, class, method, function, traceback, frame,
- or code object. The source code is returned as a list of all the lines
- in the file and the line number indexes a line in that list. An IOError
- is raised if the source code cannot be retrieved.
-
- FIXED version with which we monkeypatch the stdlib to work around a bug."""
-
- file = getsourcefile(object) or getfile(object)
- # If the object is a frame, then trying to get the globals dict from its
- # module won't work. Instead, the frame object itself has the globals
- # dictionary.
- globals_dict = None
- if inspect.isframe(object):
- # XXX: can this ever be false?
- globals_dict = object.f_globals
- else:
- module = getmodule(object, file)
- if module:
- globals_dict = module.__dict__
- lines = linecache.getlines(file, globals_dict)
- if not lines:
- raise IOError('could not get source code')
-
- if ismodule(object):
- return lines, 0
-
- if isclass(object):
- name = object.__name__
- pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
- # make some effort to find the best matching class definition:
- # use the one with the least indentation, which is the one
- # that's most probably not inside a function definition.
- candidates = []
- for i in range(len(lines)):
- match = pat.match(lines[i])
- if match:
- # if it's at toplevel, it's already the best one
- if lines[i][0] == 'c':
- return lines, i
- # else add whitespace to candidate list
- candidates.append((match.group(1), i))
- if candidates:
- # this will sort by whitespace, and by line number,
- # less whitespace first
- candidates.sort()
- return lines, candidates[0][1]
- else:
- raise IOError('could not find class definition')
-
- if ismethod(object):
- object = object.__func__
- if isfunction(object):
- object = object.__code__
- if istraceback(object):
- object = object.tb_frame
- if isframe(object):
- object = object.f_code
- if iscode(object):
- if not hasattr(object, 'co_firstlineno'):
- raise IOError('could not find function definition')
- pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
- pmatch = pat.match
- # fperez - fix: sometimes, co_firstlineno can give a number larger than
- # the length of lines, which causes an error. Safeguard against that.
- lnum = min(object.co_firstlineno, len(lines)) - 1
- while lnum > 0:
+# Globals
+# amount of space to put line numbers before verbose tracebacks
+INDENT_SIZE = 8
+
+# Default color scheme. This is used, for example, by the traceback
+# formatter. When running in an actual IPython instance, the user's rc.colors
+# value is used, but having a module global makes this functionality available
+# to users of ultratb who are NOT running inside ipython.
+DEFAULT_SCHEME = 'NoColor'
+
+# ---------------------------------------------------------------------------
+# Code begins
+
+# Utility functions
+def inspect_error():
+ """Print a message about internal inspect errors.
+
+ These are unfortunately quite common."""
+
+ error('Internal Python error in the inspect module.\n'
+ 'Below is the traceback from this internal error.\n')
+
+
+# This function is a monkeypatch we apply to the Python inspect module. We have
+# now found when it's needed (see discussion on issue gh-1456), and we have a
+# test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if
+# the monkeypatch is not applied. TK, Aug 2012.
+def findsource(object):
+ """Return the entire source file and starting line number for an object.
+
+ The argument may be a module, class, method, function, traceback, frame,
+ or code object. The source code is returned as a list of all the lines
+ in the file and the line number indexes a line in that list. An IOError
+ is raised if the source code cannot be retrieved.
+
+ FIXED version with which we monkeypatch the stdlib to work around a bug."""
+
+ file = getsourcefile(object) or getfile(object)
+ # If the object is a frame, then trying to get the globals dict from its
+ # module won't work. Instead, the frame object itself has the globals
+ # dictionary.
+ globals_dict = None
+ if inspect.isframe(object):
+ # XXX: can this ever be false?
+ globals_dict = object.f_globals
+ else:
+ module = getmodule(object, file)
+ if module:
+ globals_dict = module.__dict__
+ lines = linecache.getlines(file, globals_dict)
+ if not lines:
+ raise IOError('could not get source code')
+
+ if ismodule(object):
+ return lines, 0
+
+ if isclass(object):
+ name = object.__name__
+ pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
+ # make some effort to find the best matching class definition:
+ # use the one with the least indentation, which is the one
+ # that's most probably not inside a function definition.
+ candidates = []
+ for i in range(len(lines)):
+ match = pat.match(lines[i])
+ if match:
+ # if it's at toplevel, it's already the best one
+ if lines[i][0] == 'c':
+ return lines, i
+ # else add whitespace to candidate list
+ candidates.append((match.group(1), i))
+ if candidates:
+ # this will sort by whitespace, and by line number,
+ # less whitespace first
+ candidates.sort()
+ return lines, candidates[0][1]
+ else:
+ raise IOError('could not find class definition')
+
+ if ismethod(object):
+ object = object.__func__
+ if isfunction(object):
+ object = object.__code__
+ if istraceback(object):
+ object = object.tb_frame
+ if isframe(object):
+ object = object.f_code
+ if iscode(object):
+ if not hasattr(object, 'co_firstlineno'):
+ raise IOError('could not find function definition')
+ pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
+ pmatch = pat.match
+ # fperez - fix: sometimes, co_firstlineno can give a number larger than
+ # the length of lines, which causes an error. Safeguard against that.
+ lnum = min(object.co_firstlineno, len(lines)) - 1
+ while lnum > 0:
if pmatch(lines[lnum]):
break
- lnum -= 1
-
- return lines, lnum
- raise IOError('could not find code object')
-
-
-# This is a patched version of inspect.getargs that applies the (unmerged)
-# patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes
-# https://github.com/ipython/ipython/issues/8205 and
-# https://github.com/ipython/ipython/issues/8293
-def getargs(co):
- """Get information about the arguments accepted by a code object.
-
- Three things are returned: (args, varargs, varkw), where 'args' is
- a list of argument names (possibly containing nested lists), and
- 'varargs' and 'varkw' are the names of the * and ** arguments or None."""
- if not iscode(co):
- raise TypeError('{!r} is not a code object'.format(co))
-
- nargs = co.co_argcount
- names = co.co_varnames
- args = list(names[:nargs])
- step = 0
-
- # The following acrobatics are for anonymous (tuple) arguments.
- for i in range(nargs):
- if args[i][:1] in ('', '.'):
- stack, remain, count = [], [], []
- while step < len(co.co_code):
- op = ord(co.co_code[step])
- step = step + 1
- if op >= dis.HAVE_ARGUMENT:
- opname = dis.opname[op]
- value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
- step = step + 2
- if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
- remain.append(value)
- count.append(value)
- elif opname in ('STORE_FAST', 'STORE_DEREF'):
- if op in dis.haslocal:
- stack.append(co.co_varnames[value])
- elif op in dis.hasfree:
- stack.append((co.co_cellvars + co.co_freevars)[value])
- # Special case for sublists of length 1: def foo((bar))
- # doesn't generate the UNPACK_TUPLE bytecode, so if
- # `remain` is empty here, we have such a sublist.
- if not remain:
- stack[0] = [stack[0]]
- break
- else:
- remain[-1] = remain[-1] - 1
- while remain[-1] == 0:
- remain.pop()
- size = count.pop()
- stack[-size:] = [stack[-size:]]
+ lnum -= 1
+
+ return lines, lnum
+ raise IOError('could not find code object')
+
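A quick check of the patched helper, assuming it runs from a module saved on
disk (linecache cannot see code typed at a bare interactive prompt)::

    def sample():
        return 42

    lines, lnum = findsource(sample)
    # lines is the whole source file as a list; lnum is a 0-based index
    assert lines[lnum].lstrip().startswith('def sample')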
+
+# This is a patched version of inspect.getargs that applies the (unmerged)
+# patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes
+# https://github.com/ipython/ipython/issues/8205 and
+# https://github.com/ipython/ipython/issues/8293
+def getargs(co):
+ """Get information about the arguments accepted by a code object.
+
+ Three things are returned: (args, varargs, varkw), where 'args' is
+ a list of argument names (possibly containing nested lists), and
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None."""
+ if not iscode(co):
+ raise TypeError('{!r} is not a code object'.format(co))
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+ args = list(names[:nargs])
+ step = 0
+
+ # The following acrobatics are for anonymous (tuple) arguments.
+ for i in range(nargs):
+ if args[i][:1] in ('', '.'):
+ stack, remain, count = [], [], []
+ while step < len(co.co_code):
+ op = ord(co.co_code[step])
+ step = step + 1
+ if op >= dis.HAVE_ARGUMENT:
+ opname = dis.opname[op]
+ value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
+ step = step + 2
+ if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
+ remain.append(value)
+ count.append(value)
+ elif opname in ('STORE_FAST', 'STORE_DEREF'):
+ if op in dis.haslocal:
+ stack.append(co.co_varnames[value])
+ elif op in dis.hasfree:
+ stack.append((co.co_cellvars + co.co_freevars)[value])
+ # Special case for sublists of length 1: def foo((bar))
+ # doesn't generate the UNPACK_TUPLE bytecode, so if
+ # `remain` is empty here, we have such a sublist.
+ if not remain:
+ stack[0] = [stack[0]]
+ break
+ else:
+ remain[-1] = remain[-1] - 1
+ while remain[-1] == 0:
+ remain.pop()
+ size = count.pop()
+ stack[-size:] = [stack[-size:]]
if not remain:
break
- remain[-1] = remain[-1] - 1
+ remain[-1] = remain[-1] - 1
if not remain:
break
- args[i] = stack[0]
-
- varargs = None
- if co.co_flags & inspect.CO_VARARGS:
- varargs = co.co_varnames[nargs]
- nargs = nargs + 1
- varkw = None
- if co.co_flags & inspect.CO_VARKEYWORDS:
- varkw = co.co_varnames[nargs]
- return inspect.Arguments(args, varargs, varkw)
-
-
-# Monkeypatch inspect to apply our bugfix.
-def with_patch_inspect(f):
- """decorator for monkeypatching inspect.findsource"""
-
- def wrapped(*args, **kwargs):
- save_findsource = inspect.findsource
- save_getargs = inspect.getargs
- inspect.findsource = findsource
- inspect.getargs = getargs
- try:
- return f(*args, **kwargs)
- finally:
- inspect.findsource = save_findsource
- inspect.getargs = save_getargs
-
- return wrapped
-
-
-if py3compat.PY3:
- fixed_getargvalues = inspect.getargvalues
-else:
- # Fixes for https://github.com/ipython/ipython/issues/8293
- # and https://github.com/ipython/ipython/issues/8205.
- # The relevant bug is caused by failure to correctly handle anonymous tuple
- # unpacking, which only exists in Python 2.
- fixed_getargvalues = with_patch_inspect(inspect.getargvalues)
-
-
-def fix_frame_records_filenames(records):
- """Try to fix the filenames in each record from inspect.getinnerframes().
-
- Particularly, modules loaded from within zip files have useless filenames
- attached to their code objects, and inspect.getinnerframes() just uses them.
- """
- fixed_records = []
- for frame, filename, line_no, func_name, lines, index in records:
- # Look inside the frame's globals dictionary for __file__,
- # which should be better. However, keep Cython filenames since
- # we prefer the source filenames over the compiled .so file.
- filename = py3compat.cast_unicode_py2(filename, "utf-8")
- if not filename.endswith(('.pyx', '.pxd', '.pxi')):
- better_fn = frame.f_globals.get('__file__', None)
- if isinstance(better_fn, str):
- # Check the type just in case someone did something weird with
- # __file__. It might also be None if the error occurred during
- # import.
- filename = better_fn
- fixed_records.append((frame, filename, line_no, func_name, lines, index))
- return fixed_records
-
-
-@with_patch_inspect
-def _fixed_getinnerframes(etb, context=1, tb_offset=0):
- LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
-
- records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
- # If the error is at the console, don't build any context, since it would
- # otherwise produce 5 blank lines printed out (there is no file at the
- # console)
- rec_check = records[tb_offset:]
- try:
- rname = rec_check[0][1]
- if rname == '<ipython console>' or rname.endswith('<string>'):
- return rec_check
- except IndexError:
- pass
-
- aux = traceback.extract_tb(etb)
- assert len(records) == len(aux)
- for i, (file, lnum, _, _) in zip(range(len(records)), aux):
- maybeStart = lnum - 1 - context // 2
- start = max(maybeStart, 0)
- end = start + context
- lines = ulinecache.getlines(file)[start:end]
- buf = list(records[i])
- buf[LNUM_POS] = lnum
- buf[INDEX_POS] = lnum - 1 - start
- buf[LINES_POS] = lines
- records[i] = tuple(buf)
- return records[tb_offset:]
-
-# Helper function -- largely belongs to VerboseTB, but we need the same
-# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
-# can be recognized properly by ipython.el's py-traceback-line-re
-# (SyntaxErrors have to be treated specially because they have no traceback)
-
-_parser = PyColorize.Parser()
-
-
-def _format_traceback_lines(lnum, index, lines, Colors, lvals=None, scheme=None):
- numbers_width = INDENT_SIZE - 1
- res = []
- i = lnum - index
-
- # This lets us get fully syntax-highlighted tracebacks.
- if scheme is None:
- ipinst = get_ipython()
- if ipinst is not None:
- scheme = ipinst.colors
- else:
- scheme = DEFAULT_SCHEME
-
- _line_format = _parser.format2
-
- for line in lines:
- line = py3compat.cast_unicode(line)
-
- new_line, err = _line_format(line, 'str', scheme)
- if not err: line = new_line
-
- if i == lnum:
- # This is the line with the error
- pad = numbers_width - len(str(i))
+ args[i] = stack[0]
+
+ varargs = None
+ if co.co_flags & inspect.CO_VARARGS:
+ varargs = co.co_varnames[nargs]
+ nargs = nargs + 1
+ varkw = None
+ if co.co_flags & inspect.CO_VARKEYWORDS:
+ varkw = co.co_varnames[nargs]
+ return inspect.Arguments(args, varargs, varkw)
+
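For example, on an ordinary function's code object the patched getargs behaves
like the stdlib version; the bytecode-walking branch only fires for Python 2
tuple signatures such as ``def f((a, b)):``::

    def f(a, b, *rest, **kw):
        pass

    getargs(f.__code__)   # -> (['a', 'b'], 'rest', 'kw') as inspect.Arguments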
+
+# Monkeypatch inspect to apply our bugfix.
+def with_patch_inspect(f):
+ """decorator for monkeypatching inspect.findsource"""
+
+ def wrapped(*args, **kwargs):
+ save_findsource = inspect.findsource
+ save_getargs = inspect.getargs
+ inspect.findsource = findsource
+ inspect.getargs = getargs
+ try:
+ return f(*args, **kwargs)
+ finally:
+ inspect.findsource = save_findsource
+ inspect.getargs = save_getargs
+
+ return wrapped
+
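A sketch of how the decorator is meant to be used; dump_innermost is a
hypothetical helper, not part of this module::

    @with_patch_inspect
    def dump_innermost(tb):
        # inside this call, inspect.findsource and inspect.getargs are the
        # patched versions defined above; the originals are restored on exit
        return inspect.getinnerframes(tb)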
+
+if py3compat.PY3:
+ fixed_getargvalues = inspect.getargvalues
+else:
+ # Fixes for https://github.com/ipython/ipython/issues/8293
+ # and https://github.com/ipython/ipython/issues/8205.
+ # The relevant bug is caused by failure to correctly handle anonymous tuple
+ # unpacking, which only exists in Python 2.
+ fixed_getargvalues = with_patch_inspect(inspect.getargvalues)
+
+
+def fix_frame_records_filenames(records):
+ """Try to fix the filenames in each record from inspect.getinnerframes().
+
+ Particularly, modules loaded from within zip files have useless filenames
+ attached to their code objects, and inspect.getinnerframes() just uses them.
+ """
+ fixed_records = []
+ for frame, filename, line_no, func_name, lines, index in records:
+ # Look inside the frame's globals dictionary for __file__,
+ # which should be better. However, keep Cython filenames since
+ # we prefer the source filenames over the compiled .so file.
+ filename = py3compat.cast_unicode_py2(filename, "utf-8")
+ if not filename.endswith(('.pyx', '.pxd', '.pxi')):
+ better_fn = frame.f_globals.get('__file__', None)
+ if isinstance(better_fn, str):
+ # Check the type just in case someone did something weird with
+ # __file__. It might also be None if the error occurred during
+ # import.
+ filename = better_fn
+ fixed_records.append((frame, filename, line_no, func_name, lines, index))
+ return fixed_records
+
+
+@with_patch_inspect
+def _fixed_getinnerframes(etb, context=1, tb_offset=0):
+ LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
+
+ records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
+ # If the error is at the console, don't build any context, since it would
+ # otherwise produce 5 blank lines printed out (there is no file at the
+ # console)
+ rec_check = records[tb_offset:]
+ try:
+ rname = rec_check[0][1]
+ if rname == '<ipython console>' or rname.endswith('<string>'):
+ return rec_check
+ except IndexError:
+ pass
+
+ aux = traceback.extract_tb(etb)
+ assert len(records) == len(aux)
+ for i, (file, lnum, _, _) in zip(range(len(records)), aux):
+ maybeStart = lnum - 1 - context // 2
+ start = max(maybeStart, 0)
+ end = start + context
+ lines = ulinecache.getlines(file)[start:end]
+ buf = list(records[i])
+ buf[LNUM_POS] = lnum
+ buf[INDEX_POS] = lnum - 1 - start
+ buf[LINES_POS] = lines
+ records[i] = tuple(buf)
+ return records[tb_offset:]
+
+# Helper function -- largely belongs to VerboseTB, but we need the same
+# functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
+# can be recognized properly by ipython.el's py-traceback-line-re
+# (SyntaxErrors have to be treated specially because they have no traceback)
+
+_parser = PyColorize.Parser()
+
+
+def _format_traceback_lines(lnum, index, lines, Colors, lvals=None, scheme=None):
+ numbers_width = INDENT_SIZE - 1
+ res = []
+ i = lnum - index
+
+ # This lets us get fully syntax-highlighted tracebacks.
+ if scheme is None:
+ ipinst = get_ipython()
+ if ipinst is not None:
+ scheme = ipinst.colors
+ else:
+ scheme = DEFAULT_SCHEME
+
+ _line_format = _parser.format2
+
+ for line in lines:
+ line = py3compat.cast_unicode(line)
+
+ new_line, err = _line_format(line, 'str', scheme)
+ if not err: line = new_line
+
+ if i == lnum:
+ # This is the line with the error
+ pad = numbers_width - len(str(i))
num = '%s%s' % (debugger.make_arrow(pad), str(lnum))
- line = '%s%s%s %s%s' % (Colors.linenoEm, num,
- Colors.line, line, Colors.Normal)
- else:
- num = '%*s' % (numbers_width, i)
- line = '%s%s%s %s' % (Colors.lineno, num,
- Colors.Normal, line)
-
- res.append(line)
- if lvals and i == lnum:
- res.append(lvals + '\n')
- i = i + 1
- return res
-
-def is_recursion_error(etype, value, records):
- try:
- # RecursionError is new in Python 3.5
- recursion_error_type = RecursionError
- except NameError:
- recursion_error_type = RuntimeError
-
- # The default recursion limit is 1000, but some of that will be taken up
- # by stack frames in IPython itself. >500 frames probably indicates
- # a recursion error.
- return (etype is recursion_error_type) \
+ line = '%s%s%s %s%s' % (Colors.linenoEm, num,
+ Colors.line, line, Colors.Normal)
+ else:
+ num = '%*s' % (numbers_width, i)
+ line = '%s%s%s %s' % (Colors.lineno, num,
+ Colors.Normal, line)
+
+ res.append(line)
+ if lvals and i == lnum:
+ res.append(lvals + '\n')
+ i = i + 1
+ return res
+
+def is_recursion_error(etype, value, records):
+ try:
+ # RecursionError is new in Python 3.5
+ recursion_error_type = RecursionError
+ except NameError:
+ recursion_error_type = RuntimeError
+
+ # The default recursion limit is 1000, but some of that will be taken up
+ # by stack frames in IPython itself. >500 frames probably indicates
+ # a recursion error.
+ return (etype is recursion_error_type) \
and str("recursion") in str(value).lower() \
- and len(records) > 500
-
-def find_recursion(etype, value, records):
- """Identify the repeating stack frames from a RecursionError traceback
-
- 'records' is a list as returned by VerboseTB.get_records()
-
- Returns (last_unique, repeat_length)
- """
- # This involves a bit of guesswork - we want to show enough of the traceback
- # to indicate where the recursion is occurring. We guess that the innermost
- # quarter of the traceback (250 frames by default) is repeats, and find the
- # first frame (from in to out) that looks different.
- if not is_recursion_error(etype, value, records):
- return len(records), 0
-
- # Select filename, lineno, func_name to track frames with
- records = [r[1:4] for r in records]
- inner_frames = records[-(len(records)//4):]
- frames_repeated = set(inner_frames)
-
- last_seen_at = {}
- longest_repeat = 0
- i = len(records)
- for frame in reversed(records):
- i -= 1
- if frame not in frames_repeated:
- last_unique = i
- break
-
- if frame in last_seen_at:
- distance = last_seen_at[frame] - i
- longest_repeat = max(longest_repeat, distance)
-
- last_seen_at[frame] = i
- else:
- last_unique = 0 # The whole traceback was recursion
-
- return last_unique, longest_repeat
-
-#---------------------------------------------------------------------------
-# Module classes
+ and len(records) > 500
+
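A hedged sketch of the check in action, deliberately overflowing the stack.
On Python 2 the exception is a RuntimeError, which the fallback above covers;
on Python 3.5+ RecursionError subclasses RuntimeError, so the except clause
still catches it::

    import sys, inspect

    def boom():
        boom()

    try:
        boom()
    except RuntimeError:                    # RecursionError on Python >= 3.5
        etype, value, tb = sys.exc_info()
        records = inspect.getinnerframes(tb)
        is_recursion_error(etype, value, records)   # True: >500 repeated frames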
+def find_recursion(etype, value, records):
+ """Identify the repeating stack frames from a RecursionError traceback
+
+ 'records' is a list as returned by VerboseTB.get_records()
+
+ Returns (last_unique, repeat_length)
+ """
+ # This involves a bit of guesswork - we want to show enough of the traceback
+ # to indicate where the recursion is occurring. We guess that the innermost
+ # quarter of the traceback (250 frames by default) is repeats, and find the
+ # first frame (from in to out) that looks different.
+ if not is_recursion_error(etype, value, records):
+ return len(records), 0
+
+ # Select filename, lineno, func_name to track frames with
+ records = [r[1:4] for r in records]
+ inner_frames = records[-(len(records)//4):]
+ frames_repeated = set(inner_frames)
+
+ last_seen_at = {}
+ longest_repeat = 0
+ i = len(records)
+ for frame in reversed(records):
+ i -= 1
+ if frame not in frames_repeated:
+ last_unique = i
+ break
+
+ if frame in last_seen_at:
+ distance = last_seen_at[frame] - i
+ longest_repeat = max(longest_repeat, distance)
+
+ last_seen_at[frame] = i
+ else:
+ last_unique = 0 # The whole traceback was recursion
+
+ return last_unique, longest_repeat
+
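Continuing the sketch above, a caller condenses the repeated tail rather than
printing every frame::

    last_unique, repeat_length = find_recursion(etype, value, records)
    # frames up to last_unique are shown in full; one cycle of length
    # repeat_length is then shown once instead of hundreds of times
    shown = records[:last_unique + repeat_length + 1]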
+#---------------------------------------------------------------------------
+# Module classes
class TBTools(colorable.Colorable):
- """Basic tools used by all traceback printer classes."""
-
- # Number of frames to skip when reporting tracebacks
- tb_offset = 0
-
+ """Basic tools used by all traceback printer classes."""
+
+ # Number of frames to skip when reporting tracebacks
+ tb_offset = 0
+
def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None):
- # Whether to call the interactive pdb debugger after printing
- # tracebacks or not
+ # Whether to call the interactive pdb debugger after printing
+ # tracebacks or not
super(TBTools, self).__init__(parent=parent, config=config)
- self.call_pdb = call_pdb
-
- # Output stream to write to. Note that we store the original value in
- # a private attribute and then make the public ostream a property, so
+ self.call_pdb = call_pdb
+
+ # Output stream to write to. Note that we store the original value in
+ # a private attribute and then make the public ostream a property, so
# that we can delay accessing sys.stdout until runtime. The way
# things are written now, the sys.stdout object is dynamically managed
- # so a reference to it should NEVER be stored statically. This
- # property approach confines this detail to a single location, and all
- # subclasses can simply access self.ostream for writing.
- self._ostream = ostream
-
- # Create color table
- self.color_scheme_table = exception_colors()
-
- self.set_colors(color_scheme)
- self.old_scheme = color_scheme # save initial value for toggles
-
- if call_pdb:
+ # so a reference to it should NEVER be stored statically. This
+ # property approach confines this detail to a single location, and all
+ # subclasses can simply access self.ostream for writing.
+ self._ostream = ostream
+
+ # Create color table
+ self.color_scheme_table = exception_colors()
+
+ self.set_colors(color_scheme)
+ self.old_scheme = color_scheme # save initial value for toggles
+
+ if call_pdb:
self.pdb = debugger.Pdb()
- else:
- self.pdb = None
-
- def _get_ostream(self):
- """Output stream that exceptions are written to.
-
- Valid values are:
-
- - None: the default, which means that IPython will dynamically resolve
+ else:
+ self.pdb = None
+
+ def _get_ostream(self):
+ """Output stream that exceptions are written to.
+
+ Valid values are:
+
+ - None: the default, which means that IPython will dynamically resolve
to sys.stdout. This ensures compatibility with most tools, including
- Windows (where plain stdout doesn't recognize ANSI escapes).
-
- - Any object with 'write' and 'flush' attributes.
- """
+ Windows (where plain stdout doesn't recognize ANSI escapes).
+
+ - Any object with 'write' and 'flush' attributes.
+ """
return sys.stdout if self._ostream is None else self._ostream
-
- def _set_ostream(self, val):
- assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
- self._ostream = val
-
- ostream = property(_get_ostream, _set_ostream)
-
- def set_colors(self, *args, **kw):
- """Shorthand access to the color table scheme selector method."""
-
- # Set own color table
- self.color_scheme_table.set_active_scheme(*args, **kw)
- # for convenience, set Colors to the active scheme
- self.Colors = self.color_scheme_table.active_colors
- # Also set colors of debugger
- if hasattr(self, 'pdb') and self.pdb is not None:
- self.pdb.set_colors(*args, **kw)
-
- def color_toggle(self):
- """Toggle between the currently active color scheme and NoColor."""
-
- if self.color_scheme_table.active_scheme_name == 'NoColor':
- self.color_scheme_table.set_active_scheme(self.old_scheme)
- self.Colors = self.color_scheme_table.active_colors
- else:
- self.old_scheme = self.color_scheme_table.active_scheme_name
- self.color_scheme_table.set_active_scheme('NoColor')
- self.Colors = self.color_scheme_table.active_colors
-
- def stb2text(self, stb):
- """Convert a structured traceback (a list) to a string."""
- return '\n'.join(stb)
-
- def text(self, etype, value, tb, tb_offset=None, context=5):
- """Return formatted traceback.
-
- Subclasses may override this if they add extra arguments.
- """
- tb_list = self.structured_traceback(etype, value, tb,
- tb_offset, context)
- return self.stb2text(tb_list)
-
- def structured_traceback(self, etype, evalue, tb, tb_offset=None,
- context=5, mode=None):
- """Return a list of traceback frames.
-
- Must be implemented by each class.
- """
- raise NotImplementedError()
-
-
-#---------------------------------------------------------------------------
-class ListTB(TBTools):
- """Print traceback information from a traceback list, with optional color.
-
- Calling requires 3 arguments: (etype, evalue, elist)
- as would be obtained by::
-
- etype, evalue, tb = sys.exc_info()
- if tb:
- elist = traceback.extract_tb(tb)
- else:
- elist = None
-
- It can thus be used by programs which need to process the traceback before
- printing (such as console replacements based on the code module from the
- standard library).
-
- Because they are meant to be called without a full traceback (only a
- list), instances of this class can't call the interactive pdb debugger."""
-
+
+ def _set_ostream(self, val):
+ assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
+ self._ostream = val
+
+ ostream = property(_get_ostream, _set_ostream)
+
+ def set_colors(self, *args, **kw):
+ """Shorthand access to the color table scheme selector method."""
+
+ # Set own color table
+ self.color_scheme_table.set_active_scheme(*args, **kw)
+ # for convenience, set Colors to the active scheme
+ self.Colors = self.color_scheme_table.active_colors
+ # Also set colors of debugger
+ if hasattr(self, 'pdb') and self.pdb is not None:
+ self.pdb.set_colors(*args, **kw)
+
+ def color_toggle(self):
+ """Toggle between the currently active color scheme and NoColor."""
+
+ if self.color_scheme_table.active_scheme_name == 'NoColor':
+ self.color_scheme_table.set_active_scheme(self.old_scheme)
+ self.Colors = self.color_scheme_table.active_colors
+ else:
+ self.old_scheme = self.color_scheme_table.active_scheme_name
+ self.color_scheme_table.set_active_scheme('NoColor')
+ self.Colors = self.color_scheme_table.active_colors
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return '\n'.join(stb)
+
+ def text(self, etype, value, tb, tb_offset=None, context=5):
+ """Return formatted traceback.
+
+ Subclasses may override this if they add extra arguments.
+ """
+ tb_list = self.structured_traceback(etype, value, tb,
+ tb_offset, context)
+ return self.stb2text(tb_list)
+
+ def structured_traceback(self, etype, evalue, tb, tb_offset=None,
+ context=5, mode=None):
+ """Return a list of traceback frames.
+
+ Must be implemented by each class.
+ """
+ raise NotImplementedError()
+
+
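As a usage sketch of the base class (via the concrete ListTB subclass defined
just below), any object with write and flush can be installed as the output
stream::

    import sys

    printer = ListTB(color_scheme='Linux')
    printer.ostream = sys.stderr   # passes the write/flush assertion
    printer.color_toggle()         # now NoColor; call again to restore Linux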
+#---------------------------------------------------------------------------
+class ListTB(TBTools):
+ """Print traceback information from a traceback list, with optional color.
+
+ Calling requires 3 arguments: (etype, evalue, elist)
+ as would be obtained by::
+
+ etype, evalue, tb = sys.exc_info()
+ if tb:
+ elist = traceback.extract_tb(tb)
+ else:
+ elist = None
+
+ It can thus be used by programs which need to process the traceback before
+ printing (such as console replacements based on the code module from the
+ standard library).
+
+ Because they are meant to be called without a full traceback (only a
+ list), instances of this class can't call the interactive pdb debugger."""
+
def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None):
- TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
+ TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
ostream=ostream, parent=parent)
-
- def __call__(self, etype, value, elist):
- self.ostream.flush()
- self.ostream.write(self.text(etype, value, elist))
- self.ostream.write('\n')
-
- def structured_traceback(self, etype, value, elist, tb_offset=None,
- context=5):
- """Return a color formatted string with the traceback info.
-
- Parameters
- ----------
- etype : exception type
- Type of the exception raised.
-
- value : object
- Data stored in the exception
-
- elist : list
- List of frames, see class docstring for details.
-
- tb_offset : int, optional
- Number of frames in the traceback to skip. If not given, the
- instance value is used (set in constructor).
-
- context : int, optional
- Number of lines of context information to print.
-
- Returns
- -------
- String with formatted exception.
- """
- tb_offset = self.tb_offset if tb_offset is None else tb_offset
- Colors = self.Colors
- out_list = []
- if elist:
-
- if tb_offset and len(elist) > tb_offset:
- elist = elist[tb_offset:]
-
- out_list.append('Traceback %s(most recent call last)%s:' %
- (Colors.normalEm, Colors.Normal) + '\n')
- out_list.extend(self._format_list(elist))
- # The exception info should be a single entry in the list.
- lines = ''.join(self._format_exception_only(etype, value))
- out_list.append(lines)
-
- # Note: this code originally read:
-
- ## for line in lines[:-1]:
- ## out_list.append(" "+line)
- ## out_list.append(lines[-1])
-
- # This means it was indenting everything but the last line by a little
- # bit. I've disabled this for now, but if we see ugliness somewhere we
- # can restore it.
-
- return out_list
-
- def _format_list(self, extracted_list):
- """Format a list of traceback entry tuples for printing.
-
- Given a list of tuples as returned by extract_tb() or
- extract_stack(), return a list of strings ready for printing.
- Each string in the resulting list corresponds to the item with the
- same index in the argument list. Each string ends in a newline;
- the strings may contain internal newlines as well, for those items
- whose source text line is not None.
-
- Lifted almost verbatim from traceback.py
- """
-
- Colors = self.Colors
- list = []
- for filename, lineno, name, line in extracted_list[:-1]:
- item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
- (Colors.filename, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.Normal,
- Colors.lineno, lineno, Colors.Normal,
- Colors.name, py3compat.cast_unicode_py2(name, "utf-8"), Colors.Normal)
- if line:
- item += ' %s\n' % line.strip()
- list.append(item)
- # Emphasize the last entry
- filename, lineno, name, line = extracted_list[-1]
- item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \
- (Colors.normalEm,
- Colors.filenameEm, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.normalEm,
- Colors.linenoEm, lineno, Colors.normalEm,
- Colors.nameEm, py3compat.cast_unicode_py2(name, "utf-8"), Colors.normalEm,
- Colors.Normal)
- if line:
- item += '%s %s%s\n' % (Colors.line, line.strip(),
- Colors.Normal)
- list.append(item)
- return list
-
- def _format_exception_only(self, etype, value):
- """Format the exception part of a traceback.
-
- The arguments are the exception type and value such as given by
- sys.exc_info()[:2]. The return value is a list of strings, each ending
- in a newline. Normally, the list contains a single string; however,
- for SyntaxError exceptions, it contains several lines that (when
- printed) display detailed information about where the syntax error
- occurred. The message indicating which exception occurred is always
- the last string in the list.
-
- Also lifted nearly verbatim from traceback.py
- """
- have_filedata = False
- Colors = self.Colors
- list = []
+
+ def __call__(self, etype, value, elist):
+ self.ostream.flush()
+ self.ostream.write(self.text(etype, value, elist))
+ self.ostream.write('\n')
+
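A minimal end-to-end sketch, mirroring the recipe in the class docstring::

    import sys, traceback

    try:
        1 / 0
    except Exception:
        etype, evalue, tb = sys.exc_info()
        ListTB(color_scheme='NoColor')(etype, evalue, traceback.extract_tb(tb))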
+ def structured_traceback(self, etype, value, elist, tb_offset=None,
+ context=5):
+ """Return a color formatted string with the traceback info.
+
+ Parameters
+ ----------
+ etype : exception type
+ Type of the exception raised.
+
+ value : object
+ Data stored in the exception
+
+ elist : list
+ List of frames, see class docstring for details.
+
+ tb_offset : int, optional
+ Number of frames in the traceback to skip. If not given, the
+ instance value is used (set in constructor).
+
+ context : int, optional
+ Number of lines of context information to print.
+
+ Returns
+ -------
+ String with formatted exception.
+ """
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ Colors = self.Colors
+ out_list = []
+ if elist:
+
+ if tb_offset and len(elist) > tb_offset:
+ elist = elist[tb_offset:]
+
+ out_list.append('Traceback %s(most recent call last)%s:' %
+ (Colors.normalEm, Colors.Normal) + '\n')
+ out_list.extend(self._format_list(elist))
+ # The exception info should be a single entry in the list.
+ lines = ''.join(self._format_exception_only(etype, value))
+ out_list.append(lines)
+
+ # Note: this code originally read:
+
+ ## for line in lines[:-1]:
+ ## out_list.append(" "+line)
+ ## out_list.append(lines[-1])
+
+ # This means it was indenting everything but the last line by a little
+ # bit. I've disabled this for now, but if we see ugliness somewhere we
+ # can restore it.
+
+ return out_list
+
+ def _format_list(self, extracted_list):
+ """Format a list of traceback entry tuples for printing.
+
+ Given a list of tuples as returned by extract_tb() or
+ extract_stack(), return a list of strings ready for printing.
+ Each string in the resulting list corresponds to the item with the
+ same index in the argument list. Each string ends in a newline;
+ the strings may contain internal newlines as well, for those items
+ whose source text line is not None.
+
+ Lifted almost verbatim from traceback.py
+ """
+
+ Colors = self.Colors
+ list = []
+ for filename, lineno, name, line in extracted_list[:-1]:
+ item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
+ (Colors.filename, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.Normal,
+ Colors.lineno, lineno, Colors.Normal,
+ Colors.name, py3compat.cast_unicode_py2(name, "utf-8"), Colors.Normal)
+ if line:
+ item += ' %s\n' % line.strip()
+ list.append(item)
+ # Emphasize the last entry
+ filename, lineno, name, line = extracted_list[-1]
+ item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \
+ (Colors.normalEm,
+ Colors.filenameEm, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.normalEm,
+ Colors.linenoEm, lineno, Colors.normalEm,
+ Colors.nameEm, py3compat.cast_unicode_py2(name, "utf-8"), Colors.normalEm,
+ Colors.Normal)
+ if line:
+ item += '%s %s%s\n' % (Colors.line, line.strip(),
+ Colors.Normal)
+ list.append(item)
+ return list
+
+ def _format_exception_only(self, etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.exc_info()[:2]. The return value is a list of strings, each ending
+ in a newline. Normally, the list contains a single string; however,
+ for SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax error
+ occurred. The message indicating which exception occurred is always
+ the last string in the list.
+
+ Also lifted nearly verbatim from traceback.py
+ """
+ have_filedata = False
+ Colors = self.Colors
+ list = []
stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
- if value is None:
- # Not sure if this can still happen in Python 2.6 and above
+ if value is None:
+ # Not sure if this can still happen in Python 2.6 and above
list.append(stype + '\n')
- else:
- if issubclass(etype, SyntaxError):
- have_filedata = True
- if not value.filename: value.filename = "<string>"
- if value.lineno:
- lineno = value.lineno
- textline = ulinecache.getline(value.filename, value.lineno)
- else:
- lineno = 'unknown'
- textline = ''
- list.append('%s File %s"%s"%s, line %s%s%s\n' % \
- (Colors.normalEm,
- Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm,
- Colors.linenoEm, lineno, Colors.Normal ))
- if textline == '':
- textline = py3compat.cast_unicode(value.text, "utf-8")
-
- if textline is not None:
- i = 0
- while i < len(textline) and textline[i].isspace():
- i += 1
- list.append('%s %s%s\n' % (Colors.line,
- textline.strip(),
- Colors.Normal))
- if value.offset is not None:
- s = ' '
- for c in textline[i:value.offset - 1]:
- if c.isspace():
- s += c
- else:
- s += ' '
- list.append('%s%s^%s\n' % (Colors.caret, s,
- Colors.Normal))
-
- try:
- s = value.msg
- except Exception:
- s = self._some_str(value)
- if s:
+ else:
+ if issubclass(etype, SyntaxError):
+ have_filedata = True
+ if not value.filename: value.filename = "<string>"
+ if value.lineno:
+ lineno = value.lineno
+ textline = ulinecache.getline(value.filename, value.lineno)
+ else:
+ lineno = 'unknown'
+ textline = ''
+ list.append('%s File %s"%s"%s, line %s%s%s\n' % \
+ (Colors.normalEm,
+ Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm,
+ Colors.linenoEm, lineno, Colors.Normal ))
+ if textline == '':
+ textline = py3compat.cast_unicode(value.text, "utf-8")
+
+ if textline is not None:
+ i = 0
+ while i < len(textline) and textline[i].isspace():
+ i += 1
+ list.append('%s %s%s\n' % (Colors.line,
+ textline.strip(),
+ Colors.Normal))
+ if value.offset is not None:
+ s = ' '
+ for c in textline[i:value.offset - 1]:
+ if c.isspace():
+ s += c
+ else:
+ s += ' '
+ list.append('%s%s^%s\n' % (Colors.caret, s,
+ Colors.Normal))
+
+ try:
+ s = value.msg
+ except Exception:
+ s = self._some_str(value)
+ if s:
list.append('%s%s:%s %s\n' % (stype, Colors.excName,
- Colors.Normal, s))
- else:
+ Colors.Normal, s))
+ else:
list.append('%s\n' % stype)
-
- # sync with user hooks
- if have_filedata:
- ipinst = get_ipython()
- if ipinst is not None:
- ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
-
- return list
-
- def get_exception_only(self, etype, value):
- """Only print the exception type and message, without a traceback.
-
- Parameters
- ----------
- etype : exception type
- value : exception value
- """
- return ListTB.structured_traceback(self, etype, value, [])
-
- def show_exception_only(self, etype, evalue):
- """Only print the exception type and message, without a traceback.
-
- Parameters
- ----------
- etype : exception type
- value : exception value
- """
- # This method needs to use __call__ from *this* class, not the one from
- # a subclass whose signature or behavior may be different
- ostream = self.ostream
- ostream.flush()
- ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
- ostream.flush()
-
- def _some_str(self, value):
- # Lifted from traceback.py
- try:
+
+ # sync with user hooks
+ if have_filedata:
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
+
+ return list
+
+ def get_exception_only(self, etype, value):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ value : exception value
+ """
+ return ListTB.structured_traceback(self, etype, value, [])
+
+ def show_exception_only(self, etype, evalue):
+ """Only print the exception type and message, without a traceback.
+
+ Parameters
+ ----------
+ etype : exception type
+ value : exception value
+ """
+ # This method needs to use __call__ from *this* class, not the one from
+ # a subclass whose signature or behavior may be different
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
+ ostream.flush()
+
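For instance, a hedged sketch::

    ltb = ListTB()
    ltb.show_exception_only(ValueError, ValueError('bad input'))
    # writes just:  ValueError: bad input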
+ def _some_str(self, value):
+ # Lifted from traceback.py
+ try:
return py3compat.cast_unicode(str(value))
- except:
+ except:
return u'<unprintable %s object>' % type(value).__name__
-
-
-#----------------------------------------------------------------------------
-class VerboseTB(TBTools):
- """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
- of HTML. Requires inspect and pydoc. Crazy, man.
-
- Modified version which optionally strips the topmost entries from the
- traceback, to be used with alternate interpreters (because their own code
- would appear in the traceback)."""
-
- def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None,
- tb_offset=0, long_header=False, include_vars=True,
+
+
+#----------------------------------------------------------------------------
+class VerboseTB(TBTools):
+ """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
+ of HTML. Requires inspect and pydoc. Crazy, man.
+
+ Modified version which optionally strips the topmost entries from the
+ traceback, to be used with alternate interpreters (because their own code
+ would appear in the traceback)."""
+
+ def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None,
+ tb_offset=0, long_header=False, include_vars=True,
check_cache=None, debugger_cls = None):
- """Specify traceback offset, headers and color scheme.
-
- Define how many frames to drop from the tracebacks. Calling it with
- tb_offset=1 allows use of this handler in interpreters which will have
- their own code at the top of the traceback (VerboseTB will first
- remove that frame before printing the traceback info)."""
- TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
- ostream=ostream)
- self.tb_offset = tb_offset
- self.long_header = long_header
- self.include_vars = include_vars
- # By default we use linecache.checkcache, but the user can provide a
- # different check_cache implementation. This is used by the IPython
- # kernel to provide tracebacks for interactive code that is cached,
- # by a compiler instance that flushes the linecache but preserves its
- # own code cache.
- if check_cache is None:
- check_cache = linecache.checkcache
- self.check_cache = check_cache
-
+ """Specify traceback offset, headers and color scheme.
+
+ Define how many frames to drop from the tracebacks. Calling it with
+ tb_offset=1 allows use of this handler in interpreters which will have
+ their own code at the top of the traceback (VerboseTB will first
+ remove that frame before printing the traceback info)."""
+ TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
+ ostream=ostream)
+ self.tb_offset = tb_offset
+ self.long_header = long_header
+ self.include_vars = include_vars
+ # By default we use linecache.checkcache, but the user can provide a
+ # different check_cache implementation. This is used by the IPython
+ # kernel to provide tracebacks for interactive code that is cached,
+ # by a compiler instance that flushes the linecache but preserves its
+ # own code cache.
+ if check_cache is None:
+ check_cache = linecache.checkcache
+ self.check_cache = check_cache
+
self.debugger_cls = debugger_cls or debugger.Pdb
- def format_records(self, records, last_unique, recursion_repeat):
- """Format the stack frames of the traceback"""
- frames = []
- for r in records[:last_unique+recursion_repeat+1]:
- #print '*** record:',file,lnum,func,lines,index # dbg
- frames.append(self.format_record(*r))
-
- if recursion_repeat:
- frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat)
- frames.append(self.format_record(*records[last_unique+recursion_repeat+1]))
-
- return frames
-
- def format_record(self, frame, file, lnum, func, lines, index):
- """Format a single stack frame"""
- Colors = self.Colors # just a shorthand + quicker name lookup
- ColorsNormal = Colors.Normal # used a lot
- col_scheme = self.color_scheme_table.active_scheme_name
- indent = ' ' * INDENT_SIZE
- em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
- undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
- tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
- tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm,
- ColorsNormal)
- tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
- (Colors.vName, Colors.valEm, ColorsNormal)
- tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
- tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
- Colors.vName, ColorsNormal)
- tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
-
- tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
- tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line,
- ColorsNormal)
-
- abspath = os.path.abspath
-
-
- if not file:
- file = '?'
- elif file.startswith(str("<")) and file.endswith(str(">")):
- # Not a real filename, no problem...
- pass
- elif not os.path.isabs(file):
- # Try to make the filename absolute by trying all
- # sys.path entries (which is also what linecache does)
- for dirname in sys.path:
- try:
- fullname = os.path.join(dirname, file)
- if os.path.isfile(fullname):
- file = os.path.abspath(fullname)
- break
- except Exception:
- # Just in case that sys.path contains very
- # strange entries...
- pass
-
- file = py3compat.cast_unicode(file, util_path.fs_encoding)
- link = tpl_link % file
- args, varargs, varkw, locals = fixed_getargvalues(frame)
-
- if func == '?':
- call = ''
- else:
- # Decide whether to include variable details or not
- var_repr = self.include_vars and eqrepr or nullrepr
- try:
- call = tpl_call % (func, inspect.formatargvalues(args,
- varargs, varkw,
- locals, formatvalue=var_repr))
- except KeyError:
- # This happens in situations like errors inside generator
- # expressions, where local variables are listed in the
- # line, but can't be extracted from the frame. I'm not
- # 100% sure this isn't actually a bug in inspect itself,
- # but since there's no info for us to compute with, the
- # best we can do is report the failure and move on. Here
- # we must *not* call any traceback construction again,
- # because that would mess up use of %debug later on. So we
- # simply report the failure and move on. The only
- # limitation will be that this frame won't have locals
- # listed in the call signature. Quite subtle problem...
- # I can't think of a good way to validate this in a unit
- # test, but running a script consisting of:
- # dict( (k,v.strip()) for (k,v) in range(10) )
- # will illustrate the error, if this exception catch is
- # disabled.
- call = tpl_call_fail % func
-
- # Don't attempt to tokenize binary files.
- if file.endswith(('.so', '.pyd', '.dll')):
- return '%s %s\n' % (link, call)
-
- elif file.endswith(('.pyc', '.pyo')):
- # Look up the corresponding source file.
+ def format_records(self, records, last_unique, recursion_repeat):
+ """Format the stack frames of the traceback"""
+ frames = []
+ for r in records[:last_unique+recursion_repeat+1]:
+ #print '*** record:',file,lnum,func,lines,index # dbg
+ frames.append(self.format_record(*r))
+
+ if recursion_repeat:
+ frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat)
+ frames.append(self.format_record(*records[last_unique+recursion_repeat+1]))
+
+ return frames
+
+ def format_record(self, frame, file, lnum, func, lines, index):
+ """Format a single stack frame"""
+ Colors = self.Colors # just a shorthand + quicker name lookup
+ ColorsNormal = Colors.Normal # used a lot
+ col_scheme = self.color_scheme_table.active_scheme_name
+ indent = ' ' * INDENT_SIZE
+ em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
+ undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
+ tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
+ tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm,
+ ColorsNormal)
+ tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
+ (Colors.vName, Colors.valEm, ColorsNormal)
+ tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
+ tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
+ Colors.vName, ColorsNormal)
+ tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
+
+ tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
+ tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line,
+ ColorsNormal)
+
+ abspath = os.path.abspath
+
+
+ if not file:
+ file = '?'
+ elif file.startswith(str("<")) and file.endswith(str(">")):
+ # Not a real filename, no problem...
+ pass
+ elif not os.path.isabs(file):
+ # Try to make the filename absolute by trying all
+ # sys.path entries (which is also what linecache does)
+ for dirname in sys.path:
+ try:
+ fullname = os.path.join(dirname, file)
+ if os.path.isfile(fullname):
+ file = os.path.abspath(fullname)
+ break
+ except Exception:
+ # Just in case that sys.path contains very
+ # strange entries...
+ pass
+
+ file = py3compat.cast_unicode(file, util_path.fs_encoding)
+ link = tpl_link % file
+ args, varargs, varkw, locals = fixed_getargvalues(frame)
+
+ if func == '?':
+ call = ''
+ else:
+ # Decide whether to include variable details or not
+ var_repr = self.include_vars and eqrepr or nullrepr
+ try:
+ call = tpl_call % (func, inspect.formatargvalues(args,
+ varargs, varkw,
+ locals, formatvalue=var_repr))
+ except KeyError:
+ # This happens in situations like errors inside generator
+ # expressions, where local variables are listed in the
+ # line, but can't be extracted from the frame. I'm not
+ # 100% sure this isn't actually a bug in inspect itself,
+ # but since there's no info for us to compute with, the
+ # best we can do is report the failure and move on. Here
+ # we must *not* call any traceback construction again,
+ # because that would mess up use of %debug later on. So we
+ # simply report the failure and move on. The only
+ # limitation will be that this frame won't have locals
+ # listed in the call signature. Quite subtle problem...
+ # I can't think of a good way to validate this in a unit
+ # test, but running a script consisting of:
+ # dict( (k,v.strip()) for (k,v) in range(10) )
+ # will illustrate the error, if this exception catch is
+ # disabled.
+ call = tpl_call_fail % func
+
+ # Don't attempt to tokenize binary files.
+ if file.endswith(('.so', '.pyd', '.dll')):
+ return '%s %s\n' % (link, call)
+
+ elif file.endswith(('.pyc', '.pyo')):
+ # Look up the corresponding source file.
try:
file = openpy.source_from_cache(file)
except ValueError:
# Failed to get the source file for some reason
# E.g. https://github.com/ipython/ipython/issues/9486
return '%s %s\n' % (link, call)
-
- def linereader(file=file, lnum=[lnum], getline=ulinecache.getline):
- line = getline(file, lnum[0])
- lnum[0] += 1
- return line
-
- # Build the list of names on this line of code where the exception
- # occurred.
- try:
- names = []
- name_cont = False
-
- for token_type, token, start, end, line in generate_tokens(linereader):
- # build composite names
- if token_type == tokenize.NAME and token not in keyword.kwlist:
- if name_cont:
- # Continuation of a dotted name
- try:
- names[-1].append(token)
- except IndexError:
- names.append([token])
- name_cont = False
- else:
- # Regular new names. We append everything, the caller
- # will be responsible for pruning the list later. It's
- # very tricky to try to prune as we go, b/c composite
- # names can fool us. The pruning at the end is easy
- # to do (or the caller can print a list with repeated
-                    # names if so desired.)
- names.append([token])
- elif token == '.':
- name_cont = True
- elif token_type == tokenize.NEWLINE:
- break
-
- except (IndexError, UnicodeDecodeError, SyntaxError):
- # signals exit of tokenizer
- # SyntaxError can occur if the file is not actually Python
- # - see gh-6300
- pass
- except tokenize.TokenError as msg:
- _m = ("An unexpected error occurred while tokenizing input\n"
- "The following traceback may be corrupted or invalid\n"
- "The error message is: %s\n" % msg)
- error(_m)
-
- # Join composite names (e.g. "dict.fromkeys")
- names = ['.'.join(n) for n in names]
- # prune names list of duplicates, but keep the right order
- unique_names = uniq_stable(names)
-
- # Start loop over vars
- lvals = []
- if self.include_vars:
- for name_full in unique_names:
- name_base = name_full.split('.', 1)[0]
- if name_base in frame.f_code.co_varnames:
- if name_base in locals:
- try:
- value = repr(eval(name_full, locals))
- except:
- value = undefined
- else:
- value = undefined
- name = tpl_local_var % name_full
- else:
- if name_base in frame.f_globals:
- try:
- value = repr(eval(name_full, frame.f_globals))
- except:
- value = undefined
- else:
- value = undefined
- name = tpl_global_var % name_full
- lvals.append(tpl_name_val % (name, value))
- if lvals:
- lvals = '%s%s' % (indent, em_normal.join(lvals))
- else:
- lvals = ''
-
- level = '%s %s\n' % (link, call)
-
- if index is None:
- return level
- else:
- return '%s%s' % (level, ''.join(
- _format_traceback_lines(lnum, index, lines, Colors, lvals,
- col_scheme)))
-
- def prepare_chained_exception_message(self, cause):
- direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
- exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
-
- if cause:
- message = [[direct_cause]]
- else:
- message = [[exception_during_handling]]
- return message
-
- def prepare_header(self, etype, long_version=False):
- colors = self.Colors # just a shorthand + quicker name lookup
- colorsnormal = colors.Normal # used a lot
- exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
+
+ def linereader(file=file, lnum=[lnum], getline=ulinecache.getline):
+ line = getline(file, lnum[0])
+ lnum[0] += 1
+ return line
+
+ # Build the list of names on this line of code where the exception
+ # occurred.
+ try:
+ names = []
+ name_cont = False
+
+ for token_type, token, start, end, line in generate_tokens(linereader):
+ # build composite names
+ if token_type == tokenize.NAME and token not in keyword.kwlist:
+ if name_cont:
+ # Continuation of a dotted name
+ try:
+ names[-1].append(token)
+ except IndexError:
+ names.append([token])
+ name_cont = False
+ else:
+ # Regular new names. We append everything, the caller
+ # will be responsible for pruning the list later. It's
+ # very tricky to try to prune as we go, b/c composite
+ # names can fool us. The pruning at the end is easy
+ # to do (or the caller can print a list with repeated
+                # names if so desired.)
+ names.append([token])
+ elif token == '.':
+ name_cont = True
+ elif token_type == tokenize.NEWLINE:
+ break
+
+ except (IndexError, UnicodeDecodeError, SyntaxError):
+ # signals exit of tokenizer
+ # SyntaxError can occur if the file is not actually Python
+ # - see gh-6300
+ pass
+ except tokenize.TokenError as msg:
+ _m = ("An unexpected error occurred while tokenizing input\n"
+ "The following traceback may be corrupted or invalid\n"
+ "The error message is: %s\n" % msg)
+ error(_m)
+
+ # Join composite names (e.g. "dict.fromkeys")
+ names = ['.'.join(n) for n in names]
+ # prune names list of duplicates, but keep the right order
+ unique_names = uniq_stable(names)
+
+ # Start loop over vars
+ lvals = []
+ if self.include_vars:
+ for name_full in unique_names:
+ name_base = name_full.split('.', 1)[0]
+ if name_base in frame.f_code.co_varnames:
+ if name_base in locals:
+ try:
+ value = repr(eval(name_full, locals))
+ except:
+ value = undefined
+ else:
+ value = undefined
+ name = tpl_local_var % name_full
+ else:
+ if name_base in frame.f_globals:
+ try:
+ value = repr(eval(name_full, frame.f_globals))
+ except:
+ value = undefined
+ else:
+ value = undefined
+ name = tpl_global_var % name_full
+ lvals.append(tpl_name_val % (name, value))
+ if lvals:
+ lvals = '%s%s' % (indent, em_normal.join(lvals))
+ else:
+ lvals = ''
+
+ level = '%s %s\n' % (link, call)
+
+ if index is None:
+ return level
+ else:
+ return '%s%s' % (level, ''.join(
+ _format_traceback_lines(lnum, index, lines, Colors, lvals,
+ col_scheme)))
+
+ def prepare_chained_exception_message(self, cause):
+ direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
+ exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
+
+ if cause:
+ message = [[direct_cause]]
+ else:
+ message = [[exception_during_handling]]
+ return message
+
+ def prepare_header(self, etype, long_version=False):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
width = min(75, get_terminal_size()[0])
- if long_version:
- # Header with the exception type, python version, and date
- pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
- date = time.ctime(time.time())
-
+ if long_version:
+ # Header with the exception type, python version, and date
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+
head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal,
exc, ' ' * (width - len(str(etype)) - len(pyver)),
pyver, date.rjust(width) )
- head += "\nA problem occurred executing Python code. Here is the sequence of function" \
- "\ncalls leading up to the error, with the most recent (innermost) call last."
- else:
- # Simplified header
- head = '%s%s' % (exc, 'Traceback (most recent call last)'. \
+ head += "\nA problem occurred executing Python code. Here is the sequence of function" \
+ "\ncalls leading up to the error, with the most recent (innermost) call last."
+ else:
+ # Simplified header
+ head = '%s%s' % (exc, 'Traceback (most recent call last)'. \
rjust(width - len(str(etype))) )
-
- return head
-
- def format_exception(self, etype, evalue):
- colors = self.Colors # just a shorthand + quicker name lookup
- colorsnormal = colors.Normal # used a lot
- indent = ' ' * INDENT_SIZE
- # Get (safely) a string form of the exception info
- try:
- etype_str, evalue_str = map(str, (etype, evalue))
- except:
- # User exception is improperly defined.
- etype, evalue = str, sys.exc_info()[:2]
- etype_str, evalue_str = map(str, (etype, evalue))
- # ... and format it
- exception = ['%s%s%s: %s' % (colors.excName, etype_str,
- colorsnormal, py3compat.cast_unicode(evalue_str))]
-
- if (not py3compat.PY3) and type(evalue) is types.InstanceType:
- try:
- names = [w for w in dir(evalue) if isinstance(w, py3compat.string_types)]
- except:
- # Every now and then, an object with funny internals blows up
- # when dir() is called on it. We do the best we can to report
- # the problem and continue
- _m = '%sException reporting error (object with broken dir())%s:'
- exception.append(_m % (colors.excName, colorsnormal))
- etype_str, evalue_str = map(str, sys.exc_info()[:2])
- exception.append('%s%s%s: %s' % (colors.excName, etype_str,
- colorsnormal, py3compat.cast_unicode(evalue_str)))
- names = []
- for name in names:
- value = text_repr(getattr(evalue, name))
- exception.append('\n%s%s = %s' % (indent, name, value))
-
- return exception
-
- def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset):
- """Formats the header, traceback and exception message for a single exception.
-
- This may be called multiple times by Python 3 exception chaining
- (PEP 3134).
- """
- # some locals
- orig_etype = etype
- try:
- etype = etype.__name__
- except AttributeError:
- pass
-
- tb_offset = self.tb_offset if tb_offset is None else tb_offset
- head = self.prepare_header(etype, self.long_header)
- records = self.get_records(etb, number_of_lines_of_context, tb_offset)
-
- if records is None:
- return ""
-
- last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records)
-
- frames = self.format_records(records, last_unique, recursion_repeat)
-
- formatted_exception = self.format_exception(etype, evalue)
- if records:
- filepath, lnum = records[-1][1:3]
- filepath = os.path.abspath(filepath)
- ipinst = get_ipython()
- if ipinst is not None:
- ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
-
- return [[head] + frames + [''.join(formatted_exception[0])]]
-
- def get_records(self, etb, number_of_lines_of_context, tb_offset):
- try:
- # Try the default getinnerframes and Alex's: Alex's fixes some
- # problems, but it generates empty tracebacks for console errors
-            # (5 blank lines) where none should be returned.
- return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)
+
+ return head
+
+ def format_exception(self, etype, evalue):
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
+ indent = ' ' * INDENT_SIZE
+ # Get (safely) a string form of the exception info
+ try:
+ etype_str, evalue_str = map(str, (etype, evalue))
+ except:
+ # User exception is improperly defined.
+ etype, evalue = str, sys.exc_info()[:2]
+ etype_str, evalue_str = map(str, (etype, evalue))
+ # ... and format it
+ exception = ['%s%s%s: %s' % (colors.excName, etype_str,
+ colorsnormal, py3compat.cast_unicode(evalue_str))]
+
+ if (not py3compat.PY3) and type(evalue) is types.InstanceType:
+ try:
+ names = [w for w in dir(evalue) if isinstance(w, py3compat.string_types)]
+ except:
+ # Every now and then, an object with funny internals blows up
+ # when dir() is called on it. We do the best we can to report
+ # the problem and continue
+ _m = '%sException reporting error (object with broken dir())%s:'
+ exception.append(_m % (colors.excName, colorsnormal))
+ etype_str, evalue_str = map(str, sys.exc_info()[:2])
+ exception.append('%s%s%s: %s' % (colors.excName, etype_str,
+ colorsnormal, py3compat.cast_unicode(evalue_str)))
+ names = []
+ for name in names:
+ value = text_repr(getattr(evalue, name))
+ exception.append('\n%s%s = %s' % (indent, name, value))
+
+ return exception
+
+ def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset):
+ """Formats the header, traceback and exception message for a single exception.
+
+ This may be called multiple times by Python 3 exception chaining
+ (PEP 3134).
+ """
+ # some locals
+ orig_etype = etype
+ try:
+ etype = etype.__name__
+ except AttributeError:
+ pass
+
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ head = self.prepare_header(etype, self.long_header)
+ records = self.get_records(etb, number_of_lines_of_context, tb_offset)
+
+ if records is None:
+ return ""
+
+ last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records)
+
+ frames = self.format_records(records, last_unique, recursion_repeat)
+
+ formatted_exception = self.format_exception(etype, evalue)
+ if records:
+ filepath, lnum = records[-1][1:3]
+ filepath = os.path.abspath(filepath)
+ ipinst = get_ipython()
+ if ipinst is not None:
+ ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
+
+ return [[head] + frames + [''.join(formatted_exception[0])]]
+
+ def get_records(self, etb, number_of_lines_of_context, tb_offset):
+ try:
+ # Try the default getinnerframes and Alex's: Alex's fixes some
+ # problems, but it generates empty tracebacks for console errors
+            # (5 blank lines) where none should be returned.
+ return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)
except UnicodeDecodeError:
# This can occur if a file's encoding magic comment is wrong.
# I can't see a way to recover without duplicating a bunch of code
# from the stdlib traceback module. --TK
error('\nUnicodeDecodeError while processing traceback.\n')
return None
- except:
- # FIXME: I've been getting many crash reports from python 2.3
- # users, traceable to inspect.py. If I can find a small test-case
- # to reproduce this, I should either write a better workaround or
- # file a bug report against inspect (if that's the real problem).
- # So far, I haven't been able to find an isolated example to
- # reproduce the problem.
- inspect_error()
- traceback.print_exc(file=self.ostream)
-            info('\nUnfortunately, your original traceback cannot be constructed.\n')
- return None
-
- def get_parts_of_chained_exception(self, evalue):
- def get_chained_exception(exception_value):
- cause = getattr(exception_value, '__cause__', None)
- if cause:
- return cause
- if getattr(exception_value, '__suppress_context__', False):
- return None
- return getattr(exception_value, '__context__', None)
-
- chained_evalue = get_chained_exception(evalue)
-
- if chained_evalue:
- return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
-
- def structured_traceback(self, etype, evalue, etb, tb_offset=None,
- number_of_lines_of_context=5):
- """Return a nice text document describing the traceback."""
-
- formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
- tb_offset)
-
- colors = self.Colors # just a shorthand + quicker name lookup
- colorsnormal = colors.Normal # used a lot
+ except:
+ # FIXME: I've been getting many crash reports from python 2.3
+ # users, traceable to inspect.py. If I can find a small test-case
+ # to reproduce this, I should either write a better workaround or
+ # file a bug report against inspect (if that's the real problem).
+ # So far, I haven't been able to find an isolated example to
+ # reproduce the problem.
+ inspect_error()
+ traceback.print_exc(file=self.ostream)
+        info('\nUnfortunately, your original traceback cannot be constructed.\n')
+ return None
+
+ def get_parts_of_chained_exception(self, evalue):
+ def get_chained_exception(exception_value):
+ cause = getattr(exception_value, '__cause__', None)
+ if cause:
+ return cause
+ if getattr(exception_value, '__suppress_context__', False):
+ return None
+ return getattr(exception_value, '__context__', None)
+
+ chained_evalue = get_chained_exception(evalue)
+
+ if chained_evalue:
+ return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
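
A self-contained sketch of the chaining rule above (illustrative, Python 3 only: an explicit __cause__ wins, and __suppress_context__ mutes the implicit __context__)::

    def chained(exc):
        # mirrors get_chained_exception: cause first, then context
        cause = getattr(exc, '__cause__', None)
        if cause:
            return cause
        if getattr(exc, '__suppress_context__', False):
            return None
        return getattr(exc, '__context__', None)

    try:
        try:
            {}['missing']
        except KeyError as inner:
            raise ValueError('outer') from inner  # sets __cause__
    except ValueError as outer:
        assert isinstance(chained(outer), KeyError)
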
+
+ def structured_traceback(self, etype, evalue, etb, tb_offset=None,
+ number_of_lines_of_context=5):
+ """Return a nice text document describing the traceback."""
+
+ formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
+ tb_offset)
+
+ colors = self.Colors # just a shorthand + quicker name lookup
+ colorsnormal = colors.Normal # used a lot
head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
- structured_traceback_parts = [head]
- if py3compat.PY3:
- chained_exceptions_tb_offset = 0
- lines_of_context = 3
- formatted_exceptions = formatted_exception
- exception = self.get_parts_of_chained_exception(evalue)
- if exception:
- formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
- etype, evalue, etb = exception
- else:
- evalue = None
- chained_exc_ids = set()
- while evalue:
- formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
- chained_exceptions_tb_offset)
- exception = self.get_parts_of_chained_exception(evalue)
-
- if exception and not id(exception[1]) in chained_exc_ids:
- chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
- formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
- etype, evalue, etb = exception
- else:
- evalue = None
-
- # we want to see exceptions in a reversed order:
- # the first exception should be on top
- for formatted_exception in reversed(formatted_exceptions):
- structured_traceback_parts += formatted_exception
- else:
- structured_traceback_parts += formatted_exception[0]
-
- return structured_traceback_parts
-
- def debugger(self, force=False):
- """Call up the pdb debugger if desired, always clean up the tb
- reference.
-
- Keywords:
-
- - force(False): by default, this routine checks the instance call_pdb
- flag and does not actually invoke the debugger if the flag is false.
- The 'force' option forces the debugger to activate even if the flag
- is false.
-
- If the call_pdb flag is set, the pdb interactive debugger is
- invoked. In all cases, the self.tb reference to the current traceback
- is deleted to prevent lingering references which hamper memory
- management.
-
- Note that each call to pdb() does an 'import readline', so if your app
- requires a special setup for the readline completers, you'll have to
- fix that by hand after invoking the exception handler."""
-
- if force or self.call_pdb:
- if self.pdb is None:
+ structured_traceback_parts = [head]
+ if py3compat.PY3:
+ chained_exceptions_tb_offset = 0
+ lines_of_context = 3
+ formatted_exceptions = formatted_exception
+ exception = self.get_parts_of_chained_exception(evalue)
+ if exception:
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+ chained_exc_ids = set()
+ while evalue:
+ formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
+ chained_exceptions_tb_offset)
+ exception = self.get_parts_of_chained_exception(evalue)
+
+ if exception and not id(exception[1]) in chained_exc_ids:
+ chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
+ formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
+ etype, evalue, etb = exception
+ else:
+ evalue = None
+
+ # we want to see exceptions in a reversed order:
+ # the first exception should be on top
+ for formatted_exception in reversed(formatted_exceptions):
+ structured_traceback_parts += formatted_exception
+ else:
+ structured_traceback_parts += formatted_exception[0]
+
+ return structured_traceback_parts
+
+ def debugger(self, force=False):
+ """Call up the pdb debugger if desired, always clean up the tb
+ reference.
+
+ Keywords:
+
+ - force(False): by default, this routine checks the instance call_pdb
+ flag and does not actually invoke the debugger if the flag is false.
+ The 'force' option forces the debugger to activate even if the flag
+ is false.
+
+ If the call_pdb flag is set, the pdb interactive debugger is
+ invoked. In all cases, the self.tb reference to the current traceback
+ is deleted to prevent lingering references which hamper memory
+ management.
+
+ Note that each call to pdb() does an 'import readline', so if your app
+ requires a special setup for the readline completers, you'll have to
+ fix that by hand after invoking the exception handler."""
+
+ if force or self.call_pdb:
+ if self.pdb is None:
self.pdb = self.debugger_cls()
- # the system displayhook may have changed, restore the original
- # for pdb
- display_trap = DisplayTrap(hook=sys.__displayhook__)
- with display_trap:
- self.pdb.reset()
- # Find the right frame so we don't pop up inside ipython itself
- if hasattr(self, 'tb') and self.tb is not None:
- etb = self.tb
- else:
- etb = self.tb = sys.last_traceback
- while self.tb is not None and self.tb.tb_next is not None:
- self.tb = self.tb.tb_next
- if etb and etb.tb_next:
- etb = etb.tb_next
- self.pdb.botframe = etb.tb_frame
- self.pdb.interaction(self.tb.tb_frame, self.tb)
-
- if hasattr(self, 'tb'):
- del self.tb
-
- def handler(self, info=None):
- (etype, evalue, etb) = info or sys.exc_info()
- self.tb = etb
- ostream = self.ostream
- ostream.flush()
- ostream.write(self.text(etype, evalue, etb))
- ostream.write('\n')
- ostream.flush()
-
- # Changed so an instance can just be called as VerboseTB_inst() and print
- # out the right info on its own.
- def __call__(self, etype=None, evalue=None, etb=None):
- """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
- if etb is None:
- self.handler()
- else:
- self.handler((etype, evalue, etb))
- try:
- self.debugger()
- except KeyboardInterrupt:
- print("\nKeyboardInterrupt")
-
-
-#----------------------------------------------------------------------------
-class FormattedTB(VerboseTB, ListTB):
- """Subclass ListTB but allow calling with a traceback.
-
- It can thus be used as a sys.excepthook for Python > 2.1.
-
- Also adds 'Context' and 'Verbose' modes, not available in ListTB.
-
- Allows a tb_offset to be specified. This is useful for situations where
- one needs to remove a number of topmost frames from the traceback (such as
- occurs with python programs that themselves execute other python code,
- like Python shells). """
-
- def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
- ostream=None,
- tb_offset=0, long_header=False, include_vars=False,
+ # the system displayhook may have changed, restore the original
+ # for pdb
+ display_trap = DisplayTrap(hook=sys.__displayhook__)
+ with display_trap:
+ self.pdb.reset()
+ # Find the right frame so we don't pop up inside ipython itself
+ if hasattr(self, 'tb') and self.tb is not None:
+ etb = self.tb
+ else:
+ etb = self.tb = sys.last_traceback
+ while self.tb is not None and self.tb.tb_next is not None:
+ self.tb = self.tb.tb_next
+ if etb and etb.tb_next:
+ etb = etb.tb_next
+ self.pdb.botframe = etb.tb_frame
+ self.pdb.interaction(self.tb.tb_frame, self.tb)
+
+ if hasattr(self, 'tb'):
+ del self.tb
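
A sketch of forcing the debugger even when call_pdb is False (illustration only; this blocks on an interactive pdb prompt)::

    import sys

    vtb = VerboseTB(call_pdb=False)
    try:
        1 / 0
    except ZeroDivisionError:
        vtb.tb = sys.exc_info()[2]
        vtb.debugger(force=True)  # force=True overrides the call_pdb flag
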
+
+ def handler(self, info=None):
+ (etype, evalue, etb) = info or sys.exc_info()
+ self.tb = etb
+ ostream = self.ostream
+ ostream.flush()
+ ostream.write(self.text(etype, evalue, etb))
+ ostream.write('\n')
+ ostream.flush()
+
+ # Changed so an instance can just be called as VerboseTB_inst() and print
+ # out the right info on its own.
+ def __call__(self, etype=None, evalue=None, etb=None):
+ """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
+ if etb is None:
+ self.handler()
+ else:
+ self.handler((etype, evalue, etb))
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+
+#----------------------------------------------------------------------------
+class FormattedTB(VerboseTB, ListTB):
+ """Subclass ListTB but allow calling with a traceback.
+
+ It can thus be used as a sys.excepthook for Python > 2.1.
+
+ Also adds 'Context' and 'Verbose' modes, not available in ListTB.
+
+ Allows a tb_offset to be specified. This is useful for situations where
+ one needs to remove a number of topmost frames from the traceback (such as
+ occurs with python programs that themselves execute other python code,
+ like Python shells). """
+
+ def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
+ ostream=None,
+ tb_offset=0, long_header=False, include_vars=False,
check_cache=None, debugger_cls=None):
-
- # NEVER change the order of this list. Put new modes at the end:
- self.valid_modes = ['Plain', 'Context', 'Verbose']
- self.verbose_modes = self.valid_modes[1:3]
-
- VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
- ostream=ostream, tb_offset=tb_offset,
- long_header=long_header, include_vars=include_vars,
+
+ # NEVER change the order of this list. Put new modes at the end:
+ self.valid_modes = ['Plain', 'Context', 'Verbose']
+ self.verbose_modes = self.valid_modes[1:3]
+
+ VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
+ ostream=ostream, tb_offset=tb_offset,
+ long_header=long_header, include_vars=include_vars,
check_cache=check_cache, debugger_cls=debugger_cls)
-
- # Different types of tracebacks are joined with different separators to
- # form a single string. They are taken from this dict
- self._join_chars = dict(Plain='', Context='\n', Verbose='\n')
- # set_mode also sets the tb_join_char attribute
- self.set_mode(mode)
-
- def _extract_tb(self, tb):
- if tb:
- return traceback.extract_tb(tb)
- else:
- return None
-
- def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
- tb_offset = self.tb_offset if tb_offset is None else tb_offset
- mode = self.mode
- if mode in self.verbose_modes:
- # Verbose modes need a full traceback
- return VerboseTB.structured_traceback(
- self, etype, value, tb, tb_offset, number_of_lines_of_context
- )
- else:
- # We must check the source cache because otherwise we can print
- # out-of-date source code.
- self.check_cache()
- # Now we can extract and format the exception
- elist = self._extract_tb(tb)
- return ListTB.structured_traceback(
- self, etype, value, elist, tb_offset, number_of_lines_of_context
- )
-
- def stb2text(self, stb):
- """Convert a structured traceback (a list) to a string."""
- return self.tb_join_char.join(stb)
-
-
- def set_mode(self, mode=None):
- """Switch to the desired mode.
-
- If mode is not specified, cycles through the available modes."""
-
- if not mode:
- new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
- len(self.valid_modes)
- self.mode = self.valid_modes[new_idx]
- elif mode not in self.valid_modes:
- raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n'
- 'Valid modes: ' + str(self.valid_modes))
- else:
- self.mode = mode
- # include variable details only in 'Verbose' mode
- self.include_vars = (self.mode == self.valid_modes[2])
- # Set the join character for generating text tracebacks
- self.tb_join_char = self._join_chars[self.mode]
-
- # some convenient shortcuts
- def plain(self):
- self.set_mode(self.valid_modes[0])
-
- def context(self):
- self.set_mode(self.valid_modes[1])
-
- def verbose(self):
- self.set_mode(self.valid_modes[2])
-
-
-#----------------------------------------------------------------------------
-class AutoFormattedTB(FormattedTB):
- """A traceback printer which can be called on the fly.
-
- It will find out about exceptions by itself.
-
- A brief example::
-
- AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux')
- try:
- ...
- except:
- AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
- """
-
- def __call__(self, etype=None, evalue=None, etb=None,
- out=None, tb_offset=None):
- """Print out a formatted exception traceback.
-
- Optional arguments:
- - out: an open file-like object to direct output to.
-
- - tb_offset: the number of frames to skip over in the stack, on a
-          per-call basis (this temporarily overrides the instance's tb_offset
-          given at initialization time)."""
-
- if out is None:
- out = self.ostream
- out.flush()
- out.write(self.text(etype, evalue, etb, tb_offset))
- out.write('\n')
- out.flush()
- # FIXME: we should remove the auto pdb behavior from here and leave
- # that to the clients.
- try:
- self.debugger()
- except KeyboardInterrupt:
- print("\nKeyboardInterrupt")
-
- def structured_traceback(self, etype=None, value=None, tb=None,
- tb_offset=None, number_of_lines_of_context=5):
- if etype is None:
- etype, value, tb = sys.exc_info()
- self.tb = tb
- return FormattedTB.structured_traceback(
- self, etype, value, tb, tb_offset, number_of_lines_of_context)
-
-
-#---------------------------------------------------------------------------
-
-# A simple class to preserve Nathan's original functionality.
-class ColorTB(FormattedTB):
- """Shorthand to initialize a FormattedTB in Linux colors mode."""
-
- def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
- FormattedTB.__init__(self, color_scheme=color_scheme,
- call_pdb=call_pdb, **kwargs)
-
-
-class SyntaxTB(ListTB):
- """Extension which holds some state: the last exception value"""
-
- def __init__(self, color_scheme='NoColor'):
- ListTB.__init__(self, color_scheme)
- self.last_syntax_error = None
-
- def __call__(self, etype, value, elist):
- self.last_syntax_error = value
-
- ListTB.__call__(self, etype, value, elist)
-
- def structured_traceback(self, etype, value, elist, tb_offset=None,
- context=5):
- # If the source file has been edited, the line in the syntax error can
- # be wrong (retrieved from an outdated cache). This replaces it with
- # the current value.
- if isinstance(value, SyntaxError) \
- and isinstance(value.filename, py3compat.string_types) \
- and isinstance(value.lineno, int):
- linecache.checkcache(value.filename)
- newtext = ulinecache.getline(value.filename, value.lineno)
- if newtext:
- value.text = newtext
+
+ # Different types of tracebacks are joined with different separators to
+ # form a single string. They are taken from this dict
+ self._join_chars = dict(Plain='', Context='\n', Verbose='\n')
+ # set_mode also sets the tb_join_char attribute
+ self.set_mode(mode)
+
+ def _extract_tb(self, tb):
+ if tb:
+ return traceback.extract_tb(tb)
+ else:
+ return None
+
+ def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
+ tb_offset = self.tb_offset if tb_offset is None else tb_offset
+ mode = self.mode
+ if mode in self.verbose_modes:
+ # Verbose modes need a full traceback
+ return VerboseTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context
+ )
+ else:
+ # We must check the source cache because otherwise we can print
+ # out-of-date source code.
+ self.check_cache()
+ # Now we can extract and format the exception
+ elist = self._extract_tb(tb)
+ return ListTB.structured_traceback(
+ self, etype, value, elist, tb_offset, number_of_lines_of_context
+ )
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return self.tb_join_char.join(stb)
+
+
+ def set_mode(self, mode=None):
+ """Switch to the desired mode.
+
+ If mode is not specified, cycles through the available modes."""
+
+ if not mode:
+ new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
+ len(self.valid_modes)
+ self.mode = self.valid_modes[new_idx]
+ elif mode not in self.valid_modes:
+ raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n'
+ 'Valid modes: ' + str(self.valid_modes))
+ else:
+ self.mode = mode
+ # include variable details only in 'Verbose' mode
+ self.include_vars = (self.mode == self.valid_modes[2])
+ # Set the join character for generating text tracebacks
+ self.tb_join_char = self._join_chars[self.mode]
+
+ # some convenient shortcuts
+ def plain(self):
+ self.set_mode(self.valid_modes[0])
+
+ def context(self):
+ self.set_mode(self.valid_modes[1])
+
+ def verbose(self):
+ self.set_mode(self.valid_modes[2])
+
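
A quick sketch of the mode machinery (set_mode() with no argument cycles Plain -> Context -> Verbose and back; only Verbose enables include_vars)::

    ftb = FormattedTB(mode='Plain', color_scheme='Linux')
    ftb.set_mode()                 # Plain -> Context
    ftb.set_mode()                 # Context -> Verbose
    assert ftb.mode == 'Verbose' and ftb.include_vars
    ftb.plain()                    # shortcut back to Plain
    assert ftb.tb_join_char == ''  # Plain parts are joined with ''
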
+
+#----------------------------------------------------------------------------
+class AutoFormattedTB(FormattedTB):
+ """A traceback printer which can be called on the fly.
+
+ It will find out about exceptions by itself.
+
+ A brief example::
+
+ AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux')
+ try:
+ ...
+ except:
+ AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
+ """
+
+ def __call__(self, etype=None, evalue=None, etb=None,
+ out=None, tb_offset=None):
+ """Print out a formatted exception traceback.
+
+ Optional arguments:
+ - out: an open file-like object to direct output to.
+
+ - tb_offset: the number of frames to skip over in the stack, on a
+          per-call basis (this temporarily overrides the instance's tb_offset
+          given at initialization time)."""
+
+ if out is None:
+ out = self.ostream
+ out.flush()
+ out.write(self.text(etype, evalue, etb, tb_offset))
+ out.write('\n')
+ out.flush()
+ # FIXME: we should remove the auto pdb behavior from here and leave
+ # that to the clients.
+ try:
+ self.debugger()
+ except KeyboardInterrupt:
+ print("\nKeyboardInterrupt")
+
+ def structured_traceback(self, etype=None, value=None, tb=None,
+ tb_offset=None, number_of_lines_of_context=5):
+ if etype is None:
+ etype, value, tb = sys.exc_info()
+ self.tb = tb
+ return FormattedTB.structured_traceback(
+ self, etype, value, tb, tb_offset, number_of_lines_of_context)
+
+
+#---------------------------------------------------------------------------
+
+# A simple class to preserve Nathan's original functionality.
+class ColorTB(FormattedTB):
+ """Shorthand to initialize a FormattedTB in Linux colors mode."""
+
+ def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
+ FormattedTB.__init__(self, color_scheme=color_scheme,
+ call_pdb=call_pdb, **kwargs)
+
+
+class SyntaxTB(ListTB):
+ """Extension which holds some state: the last exception value"""
+
+ def __init__(self, color_scheme='NoColor'):
+ ListTB.__init__(self, color_scheme)
+ self.last_syntax_error = None
+
+ def __call__(self, etype, value, elist):
+ self.last_syntax_error = value
+
+ ListTB.__call__(self, etype, value, elist)
+
+ def structured_traceback(self, etype, value, elist, tb_offset=None,
+ context=5):
+ # If the source file has been edited, the line in the syntax error can
+ # be wrong (retrieved from an outdated cache). This replaces it with
+ # the current value.
+ if isinstance(value, SyntaxError) \
+ and isinstance(value.filename, py3compat.string_types) \
+ and isinstance(value.lineno, int):
+ linecache.checkcache(value.filename)
+ newtext = ulinecache.getline(value.filename, value.lineno)
+ if newtext:
+ value.text = newtext
self.last_syntax_error = value
- return super(SyntaxTB, self).structured_traceback(etype, value, elist,
- tb_offset=tb_offset, context=context)
-
- def clear_err_state(self):
- """Return the current error state and clear it"""
- e = self.last_syntax_error
- self.last_syntax_error = None
- return e
-
- def stb2text(self, stb):
- """Convert a structured traceback (a list) to a string."""
- return ''.join(stb)
-
-
-# some internal-use functions
-def text_repr(value):
- """Hopefully pretty robust repr equivalent."""
- # this is pretty horrible but should always return *something*
- try:
- return pydoc.text.repr(value)
- except KeyboardInterrupt:
- raise
- except:
- try:
- return repr(value)
- except KeyboardInterrupt:
- raise
- except:
- try:
- # all still in an except block so we catch
- # getattr raising
- name = getattr(value, '__name__', None)
- if name:
- # ick, recursion
- return text_repr(name)
- klass = getattr(value, '__class__', None)
- if klass:
- return '%s instance' % text_repr(klass)
- except KeyboardInterrupt:
- raise
- except:
- return 'UNRECOVERABLE REPR FAILURE'
-
-
-def eqrepr(value, repr=text_repr):
- return '=%s' % repr(value)
-
-
-def nullrepr(value, repr=text_repr):
- return ''
+ return super(SyntaxTB, self).structured_traceback(etype, value, elist,
+ tb_offset=tb_offset, context=context)
+
+ def clear_err_state(self):
+ """Return the current error state and clear it"""
+ e = self.last_syntax_error
+ self.last_syntax_error = None
+ return e
+
+ def stb2text(self, stb):
+ """Convert a structured traceback (a list) to a string."""
+ return ''.join(stb)
+
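
A usage sketch for SyntaxTB (illustrative; it assumes ListTB.__call__ accepts an empty entry list)::

    import sys

    stb = SyntaxTB(color_scheme='NoColor')
    try:
        compile('def broken(:', '<test>', 'exec')
    except SyntaxError:
        etype, value, tb = sys.exc_info()
        stb(etype, value, [])          # records and prints the error
    err = stb.clear_err_state()        # fetch-and-reset the stored error
    assert err is not None and stb.last_syntax_error is None
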
+
+# some internal-use functions
+def text_repr(value):
+ """Hopefully pretty robust repr equivalent."""
+ # this is pretty horrible but should always return *something*
+ try:
+ return pydoc.text.repr(value)
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ return repr(value)
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ # all still in an except block so we catch
+ # getattr raising
+ name = getattr(value, '__name__', None)
+ if name:
+ # ick, recursion
+ return text_repr(name)
+ klass = getattr(value, '__class__', None)
+ if klass:
+ return '%s instance' % text_repr(klass)
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 'UNRECOVERABLE REPR FAILURE'
+
+
+def eqrepr(value, repr=text_repr):
+ return '=%s' % repr(value)
+
+
+def nullrepr(value, repr=text_repr):
+ return ''
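
A quick check of the helpers above (text_repr must return *something* printable even when __repr__ raises)::

    class Broken(object):
        def __repr__(self):
            raise RuntimeError('no repr')

    print(text_repr(Broken()))   # placeholder text rather than an exception
    print(eqrepr(42))            # '=42'
    print(nullrepr(42))          # ''
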
diff --git a/contrib/python/ipython/py2/IPython/core/usage.py b/contrib/python/ipython/py2/IPython/core/usage.py
index 8a890c7792..c4d3c16eca 100644
--- a/contrib/python/ipython/py2/IPython/core/usage.py
+++ b/contrib/python/ipython/py2/IPython/core/usage.py
@@ -1,344 +1,344 @@
-# -*- coding: utf-8 -*-
-"""Usage information for the main IPython applications.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-import sys
-from IPython.core import release
-
-cl_usage = """\
-=========
- IPython
-=========
-
-Tools for Interactive Computing in Python
-=========================================
-
- A Python shell with automatic history (input and output), dynamic object
- introspection, easier configuration, command completion, access to the
- system shell and more. IPython can also be embedded in running programs.
-
-
-Usage
-
- ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
-
- If invoked with no options, it executes the file and exits, passing the
- remaining arguments to the script, just as if you had specified the same
- command with python. You may need to specify `--` before args to be passed
- to the script, to prevent IPython from attempting to parse them. If you
- specify the option `-i` before the filename, it will enter an interactive
- IPython session after running the script, rather than exiting. Files ending
- in .py will be treated as normal Python, but files ending in .ipy can
- contain special IPython syntax (magic commands, shell expansions, etc.).
-
- Almost all configuration in IPython is available via the command-line. Do
- `ipython --help-all` to see all available options. For persistent
- configuration, look into your `ipython_config.py` configuration file for
- details.
-
- This file is typically installed in the `IPYTHONDIR` directory, and there
- is a separate configuration directory for each profile. The default profile
- directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
-    defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
- C:\\Users\\YourUserName in most instances.
-
- To initialize a profile with the default configuration file, do::
-
- $> ipython profile create
-
- and start editing `IPYTHONDIR/profile_default/ipython_config.py`
-
- In IPython's documentation, we will refer to this directory as
-    `IPYTHONDIR`; you can change its default location by creating an
- environment variable with this name and setting it to the desired path.
-
- For more information, see the manual available in HTML and PDF in your
- installation, or online at http://ipython.org/documentation.html.
-"""
-
-interactive_usage = """
-IPython -- An enhanced Interactive Python
-=========================================
-
+# -*- coding: utf-8 -*-
+"""Usage information for the main IPython applications.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+import sys
+from IPython.core import release
+
+cl_usage = """\
+=========
+ IPython
+=========
+
+Tools for Interactive Computing in Python
+=========================================
+
+ A Python shell with automatic history (input and output), dynamic object
+ introspection, easier configuration, command completion, access to the
+ system shell and more. IPython can also be embedded in running programs.
+
+
+Usage
+
+ ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
+
+ If invoked with no options, it executes the file and exits, passing the
+ remaining arguments to the script, just as if you had specified the same
+ command with python. You may need to specify `--` before args to be passed
+ to the script, to prevent IPython from attempting to parse them. If you
+ specify the option `-i` before the filename, it will enter an interactive
+ IPython session after running the script, rather than exiting. Files ending
+ in .py will be treated as normal Python, but files ending in .ipy can
+ contain special IPython syntax (magic commands, shell expansions, etc.).
+
+ Almost all configuration in IPython is available via the command-line. Do
+ `ipython --help-all` to see all available options. For persistent
+ configuration, look into your `ipython_config.py` configuration file for
+ details.
+
+ This file is typically installed in the `IPYTHONDIR` directory, and there
+ is a separate configuration directory for each profile. The default profile
+ directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
+    defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
+ C:\\Users\\YourUserName in most instances.
+
+ To initialize a profile with the default configuration file, do::
+
+ $> ipython profile create
+
+ and start editing `IPYTHONDIR/profile_default/ipython_config.py`
+
+ In IPython's documentation, we will refer to this directory as
+    `IPYTHONDIR`; you can change its default location by creating an
+ environment variable with this name and setting it to the desired path.
+
+ For more information, see the manual available in HTML and PDF in your
+ installation, or online at http://ipython.org/documentation.html.
+"""
+
+interactive_usage = """
+IPython -- An enhanced Interactive Python
+=========================================
+
IPython offers a fully compatible replacement for the standard Python
interpreter, with convenient shell features, special commands, command
history mechanism and output results caching.
-
-At your system command line, type 'ipython -h' to see the command line
-options available. This document only describes interactive features.
-
-MAIN FEATURES
--------------
-
+
+At your system command line, type 'ipython -h' to see the command line
+options available. This document only describes interactive features.
+
+MAIN FEATURES
+-------------
+
* Access to the standard Python help with object docstrings and the Python
manuals. Simply type 'help' (no quotes) to invoke it.
-
-* Magic commands: type %magic for information on the magic subsystem.
-
-* System command aliases, via the %alias command or the configuration file(s).
-
-* Dynamic object information:
-
+
+* Magic commands: type %magic for information on the magic subsystem.
+
+* System command aliases, via the %alias command or the configuration file(s).
+
+* Dynamic object information:
+
Typing ?word or word? prints detailed information about an object. Certain
long strings (code, etc.) get snipped in the center for brevity.
-
- Typing ??word or word?? gives access to the full information without
+
+ Typing ??word or word?? gives access to the full information without
snipping long strings. Strings that are longer than the screen are printed
through the less pager.
-
- The ?/?? system gives access to the full source code for any object (if
- available), shows function prototypes and other useful information.
-
- If you just want to see an object's docstring, type '%pdoc object' (without
- quotes, and without % if you have automagic on).
-
+
+ The ?/?? system gives access to the full source code for any object (if
+ available), shows function prototypes and other useful information.
+
+ If you just want to see an object's docstring, type '%pdoc object' (without
+ quotes, and without % if you have automagic on).
+
* Tab completion in the local namespace:
-
- At any time, hitting tab will complete any available python commands or
- variable names, and show you a list of the possible completions if there's
- no unambiguous one. It will also complete filenames in the current directory.
-
+
+ At any time, hitting tab will complete any available python commands or
+ variable names, and show you a list of the possible completions if there's
+ no unambiguous one. It will also complete filenames in the current directory.
+
* Search previous command history in multiple ways:
-
+
- Start typing, and then use the up/down arrow keys (or Ctrl-p/Ctrl-n) to search
through the history items that match what you've typed so far.
-
- - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
- your history for lines that match what you've typed so far, completing as
- much as it can.
-
+
+ - Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
+ your history for lines that match what you've typed so far, completing as
+ much as it can.
+
- %hist: search history by index.
-
-* Persistent command history across sessions.
-
-* Logging of input with the ability to save and restore a working session.
-
+
+* Persistent command history across sessions.
+
+* Logging of input with the ability to save and restore a working session.
+
* System shell with !. Typing !ls will run 'ls' in the current directory.
-
-* The reload command does a 'deep' reload of a module: changes made to the
- module since you imported will actually be available without having to exit.
-
-* Verbose and colored exception traceback printouts. See the magic xmode and
- xcolor functions for details (just type %magic).
-
-* Input caching system:
-
- IPython offers numbered prompts (In/Out) with input and output caching. All
- input is saved and can be retrieved as variables (besides the usual arrow
- key recall).
-
- The following GLOBAL variables always exist (so don't overwrite them!):
- _i: stores previous input.
- _ii: next previous.
- _iii: next-next previous.
-      _ih : a list of all input; _ih[n] is the input from line n.
-
- Additionally, global variables named _i<n> are dynamically created (<n>
-    being the prompt counter), such that _i<n> == _ih[<n>].
-
- For example, what you typed at prompt 14 is available as _i14 and _ih[14].
-
- You can create macros which contain multiple input lines from this history,
- for later re-execution, with the %macro function.
-
- The history function %hist allows you to see any part of your input history
- by printing a range of the _i variables. Note that inputs which contain
- magic functions (%) appear in the history with a prepended comment. This is
- because they aren't really valid Python code, so you can't exec them.
-
-* Output caching system:
-
- For output that is returned from actions, a system similar to the input
- cache exists but using _ instead of _i. Only actions that produce a result
- (NOT assignments, for example) are cached. If you are familiar with
- Mathematica, IPython's _ variables behave exactly like Mathematica's %
- variables.
-
- The following GLOBAL variables always exist (so don't overwrite them!):
- _ (one underscore): previous output.
- __ (two underscores): next previous.
- ___ (three underscores): next-next previous.
-
- Global variables named _<n> are dynamically created (<n> being the prompt
- counter), such that the result of output <n> is always available as _<n>.
-
- Finally, a global dictionary named _oh exists with entries for all lines
- which generated output.
-
-* Directory history:
-
- Your history of visited directories is kept in the global list _dh, and the
- magic %cd command can be used to go to any entry in that list.
-
-* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
-
- 1. Auto-parentheses
-
- Callable objects (i.e. functions, methods, etc) can be invoked like
- this (notice the commas between the arguments)::
+
+* The reload command does a 'deep' reload of a module: changes made to the
+ module since you imported will actually be available without having to exit.
+
+* Verbose and colored exception traceback printouts. See the magic xmode and
+ xcolor functions for details (just type %magic).
+
+* Input caching system:
+
+ IPython offers numbered prompts (In/Out) with input and output caching. All
+ input is saved and can be retrieved as variables (besides the usual arrow
+ key recall).
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _i: stores previous input.
+ _ii: next previous.
+ _iii: next-next previous.
+      _ih : a list of all input; _ih[n] is the input from line n.
+
+ Additionally, global variables named _i<n> are dynamically created (<n>
+    being the prompt counter), such that _i<n> == _ih[<n>].
+
+ For example, what you typed at prompt 14 is available as _i14 and _ih[14].
+
+ You can create macros which contain multiple input lines from this history,
+ for later re-execution, with the %macro function.
+
+ The history function %hist allows you to see any part of your input history
+ by printing a range of the _i variables. Note that inputs which contain
+ magic functions (%) appear in the history with a prepended comment. This is
+ because they aren't really valid Python code, so you can't exec them.
+
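  An illustrative session (prompt numbers arbitrary)::

      In [14]: x = 2 + 2

      In [15]: _i14 == _ih[14]
      Out[15]: True
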
+* Output caching system:
+
+ For output that is returned from actions, a system similar to the input
+ cache exists but using _ instead of _i. Only actions that produce a result
+ (NOT assignments, for example) are cached. If you are familiar with
+ Mathematica, IPython's _ variables behave exactly like Mathematica's %
+ variables.
+
+ The following GLOBAL variables always exist (so don't overwrite them!):
+ _ (one underscore): previous output.
+ __ (two underscores): next previous.
+ ___ (three underscores): next-next previous.
+
+ Global variables named _<n> are dynamically created (<n> being the prompt
+ counter), such that the result of output <n> is always available as _<n>.
+
+ Finally, a global dictionary named _oh exists with entries for all lines
+ which generated output.
+
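
A minimal sketch, assuming a running IPython session (``get_ipython`` is
defined only there), of how the input and output caches described above
can be inspected programmatically::

    ip = get_ipython()
    print(ip.user_ns['_ih'][:5])      # first few cached input lines
    print(ip.user_ns.get('_oh', {}))  # {prompt_number: output} mapping
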
+* Directory history:
+
+ Your history of visited directories is kept in the global list _dh, and the
+ magic %cd command can be used to go to any entry in that list.
+
+* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
+
+ 1. Auto-parentheses
- In [1]: callable_ob arg1, arg2, arg3
-
- and the input will be translated to this::
-
- callable_ob(arg1, arg2, arg3)
-
- This feature is off by default (in rare cases it can produce
- undesirable side-effects), but you can activate it at the command-line
- by starting IPython with `--autocall 1`, set it permanently in your
-   configuration file, or turn it on at runtime with `%autocall 1`.
-
- You can force auto-parentheses by using '/' as the first character
- of a line. For example::
-
- In [1]: /globals # becomes 'globals()'
-
- Note that the '/' MUST be the first character on the line! This
- won't work::
-
- In [2]: print /globals # syntax error
-
- In most cases the automatic algorithm should work, so you should
- rarely need to explicitly invoke /. One notable exception is if you
- are trying to call a function with a list of tuples as arguments (the
-   parentheses will confuse IPython)::
-
- In [1]: zip (1,2,3),(4,5,6) # won't work
-
- but this will work::
-
- In [2]: /zip (1,2,3),(4,5,6)
- ------> zip ((1,2,3),(4,5,6))
- Out[2]= [(1, 4), (2, 5), (3, 6)]
-
- IPython tells you that it has altered your command line by
- displaying the new command line preceded by -->. e.g.::
-
- In [18]: callable list
- -------> callable (list)
-
- 2. Auto-Quoting
-
- You can force auto-quoting of a function's arguments by using ',' as
- the first character of a line. For example::
-
- In [1]: ,my_function /home/me # becomes my_function("/home/me")
-
- If you use ';' instead, the whole argument is quoted as a single
- string (while ',' splits on whitespace)::
-
- In [2]: ,my_function a b c # becomes my_function("a","b","c")
- In [3]: ;my_function a b c # becomes my_function("a b c")
-
- Note that the ',' MUST be the first character on the line! This
- won't work::
-
- In [4]: x = ,my_function /home/me # syntax error
-"""
-
-interactive_usage_min = """\
-An enhanced console for Python.
-Some of its features are:
-- Tab completion in the local namespace.
-- Logging of input, see command-line options.
-- System shell escape via ! , e.g. !ls.
-- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
-- Keeps track of locally defined variables via %who, %whos.
-- Show object information with a ? eg ?x or x? (use ?? for more info).
-"""
-
-quick_reference = r"""
-IPython -- An enhanced Interactive Python - Quick Reference Card
-================================================================
-
-obj?, obj?? : Get help, or more help for object (also works as
- ?obj, ??obj).
-?foo.*abc* : List names in 'foo' containing 'abc' in them.
-%magic : Information about IPython's 'magic' % functions.
-
-Magic functions are prefixed by % or %%, and typically take their arguments
-without parentheses, quotes or even commas for convenience. Line magics take a
-single % and cell magics are prefixed with two %%.
-
-Example magic function calls:
-
-%alias d ls -F : 'd' is now an alias for 'ls -F'
-alias d ls -F : Works if 'alias' not a python name
-alist = %alias : Get list of aliases to 'alist'
-cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
-%cd?? : See help AND source for magic %cd
-%timeit x=10 : time the 'x=10' statement with high precision.
-%%timeit x=2**100
-x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
- counted. This is an example of a cell magic.
-
-System commands:
-
-!cp a.txt b/ : System command escape, calls os.system()
-cp a.txt b/ : after %rehashx, most system commands work without !
-cp ${f}.txt $bar : Variable expansion in magics and system commands
-files = !ls /usr : Capture system command output
-files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
-
-History:
-
-_i, _ii, _iii : Previous, next previous, next next previous input
-_i4, _ih[2:5] : Input history line 4, lines 2-4
-exec _i81 : Execute input history line #81 again
-%rep 81 : Edit input history line #81
-_, __, ___ : previous, next previous, next next previous output
-_dh : Directory history
-_oh : Output history
-%hist : Command history of current session.
-%hist -g foo : Search command history of (almost) all sessions for 'foo'.
-%hist -g : Command history of (almost) all sessions.
-%hist 1/2-8 : Command history containing lines 2-8 of session 1.
-%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
-%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
- line 5 of 6 sessions ago.
-%edit 0/ : Open editor to execute code with history of current session.
-
-Autocall:
-
-f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
-/f 1,2 : f(1,2) (forced autoparen)
-,f 1 2 : f("1","2")
-;f 1 2 : f("1 2")
-
-Remember: TAB completion works in many contexts, not just file names
-or python names.
-
-The following magic functions are currently available:
-
-"""
-
-quick_guide = """\
-? -> Introduction and overview of IPython's features.
-%quickref -> Quick reference.
-help -> Python's own help system.
-object? -> Details about 'object', use 'object??' for extra details.
-"""
-
-default_banner_parts = [
- 'Python %s\n' % (sys.version.split('\n')[0],),
- 'Type "copyright", "credits" or "license" for more information.\n\n',
- 'IPython {version} -- An enhanced Interactive Python.\n'.format(
- version=release.version,
- ),
- quick_guide
-]
-
-default_banner = ''.join(default_banner_parts)
-
+ Callable objects (i.e. functions, methods, etc) can be invoked like
+ this (notice the commas between the arguments)::
+
+ In [1]: callable_ob arg1, arg2, arg3
+
+ and the input will be translated to this::
+
+ callable_ob(arg1, arg2, arg3)
+
+ This feature is off by default (in rare cases it can produce
+ undesirable side-effects), but you can activate it at the command-line
+ by starting IPython with `--autocall 1`, set it permanently in your
+   configuration file, or turn it on at runtime with `%autocall 1`.
+
+ You can force auto-parentheses by using '/' as the first character
+ of a line. For example::
+
+ In [1]: /globals # becomes 'globals()'
+
+ Note that the '/' MUST be the first character on the line! This
+ won't work::
+
+ In [2]: print /globals # syntax error
+
+ In most cases the automatic algorithm should work, so you should
+ rarely need to explicitly invoke /. One notable exception is if you
+ are trying to call a function with a list of tuples as arguments (the
+   parentheses will confuse IPython)::
+
+ In [1]: zip (1,2,3),(4,5,6) # won't work
+
+ but this will work::
+
+ In [2]: /zip (1,2,3),(4,5,6)
+ ------> zip ((1,2,3),(4,5,6))
+ Out[2]= [(1, 4), (2, 5), (3, 6)]
+
+ IPython tells you that it has altered your command line by
+ displaying the new command line preceded by -->. e.g.::
+
+ In [18]: callable list
+ -------> callable (list)
+
+ 2. Auto-Quoting
+
+ You can force auto-quoting of a function's arguments by using ',' as
+ the first character of a line. For example::
+
+ In [1]: ,my_function /home/me # becomes my_function("/home/me")
+
+ If you use ';' instead, the whole argument is quoted as a single
+ string (while ',' splits on whitespace)::
+
+ In [2]: ,my_function a b c # becomes my_function("a","b","c")
+ In [3]: ;my_function a b c # becomes my_function("a b c")
+
+ Note that the ',' MUST be the first character on the line! This
+ won't work::
+
+ In [4]: x = ,my_function /home/me # syntax error
+"""
+
+interactive_usage_min = """\
+An enhanced console for Python.
+Some of its features are:
+- Tab completion in the local namespace.
+- Logging of input, see command-line options.
+- System shell escape via ! , e.g. !ls.
+- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
+- Keeps track of locally defined variables via %who, %whos.
+- Show object information with a ? eg ?x or x? (use ?? for more info).
+"""
+
+quick_reference = r"""
+IPython -- An enhanced Interactive Python - Quick Reference Card
+================================================================
+
+obj?, obj?? : Get help, or more help for object (also works as
+ ?obj, ??obj).
+?foo.*abc* : List names in 'foo' containing 'abc' in them.
+%magic : Information about IPython's 'magic' % functions.
+
+Magic functions are prefixed by % or %%, and typically take their arguments
+without parentheses, quotes or even commas for convenience. Line magics take a
+single % and cell magics are prefixed with two %%.
+
+Example magic function calls:
+
+%alias d ls -F : 'd' is now an alias for 'ls -F'
+alias d ls -F : Works if 'alias' not a python name
+alist = %alias : Get list of aliases to 'alist'
+cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
+%cd?? : See help AND source for magic %cd
+%timeit x=10 : time the 'x=10' statement with high precision.
+%%timeit x=2**100
+x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
+ counted. This is an example of a cell magic.
+
+System commands:
+
+!cp a.txt b/ : System command escape, calls os.system()
+cp a.txt b/ : after %rehashx, most system commands work without !
+cp ${f}.txt $bar : Variable expansion in magics and system commands
+files = !ls /usr : Capture system command output
+files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
+
+History:
+
+_i, _ii, _iii : Previous, next previous, next next previous input
+_i4, _ih[2:5] : Input history line 4, lines 2-4
+exec _i81 : Execute input history line #81 again
+%rep 81 : Edit input history line #81
+_, __, ___ : previous, next previous, next next previous output
+_dh : Directory history
+_oh : Output history
+%hist : Command history of current session.
+%hist -g foo : Search command history of (almost) all sessions for 'foo'.
+%hist -g : Command history of (almost) all sessions.
+%hist 1/2-8 : Command history containing lines 2-8 of session 1.
+%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
+%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
+ line 5 of 6 sessions ago.
+%edit 0/ : Open editor to execute code with history of current session.
+
+Autocall:
+
+f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
+/f 1,2 : f(1,2) (forced autoparen)
+,f 1 2 : f("1","2")
+;f 1 2 : f("1 2")
+
+Remember: TAB completion works in many contexts, not just file names
+or python names.
+
+The following magic functions are currently available:
+
+"""
+
+quick_guide = """\
+? -> Introduction and overview of IPython's features.
+%quickref -> Quick reference.
+help -> Python's own help system.
+object? -> Details about 'object', use 'object??' for extra details.
+"""
+
+default_banner_parts = [
+ 'Python %s\n' % (sys.version.split('\n')[0],),
+ 'Type "copyright", "credits" or "license" for more information.\n\n',
+ 'IPython {version} -- An enhanced Interactive Python.\n'.format(
+ version=release.version,
+ ),
+ quick_guide
+]
+
+default_banner = ''.join(default_banner_parts)
+
# deprecated GUI banner
-
+
default_gui_banner = '\n'.join([
'DEPRECATED: IPython.core.usage.default_gui_banner is deprecated and will be removed',
default_banner,
diff --git a/contrib/python/ipython/py2/IPython/display.py b/contrib/python/ipython/py2/IPython/display.py
index 872b93e92b..7d248ba023 100644
--- a/contrib/python/ipython/py2/IPython/display.py
+++ b/contrib/python/ipython/py2/IPython/display.py
@@ -1,16 +1,16 @@
-"""Public API for display tools in IPython.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2012 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from IPython.core.display import *
-from IPython.lib.display import *
+"""Public API for display tools in IPython.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from IPython.core.display import *
+from IPython.lib.display import *
diff --git a/contrib/python/ipython/py2/IPython/extensions/__init__.py b/contrib/python/ipython/py2/IPython/extensions/__init__.py
index acc4dc7601..db7f79fca6 100644
--- a/contrib/python/ipython/py2/IPython/extensions/__init__.py
+++ b/contrib/python/ipython/py2/IPython/extensions/__init__.py
@@ -1,2 +1,2 @@
-# -*- coding: utf-8 -*-
-"""This directory is meant for IPython extensions."""
+# -*- coding: utf-8 -*-
+"""This directory is meant for IPython extensions."""
diff --git a/contrib/python/ipython/py2/IPython/extensions/autoreload.py b/contrib/python/ipython/py2/IPython/extensions/autoreload.py
index 9406e697d2..d3e420574d 100644
--- a/contrib/python/ipython/py2/IPython/extensions/autoreload.py
+++ b/contrib/python/ipython/py2/IPython/extensions/autoreload.py
@@ -1,536 +1,536 @@
-"""IPython extension to reload modules before executing user code.
-
-``autoreload`` reloads modules automatically before entering the execution of
-code typed at the IPython prompt.
-
-This makes, for example, the following workflow possible:
-
-.. sourcecode:: ipython
-
- In [1]: %load_ext autoreload
-
- In [2]: %autoreload 2
-
- In [3]: from foo import some_function
-
- In [4]: some_function()
- Out[4]: 42
-
- In [5]: # open foo.py in an editor and change some_function to return 43
-
- In [6]: some_function()
- Out[6]: 43
-
-The module was reloaded without reloading it explicitly, and the object
-imported with ``from foo import ...`` was also updated.
-
-Usage
-=====
-
-The following magic commands are provided:
-
-``%autoreload``
-
- Reload all modules (except those excluded by ``%aimport``)
- automatically now.
-
-``%autoreload 0``
-
- Disable automatic reloading.
-
-``%autoreload 1``
-
- Reload all modules imported with ``%aimport`` every time before
- executing the Python code typed.
-
-``%autoreload 2``
-
- Reload all modules (except those excluded by ``%aimport``) every
- time before executing the Python code typed.
-
-``%aimport``
-
- List modules which are to be automatically imported or not to be imported.
-
-``%aimport foo``
-
- Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
-
-``%aimport -foo``
-
- Mark module 'foo' to not be autoreloaded.
-
-Caveats
-=======
-
-Reloading Python modules in a reliable way is in general difficult,
-and unexpected things may occur. ``%autoreload`` tries to work around
-common pitfalls by replacing function code objects and parts of
-classes previously in the module with new versions. This makes the
-following things work:
-
-- Functions and classes imported via 'from xxx import foo' are upgraded
- to new versions when 'xxx' is reloaded.
-
-- Methods and properties of classes are upgraded on reload, so that
- calling 'c.foo()' on an object 'c' created before the reload causes
- the new code for 'foo' to be executed.
-
-Some of the known remaining caveats are:
-
-- Replacing code objects does not always succeed: changing a @property
- in a class to an ordinary method or a method to a member variable
- can cause problems (but in old objects only).
-
-- Functions that are removed (e.g. via monkey-patching) from a module
- before it is reloaded are not upgraded.
-
-- C extension modules cannot be reloaded, and so cannot be autoreloaded.
-"""
-from __future__ import print_function
-
-skip_doctest = True
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2000 Thomas Heller
-# Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
-# Copyright (C) 2012 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-#
-# This IPython module is written by Pauli Virtanen, based on the autoreload
-# code by Thomas Heller.
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-import sys
-import traceback
-import types
-import weakref
-
-try:
- # Reload is not defined by default in Python3.
- reload
-except NameError:
- from imp import reload
-
-from IPython.utils import openpy
-from IPython.utils.py3compat import PY3
-
-#------------------------------------------------------------------------------
-# Autoreload functionality
-#------------------------------------------------------------------------------
-
-class ModuleReloader(object):
- enabled = False
- """Whether this reloader is enabled"""
-
- check_all = True
- """Autoreload all modules, not just those listed in 'modules'"""
-
- def __init__(self):
- # Modules that failed to reload: {module: mtime-on-failed-reload, ...}
- self.failed = {}
- # Modules specially marked as autoreloadable.
- self.modules = {}
- # Modules specially marked as not autoreloadable.
- self.skip_modules = {}
- # (module-name, name) -> weakref, for replacing old code objects
- self.old_objects = {}
- # Module modification timestamps
- self.modules_mtimes = {}
-
- # Cache module modification times
- self.check(check_all=True, do_reload=False)
-
- def mark_module_skipped(self, module_name):
- """Skip reloading the named module in the future"""
- try:
- del self.modules[module_name]
- except KeyError:
- pass
- self.skip_modules[module_name] = True
-
- def mark_module_reloadable(self, module_name):
- """Reload the named module in the future (if it is imported)"""
- try:
- del self.skip_modules[module_name]
- except KeyError:
- pass
- self.modules[module_name] = True
-
- def aimport_module(self, module_name):
- """Import a module, and mark it reloadable
-
- Returns
- -------
- top_module : module
-            The imported module if it is top-level, otherwise the top-level
-            package containing it.
-        top_name : str
-            Name of top_module.
-
- """
- self.mark_module_reloadable(module_name)
-
- __import__(module_name)
- top_name = module_name.split('.')[0]
- top_module = sys.modules[top_name]
- return top_module, top_name
-
- def filename_and_mtime(self, module):
- if not hasattr(module, '__file__') or module.__file__ is None:
- return None, None
-
+"""IPython extension to reload modules before executing user code.
+
+``autoreload`` reloads modules automatically before entering the execution of
+code typed at the IPython prompt.
+
+This makes, for example, the following workflow possible:
+
+.. sourcecode:: ipython
+
+ In [1]: %load_ext autoreload
+
+ In [2]: %autoreload 2
+
+ In [3]: from foo import some_function
+
+ In [4]: some_function()
+ Out[4]: 42
+
+ In [5]: # open foo.py in an editor and change some_function to return 43
+
+ In [6]: some_function()
+ Out[6]: 43
+
+The module was reloaded without reloading it explicitly, and the object
+imported with ``from foo import ...`` was also updated.
+
+Usage
+=====
+
+The following magic commands are provided:
+
+``%autoreload``
+
+ Reload all modules (except those excluded by ``%aimport``)
+ automatically now.
+
+``%autoreload 0``
+
+ Disable automatic reloading.
+
+``%autoreload 1``
+
+ Reload all modules imported with ``%aimport`` every time before
+ executing the Python code typed.
+
+``%autoreload 2``
+
+ Reload all modules (except those excluded by ``%aimport``) every
+ time before executing the Python code typed.
+
+``%aimport``
+
+ List modules which are to be automatically imported or not to be imported.
+
+``%aimport foo``
+
+ Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
+
+``%aimport -foo``
+
+ Mark module 'foo' to not be autoreloaded.
+
+Caveats
+=======
+
+Reloading Python modules in a reliable way is in general difficult,
+and unexpected things may occur. ``%autoreload`` tries to work around
+common pitfalls by replacing function code objects and parts of
+classes previously in the module with new versions. This makes the
+following things work:
+
+- Functions and classes imported via 'from xxx import foo' are upgraded
+ to new versions when 'xxx' is reloaded.
+
+- Methods and properties of classes are upgraded on reload, so that
+ calling 'c.foo()' on an object 'c' created before the reload causes
+ the new code for 'foo' to be executed.
+
+Some of the known remaining caveats are:
+
+- Replacing code objects does not always succeed: changing a @property
+ in a class to an ordinary method or a method to a member variable
+ can cause problems (but in old objects only).
+
+- Functions that are removed (e.g. via monkey-patching) from a module
+ before it is reloaded are not upgraded.
+
+- C extension modules cannot be reloaded, and so cannot be autoreloaded.
+"""
+from __future__ import print_function
+
+skip_doctest = True
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2000 Thomas Heller
+# Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
+# Copyright (C) 2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+#
+# This IPython module is written by Pauli Virtanen, based on the autoreload
+# code by Thomas Heller.
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import traceback
+import types
+import weakref
+
+try:
+ # Reload is not defined by default in Python3.
+ reload
+except NameError:
+ from imp import reload
+
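
A minimal sketch, assuming Python 3.4+ (where the ``imp`` module is
deprecated), of the same guard spelled with ``importlib``::

    try:
        reload                            # builtin on Python 2
    except NameError:
        from importlib import reload      # Python 3.4+
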
+from IPython.utils import openpy
+from IPython.utils.py3compat import PY3
+
+#------------------------------------------------------------------------------
+# Autoreload functionality
+#------------------------------------------------------------------------------
+
+class ModuleReloader(object):
+ enabled = False
+ """Whether this reloader is enabled"""
+
+ check_all = True
+ """Autoreload all modules, not just those listed in 'modules'"""
+
+ def __init__(self):
+ # Modules that failed to reload: {module: mtime-on-failed-reload, ...}
+ self.failed = {}
+ # Modules specially marked as autoreloadable.
+ self.modules = {}
+ # Modules specially marked as not autoreloadable.
+ self.skip_modules = {}
+ # (module-name, name) -> weakref, for replacing old code objects
+ self.old_objects = {}
+ # Module modification timestamps
+ self.modules_mtimes = {}
+
+ # Cache module modification times
+ self.check(check_all=True, do_reload=False)
+
+ def mark_module_skipped(self, module_name):
+ """Skip reloading the named module in the future"""
+ try:
+ del self.modules[module_name]
+ except KeyError:
+ pass
+ self.skip_modules[module_name] = True
+
+ def mark_module_reloadable(self, module_name):
+ """Reload the named module in the future (if it is imported)"""
+ try:
+ del self.skip_modules[module_name]
+ except KeyError:
+ pass
+ self.modules[module_name] = True
+
+ def aimport_module(self, module_name):
+ """Import a module, and mark it reloadable
+
+ Returns
+ -------
+ top_module : module
+            The imported module if it is top-level, otherwise the top-level
+            package containing it.
+        top_name : str
+            Name of top_module.
+
+ """
+ self.mark_module_reloadable(module_name)
+
+ __import__(module_name)
+ top_name = module_name.split('.')[0]
+ top_module = sys.modules[top_name]
+ return top_module, top_name
+
+ def filename_and_mtime(self, module):
+ if not hasattr(module, '__file__') or module.__file__ is None:
+ return None, None
+
if getattr(module, '__name__', None) in [None, '__mp_main__', '__main__']:
# we cannot reload(__main__) or reload(__mp_main__)
- return None, None
-
- filename = module.__file__
- path, ext = os.path.splitext(filename)
-
- if ext.lower() == '.py':
- py_filename = filename
- else:
- try:
- py_filename = openpy.source_from_cache(filename)
- except ValueError:
- return None, None
-
- try:
- pymtime = os.stat(py_filename).st_mtime
- except OSError:
- return None, None
-
- return py_filename, pymtime
-
- def check(self, check_all=False, do_reload=True):
- """Check whether some modules need to be reloaded."""
-
- if not self.enabled and not check_all:
- return
-
- if check_all or self.check_all:
- modules = list(sys.modules.keys())
- else:
- modules = list(self.modules.keys())
-
- for modname in modules:
- m = sys.modules.get(modname, None)
-
- if modname in self.skip_modules:
- continue
-
- py_filename, pymtime = self.filename_and_mtime(m)
- if py_filename is None:
- continue
-
- try:
- if pymtime <= self.modules_mtimes[modname]:
- continue
- except KeyError:
- self.modules_mtimes[modname] = pymtime
- continue
- else:
- if self.failed.get(py_filename, None) == pymtime:
- continue
-
- self.modules_mtimes[modname] = pymtime
-
- # If we've reached this point, we should try to reload the module
- if do_reload:
- try:
- superreload(m, reload, self.old_objects)
- if py_filename in self.failed:
- del self.failed[py_filename]
- except:
- print("[autoreload of %s failed: %s]" % (
- modname, traceback.format_exc(1)), file=sys.stderr)
- self.failed[py_filename] = pymtime
-
-#------------------------------------------------------------------------------
-# superreload
-#------------------------------------------------------------------------------
-
-if PY3:
- func_attrs = ['__code__', '__defaults__', '__doc__',
- '__closure__', '__globals__', '__dict__']
-else:
- func_attrs = ['func_code', 'func_defaults', 'func_doc',
- 'func_closure', 'func_globals', 'func_dict']
-
-
-def update_function(old, new):
- """Upgrade the code object of a function"""
- for name in func_attrs:
- try:
- setattr(old, name, getattr(new, name))
- except (AttributeError, TypeError):
- pass
-
-
-def update_class(old, new):
- """Replace stuff in the __dict__ of a class, and upgrade
- method code objects"""
- for key in list(old.__dict__.keys()):
- old_obj = getattr(old, key)
-
- try:
- new_obj = getattr(new, key)
- except AttributeError:
- # obsolete attribute: remove it
- try:
- delattr(old, key)
- except (AttributeError, TypeError):
- pass
- continue
-
- if update_generic(old_obj, new_obj): continue
-
- try:
- setattr(old, key, getattr(new, key))
- except (AttributeError, TypeError):
- pass # skip non-writable attributes
-
-
-def update_property(old, new):
- """Replace get/set/del functions of a property"""
- update_generic(old.fdel, new.fdel)
- update_generic(old.fget, new.fget)
- update_generic(old.fset, new.fset)
-
-
-def isinstance2(a, b, typ):
- return isinstance(a, typ) and isinstance(b, typ)
-
-
-UPDATE_RULES = [
- (lambda a, b: isinstance2(a, b, type),
- update_class),
- (lambda a, b: isinstance2(a, b, types.FunctionType),
- update_function),
- (lambda a, b: isinstance2(a, b, property),
- update_property),
-]
-
-
-if PY3:
- UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType),
- lambda a, b: update_function(a.__func__, b.__func__)),
- ])
-else:
- UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.ClassType),
- update_class),
- (lambda a, b: isinstance2(a, b, types.MethodType),
- lambda a, b: update_function(a.__func__, b.__func__)),
- ])
-
-
-def update_generic(a, b):
- for type_check, update in UPDATE_RULES:
- if type_check(a, b):
- update(a, b)
- return True
- return False
-
-
-class StrongRef(object):
- def __init__(self, obj):
- self.obj = obj
- def __call__(self):
- return self.obj
-
-
-def superreload(module, reload=reload, old_objects={}):
- """Enhanced version of the builtin reload function.
-
- superreload remembers objects previously in the module, and
-
- - upgrades the class dictionary of every old class in the module
- - upgrades the code object of every old function and method
- - clears the module's namespace before reloading
-
- """
-
- # collect old objects in the module
- for name, obj in list(module.__dict__.items()):
- if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
- continue
- key = (module.__name__, name)
- try:
- old_objects.setdefault(key, []).append(weakref.ref(obj))
- except TypeError:
- # weakref doesn't work for all types;
- # create strong references for 'important' cases
- if not PY3 and isinstance(obj, types.ClassType):
- old_objects.setdefault(key, []).append(StrongRef(obj))
-
- # reload module
- try:
- # clear namespace first from old cruft
- old_dict = module.__dict__.copy()
- old_name = module.__name__
- module.__dict__.clear()
- module.__dict__['__name__'] = old_name
- module.__dict__['__loader__'] = old_dict['__loader__']
- except (TypeError, AttributeError, KeyError):
- pass
-
- try:
- module = reload(module)
- except:
- # restore module dictionary on failed reload
- module.__dict__.update(old_dict)
- raise
-
- # iterate over all objects and update functions & classes
- for name, new_obj in list(module.__dict__.items()):
- key = (module.__name__, name)
- if key not in old_objects: continue
-
- new_refs = []
- for old_ref in old_objects[key]:
- old_obj = old_ref()
- if old_obj is None: continue
- new_refs.append(old_ref)
- update_generic(old_obj, new_obj)
-
- if new_refs:
- old_objects[key] = new_refs
- else:
- del old_objects[key]
-
- return module
-
-#------------------------------------------------------------------------------
-# IPython connectivity
-#------------------------------------------------------------------------------
-
-from IPython.core.magic import Magics, magics_class, line_magic
-
-@magics_class
-class AutoreloadMagics(Magics):
- def __init__(self, *a, **kw):
- super(AutoreloadMagics, self).__init__(*a, **kw)
- self._reloader = ModuleReloader()
- self._reloader.check_all = False
- self.loaded_modules = set(sys.modules)
-
- @line_magic
- def autoreload(self, parameter_s=''):
- r"""%autoreload => Reload modules automatically
-
- %autoreload
- Reload all modules (except those excluded by %aimport) automatically
- now.
-
- %autoreload 0
- Disable automatic reloading.
-
- %autoreload 1
- Reload all modules imported with %aimport every time before executing
- the Python code typed.
-
- %autoreload 2
- Reload all modules (except those excluded by %aimport) every time
- before executing the Python code typed.
-
- Reloading Python modules in a reliable way is in general
- difficult, and unexpected things may occur. %autoreload tries to
- work around common pitfalls by replacing function code objects and
- parts of classes previously in the module with new versions. This
-        makes the following things work:
-
- - Functions and classes imported via 'from xxx import foo' are upgraded
- to new versions when 'xxx' is reloaded.
-
- - Methods and properties of classes are upgraded on reload, so that
- calling 'c.foo()' on an object 'c' created before the reload causes
- the new code for 'foo' to be executed.
-
- Some of the known remaining caveats are:
-
- - Replacing code objects does not always succeed: changing a @property
- in a class to an ordinary method or a method to a member variable
- can cause problems (but in old objects only).
-
-        - Functions that are removed (e.g. via monkey-patching) from a module
- before it is reloaded are not upgraded.
-
- - C extension modules cannot be reloaded, and so cannot be
- autoreloaded.
-
- """
- if parameter_s == '':
- self._reloader.check(True)
- elif parameter_s == '0':
- self._reloader.enabled = False
- elif parameter_s == '1':
- self._reloader.check_all = False
- self._reloader.enabled = True
- elif parameter_s == '2':
- self._reloader.check_all = True
- self._reloader.enabled = True
-
- @line_magic
- def aimport(self, parameter_s='', stream=None):
- """%aimport => Import modules for automatic reloading.
-
- %aimport
- List modules to automatically import and not to import.
-
- %aimport foo
- Import module 'foo' and mark it to be autoreloaded for %autoreload 1
-
- %aimport -foo
- Mark module 'foo' to not be autoreloaded for %autoreload 1
- """
- modname = parameter_s
- if not modname:
- to_reload = sorted(self._reloader.modules.keys())
- to_skip = sorted(self._reloader.skip_modules.keys())
- if stream is None:
- stream = sys.stdout
- if self._reloader.check_all:
- stream.write("Modules to reload:\nall-except-skipped\n")
- else:
- stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
- stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
- elif modname.startswith('-'):
- modname = modname[1:]
- self._reloader.mark_module_skipped(modname)
- else:
- top_module, top_name = self._reloader.aimport_module(modname)
-
- # Inject module to user namespace
- self.shell.push({top_name: top_module})
-
- def pre_run_cell(self):
- if self._reloader.enabled:
- try:
- self._reloader.check()
- except:
- pass
-
- def post_execute_hook(self):
- """Cache the modification times of any modules imported in this execution
- """
- newly_loaded_modules = set(sys.modules) - self.loaded_modules
- for modname in newly_loaded_modules:
- _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
- if pymtime is not None:
- self._reloader.modules_mtimes[modname] = pymtime
-
- self.loaded_modules.update(newly_loaded_modules)
-
-
-def load_ipython_extension(ip):
- """Load the extension in IPython."""
- auto_reload = AutoreloadMagics(ip)
- ip.register_magics(auto_reload)
- ip.events.register('pre_run_cell', auto_reload.pre_run_cell)
- ip.events.register('post_execute', auto_reload.post_execute_hook)
+ return None, None
+
+ filename = module.__file__
+ path, ext = os.path.splitext(filename)
+
+ if ext.lower() == '.py':
+ py_filename = filename
+ else:
+ try:
+ py_filename = openpy.source_from_cache(filename)
+ except ValueError:
+ return None, None
+
+ try:
+ pymtime = os.stat(py_filename).st_mtime
+ except OSError:
+ return None, None
+
+ return py_filename, pymtime
+
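
A self-contained sketch of the same lookup, assuming Python 3, where
``importlib.util.source_from_cache`` plays the role of the ``openpy``
helper used above::

    import os
    import importlib.util

    def source_mtime(module):
        # Map a module to its .py source file and modification time,
        # mirroring filename_and_mtime; (None, None) means "skip it".
        filename = getattr(module, '__file__', None)
        if filename is None:
            return None, None
        path, ext = os.path.splitext(filename)
        if ext.lower() == '.py':
            py_filename = filename
        else:
            try:
                py_filename = importlib.util.source_from_cache(filename)
            except ValueError:
                return None, None
        try:
            return py_filename, os.stat(py_filename).st_mtime
        except OSError:
            return None, None
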
+ def check(self, check_all=False, do_reload=True):
+ """Check whether some modules need to be reloaded."""
+
+ if not self.enabled and not check_all:
+ return
+
+ if check_all or self.check_all:
+ modules = list(sys.modules.keys())
+ else:
+ modules = list(self.modules.keys())
+
+ for modname in modules:
+ m = sys.modules.get(modname, None)
+
+ if modname in self.skip_modules:
+ continue
+
+ py_filename, pymtime = self.filename_and_mtime(m)
+ if py_filename is None:
+ continue
+
+ try:
+ if pymtime <= self.modules_mtimes[modname]:
+ continue
+ except KeyError:
+ self.modules_mtimes[modname] = pymtime
+ continue
+ else:
+ if self.failed.get(py_filename, None) == pymtime:
+ continue
+
+ self.modules_mtimes[modname] = pymtime
+
+ # If we've reached this point, we should try to reload the module
+ if do_reload:
+ try:
+ superreload(m, reload, self.old_objects)
+ if py_filename in self.failed:
+ del self.failed[py_filename]
+ except:
+ print("[autoreload of %s failed: %s]" % (
+ modname, traceback.format_exc(1)), file=sys.stderr)
+ self.failed[py_filename] = pymtime
+
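
A condensed sketch of the polling strategy ``check()`` implements,
reusing the ``source_mtime`` helper sketched earlier: compare each
module's current source mtime against the last one seen, and reload on
change::

    import sys
    from importlib import reload

    watched = {}                          # module name -> last seen mtime

    def poll_and_reload():
        for name, module in list(sys.modules.items()):
            py_filename, mtime = source_mtime(module)
            if py_filename is None:
                continue                  # no source file; skip
            last = watched.get(name)
            watched[name] = mtime
            if last is not None and mtime > last:
                reload(module)            # source changed since last poll
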
+#------------------------------------------------------------------------------
+# superreload
+#------------------------------------------------------------------------------
+
+if PY3:
+ func_attrs = ['__code__', '__defaults__', '__doc__',
+ '__closure__', '__globals__', '__dict__']
+else:
+ func_attrs = ['func_code', 'func_defaults', 'func_doc',
+ 'func_closure', 'func_globals', 'func_dict']
+
+
+def update_function(old, new):
+ """Upgrade the code object of a function"""
+ for name in func_attrs:
+ try:
+ setattr(old, name, getattr(new, name))
+ except (AttributeError, TypeError):
+ pass
+
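
A short, runnable demonstration of why copying ``__code__``
(``func_code`` on Python 2) upgrades a function in place: every existing
reference to the old function object observes the new behaviour::

    def old():
        return 1

    def new():
        return 2

    alias = old                    # a second reference to the same object
    old.__code__ = new.__code__    # what update_function does per attribute
    assert old() == 2 and alias() == 2
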
+
+def update_class(old, new):
+ """Replace stuff in the __dict__ of a class, and upgrade
+ method code objects"""
+ for key in list(old.__dict__.keys()):
+ old_obj = getattr(old, key)
+
+ try:
+ new_obj = getattr(new, key)
+ except AttributeError:
+ # obsolete attribute: remove it
+ try:
+ delattr(old, key)
+ except (AttributeError, TypeError):
+ pass
+ continue
+
+ if update_generic(old_obj, new_obj): continue
+
+ try:
+ setattr(old, key, getattr(new, key))
+ except (AttributeError, TypeError):
+ pass # skip non-writable attributes
+
+
+def update_property(old, new):
+ """Replace get/set/del functions of a property"""
+ update_generic(old.fdel, new.fdel)
+ update_generic(old.fget, new.fget)
+ update_generic(old.fset, new.fset)
+
+
+def isinstance2(a, b, typ):
+ return isinstance(a, typ) and isinstance(b, typ)
+
+
+UPDATE_RULES = [
+ (lambda a, b: isinstance2(a, b, type),
+ update_class),
+ (lambda a, b: isinstance2(a, b, types.FunctionType),
+ update_function),
+ (lambda a, b: isinstance2(a, b, property),
+ update_property),
+]
+
+
+if PY3:
+ UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType),
+ lambda a, b: update_function(a.__func__, b.__func__)),
+ ])
+else:
+ UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.ClassType),
+ update_class),
+ (lambda a, b: isinstance2(a, b, types.MethodType),
+ lambda a, b: update_function(a.__func__, b.__func__)),
+ ])
+
+
+def update_generic(a, b):
+ for type_check, update in UPDATE_RULES:
+ if type_check(a, b):
+ update(a, b)
+ return True
+ return False
+
+
+class StrongRef(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __call__(self):
+ return self.obj
+
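
A hedged illustration of why ``StrongRef`` exists: ``weakref.ref``
raises ``TypeError`` for some objects (ints here; old-style Python 2
classes are the case handled by ``superreload`` below), so a callable
strong reference with the same ``()`` interface is substituted::

    import weakref

    class Plain(object):
        pass

    p = Plain()
    assert weakref.ref(p)() is p   # ordinary instances support weakrefs

    try:
        ref = weakref.ref(42)      # ints do not
    except TypeError:
        ref = StrongRef(42)        # same call interface, keeps 42 alive
    assert ref() == 42
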
+
+def superreload(module, reload=reload, old_objects={}):
+ """Enhanced version of the builtin reload function.
+
+ superreload remembers objects previously in the module, and
+
+ - upgrades the class dictionary of every old class in the module
+ - upgrades the code object of every old function and method
+ - clears the module's namespace before reloading
+
+ """
+
+ # collect old objects in the module
+ for name, obj in list(module.__dict__.items()):
+ if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
+ continue
+ key = (module.__name__, name)
+ try:
+ old_objects.setdefault(key, []).append(weakref.ref(obj))
+ except TypeError:
+ # weakref doesn't work for all types;
+ # create strong references for 'important' cases
+ if not PY3 and isinstance(obj, types.ClassType):
+ old_objects.setdefault(key, []).append(StrongRef(obj))
+
+ # reload module
+ try:
+ # clear namespace first from old cruft
+ old_dict = module.__dict__.copy()
+ old_name = module.__name__
+ module.__dict__.clear()
+ module.__dict__['__name__'] = old_name
+ module.__dict__['__loader__'] = old_dict['__loader__']
+ except (TypeError, AttributeError, KeyError):
+ pass
+
+ try:
+ module = reload(module)
+ except:
+ # restore module dictionary on failed reload
+ module.__dict__.update(old_dict)
+ raise
+
+ # iterate over all objects and update functions & classes
+ for name, new_obj in list(module.__dict__.items()):
+ key = (module.__name__, name)
+ if key not in old_objects: continue
+
+ new_refs = []
+ for old_ref in old_objects[key]:
+ old_obj = old_ref()
+ if old_obj is None: continue
+ new_refs.append(old_ref)
+ update_generic(old_obj, new_obj)
+
+ if new_refs:
+ old_objects[key] = new_refs
+ else:
+ del old_objects[key]
+
+ return module
+
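
A self-contained usage sketch, assuming an interactive session where the
current directory is importable and writable (``demo_mod`` is a
throwaway module created only for this demo)::

    def write_module(body):
        with open('demo_mod.py', 'w') as f:
            f.write(body)

    write_module('def answer():\n    return 1\n')
    import demo_mod
    from demo_mod import answer                 # old reference in user code

    write_module('def answer():\n    return 1 + 1\n')   # edited source
    demo_mod = superreload(demo_mod)
    assert answer() == 2     # the old reference now runs the new code
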
+#------------------------------------------------------------------------------
+# IPython connectivity
+#------------------------------------------------------------------------------
+
+from IPython.core.magic import Magics, magics_class, line_magic
+
+@magics_class
+class AutoreloadMagics(Magics):
+ def __init__(self, *a, **kw):
+ super(AutoreloadMagics, self).__init__(*a, **kw)
+ self._reloader = ModuleReloader()
+ self._reloader.check_all = False
+ self.loaded_modules = set(sys.modules)
+
+ @line_magic
+ def autoreload(self, parameter_s=''):
+ r"""%autoreload => Reload modules automatically
+
+ %autoreload
+ Reload all modules (except those excluded by %aimport) automatically
+ now.
+
+ %autoreload 0
+ Disable automatic reloading.
+
+ %autoreload 1
+ Reload all modules imported with %aimport every time before executing
+ the Python code typed.
+
+ %autoreload 2
+ Reload all modules (except those excluded by %aimport) every time
+ before executing the Python code typed.
+
+ Reloading Python modules in a reliable way is in general
+ difficult, and unexpected things may occur. %autoreload tries to
+ work around common pitfalls by replacing function code objects and
+ parts of classes previously in the module with new versions. This
+        makes the following things work:
+
+ - Functions and classes imported via 'from xxx import foo' are upgraded
+ to new versions when 'xxx' is reloaded.
+
+ - Methods and properties of classes are upgraded on reload, so that
+ calling 'c.foo()' on an object 'c' created before the reload causes
+ the new code for 'foo' to be executed.
+
+ Some of the known remaining caveats are:
+
+ - Replacing code objects does not always succeed: changing a @property
+ in a class to an ordinary method or a method to a member variable
+ can cause problems (but in old objects only).
+
+        - Functions that are removed (e.g. via monkey-patching) from a module
+ before it is reloaded are not upgraded.
+
+ - C extension modules cannot be reloaded, and so cannot be
+ autoreloaded.
+
+ """
+ if parameter_s == '':
+ self._reloader.check(True)
+ elif parameter_s == '0':
+ self._reloader.enabled = False
+ elif parameter_s == '1':
+ self._reloader.check_all = False
+ self._reloader.enabled = True
+ elif parameter_s == '2':
+ self._reloader.check_all = True
+ self._reloader.enabled = True
+
+ @line_magic
+ def aimport(self, parameter_s='', stream=None):
+ """%aimport => Import modules for automatic reloading.
+
+ %aimport
+ List modules to automatically import and not to import.
+
+ %aimport foo
+ Import module 'foo' and mark it to be autoreloaded for %autoreload 1
+
+ %aimport -foo
+ Mark module 'foo' to not be autoreloaded for %autoreload 1
+ """
+ modname = parameter_s
+ if not modname:
+ to_reload = sorted(self._reloader.modules.keys())
+ to_skip = sorted(self._reloader.skip_modules.keys())
+ if stream is None:
+ stream = sys.stdout
+ if self._reloader.check_all:
+ stream.write("Modules to reload:\nall-except-skipped\n")
+ else:
+ stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
+ stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
+ elif modname.startswith('-'):
+ modname = modname[1:]
+ self._reloader.mark_module_skipped(modname)
+ else:
+ top_module, top_name = self._reloader.aimport_module(modname)
+
+ # Inject module to user namespace
+ self.shell.push({top_name: top_module})
+
+ def pre_run_cell(self):
+ if self._reloader.enabled:
+ try:
+ self._reloader.check()
+ except:
+ pass
+
+ def post_execute_hook(self):
+ """Cache the modification times of any modules imported in this execution
+ """
+ newly_loaded_modules = set(sys.modules) - self.loaded_modules
+ for modname in newly_loaded_modules:
+ _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
+ if pymtime is not None:
+ self._reloader.modules_mtimes[modname] = pymtime
+
+ self.loaded_modules.update(newly_loaded_modules)
+
+
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+ auto_reload = AutoreloadMagics(ip)
+ ip.register_magics(auto_reload)
+ ip.events.register('pre_run_cell', auto_reload.pre_run_cell)
+ ip.events.register('post_execute', auto_reload.post_execute_hook)
diff --git a/contrib/python/ipython/py2/IPython/extensions/cythonmagic.py b/contrib/python/ipython/py2/IPython/extensions/cythonmagic.py
index 65c3e39548..3c88e7c2a1 100644
--- a/contrib/python/ipython/py2/IPython/extensions/cythonmagic.py
+++ b/contrib/python/ipython/py2/IPython/extensions/cythonmagic.py
@@ -1,21 +1,21 @@
-# -*- coding: utf-8 -*-
-"""
-**DEPRECATED**
-
-The cython magic has been integrated into Cython itself,
-which is now released in version 0.21.
-
-cf. the GitHub `Cython` organisation, `Cython` repo, under the
-file `Cython/Build/IpythonMagic.py`.
-"""
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011, IPython Development Team.
-#-----------------------------------------------------------------------------
-
-import warnings
-
-## still load the magic in IPython 3.x, remove completely in future versions.
-def load_ipython_extension(ip):
- """Load the extension in IPython."""
-
- warnings.warn("""The Cython magic has been moved to the Cython package""")
+# -*- coding: utf-8 -*-
+"""
+**DEPRECATED**
+
+The cython magic has been integrated into Cython itself,
+which is now released in version 0.21.
+
+cf. the GitHub `Cython` organisation, `Cython` repo, under the
+file `Cython/Build/IpythonMagic.py`.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011, IPython Development Team.
+#-----------------------------------------------------------------------------
+
+import warnings
+
+## still load the magic in IPython 3.x, remove completely in future versions.
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+
+ warnings.warn("""The Cython magic has been moved to the Cython package""")
diff --git a/contrib/python/ipython/py2/IPython/extensions/rmagic.py b/contrib/python/ipython/py2/IPython/extensions/rmagic.py
index ecc25bc4f8..ec5763972e 100644
--- a/contrib/python/ipython/py2/IPython/extensions/rmagic.py
+++ b/contrib/python/ipython/py2/IPython/extensions/rmagic.py
@@ -1,12 +1,12 @@
-# -*- coding: utf-8 -*-
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2012 The IPython Development Team
-#-----------------------------------------------------------------------------
-
-import warnings
-
-def load_ipython_extension(ip):
- """Load the extension in IPython."""
- warnings.warn("The rmagic extension in IPython has moved to "
- "`rpy2.ipython`, please see `rpy2` documentation.")
+# -*- coding: utf-8 -*-
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2012 The IPython Development Team
+#-----------------------------------------------------------------------------
+
+import warnings
+
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+ warnings.warn("The rmagic extension in IPython has moved to "
+ "`rpy2.ipython`, please see `rpy2` documentation.")
diff --git a/contrib/python/ipython/py2/IPython/extensions/storemagic.py b/contrib/python/ipython/py2/IPython/extensions/storemagic.py
index 34371a30f2..2fd1abf993 100644
--- a/contrib/python/ipython/py2/IPython/extensions/storemagic.py
+++ b/contrib/python/ipython/py2/IPython/extensions/storemagic.py
@@ -1,228 +1,228 @@
-# -*- coding: utf-8 -*-
-"""
-%store magic for lightweight persistence.
-
-Stores variables, aliases and macros in IPython's database.
-
-To automatically restore stored variables at startup, add this to your
-:file:`ipython_config.py` file::
-
- c.StoreMagics.autorestore = True
-"""
-from __future__ import print_function
-
+# -*- coding: utf-8 -*-
+"""
+%store magic for lightweight persistence.
+
+Stores variables, aliases and macros in IPython's database.
+
+To automatically restore stored variables at startup, add this to your
+:file:`ipython_config.py` file::
+
+ c.StoreMagics.autorestore = True
+"""
+from __future__ import print_function
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-import inspect, os, sys, textwrap
-
-from IPython.core.error import UsageError
-from IPython.core.magic import Magics, magics_class, line_magic
-from traitlets import Bool
-from IPython.utils.py3compat import string_types
-
-
-def restore_aliases(ip):
- staliases = ip.db.get('stored_aliases', {})
- for k,v in staliases.items():
- #print "restore alias",k,v # dbg
- #self.alias_table[k] = v
- ip.alias_manager.define_alias(k,v)
-
-
-def refresh_variables(ip):
- db = ip.db
- for key in db.keys('autorestore/*'):
- # strip autorestore
- justkey = os.path.basename(key)
- try:
- obj = db[key]
- except KeyError:
- print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
- print("The error was:", sys.exc_info()[0])
- else:
- #print "restored",justkey,"=",obj #dbg
- ip.user_ns[justkey] = obj
-
-
-def restore_dhist(ip):
- ip.user_ns['_dh'] = ip.db.get('dhist',[])
-
-
-def restore_data(ip):
- refresh_variables(ip)
- restore_aliases(ip)
- restore_dhist(ip)
-
-
-@magics_class
-class StoreMagics(Magics):
- """Lightweight persistence for python variables.
-
- Provides the %store magic."""
-
+
+import inspect, os, sys, textwrap
+
+from IPython.core.error import UsageError
+from IPython.core.magic import Magics, magics_class, line_magic
+from traitlets import Bool
+from IPython.utils.py3compat import string_types
+
+
+def restore_aliases(ip):
+ staliases = ip.db.get('stored_aliases', {})
+ for k,v in staliases.items():
+ #print "restore alias",k,v # dbg
+ #self.alias_table[k] = v
+ ip.alias_manager.define_alias(k,v)
+
+
+def refresh_variables(ip):
+ db = ip.db
+ for key in db.keys('autorestore/*'):
+ # strip autorestore
+ justkey = os.path.basename(key)
+ try:
+ obj = db[key]
+ except KeyError:
+ print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
+ print("The error was:", sys.exc_info()[0])
+ else:
+ #print "restored",justkey,"=",obj #dbg
+ ip.user_ns[justkey] = obj
+
+
+def restore_dhist(ip):
+ ip.user_ns['_dh'] = ip.db.get('dhist',[])
+
+
+def restore_data(ip):
+ refresh_variables(ip)
+ restore_aliases(ip)
+ restore_dhist(ip)
+
+
+@magics_class
+class StoreMagics(Magics):
+ """Lightweight persistence for python variables.
+
+ Provides the %store magic."""
+
autorestore = Bool(False, help=
- """If True, any %store-d variables will be automatically restored
- when IPython starts.
- """
+ """If True, any %store-d variables will be automatically restored
+ when IPython starts.
+ """
).tag(config=True)
-
- def __init__(self, shell):
- super(StoreMagics, self).__init__(shell=shell)
- self.shell.configurables.append(self)
- if self.autorestore:
- restore_data(self.shell)
-
- @line_magic
- def store(self, parameter_s=''):
- """Lightweight persistence for python variables.
-
- Example::
-
- In [1]: l = ['hello',10,'world']
- In [2]: %store l
- In [3]: exit
-
- (IPython session is closed and started again...)
-
- ville@badger:~$ ipython
- In [1]: l
- NameError: name 'l' is not defined
- In [2]: %store -r
- In [3]: l
- Out[3]: ['hello', 10, 'world']
-
- Usage:
-
- * ``%store`` - Show list of all variables and their current
- values
- * ``%store spam`` - Store the *current* value of the variable spam
- to disk
- * ``%store -d spam`` - Remove the variable and its value from storage
- * ``%store -z`` - Remove all variables from storage
- * ``%store -r`` - Refresh all variables from store (overwrite
- current vals)
- * ``%store -r spam bar`` - Refresh specified variables from store
- (delete current val)
- * ``%store foo >a.txt`` - Store value of foo to new file a.txt
- * ``%store foo >>a.txt`` - Append value of foo to file a.txt
-
- It should be noted that if you change the value of a variable, you
- need to %store it again if you want to persist the new value.
-
- Note also that the variables will need to be pickleable; most basic
- python types can be safely %store'd.
-
- Also aliases can be %store'd across sessions.
- """
-
- opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
- args = argsl.split(None,1)
- ip = self.shell
- db = ip.db
- # delete
- if 'd' in opts:
- try:
- todel = args[0]
- except IndexError:
- raise UsageError('You must provide the variable to forget')
- else:
- try:
- del db['autorestore/' + todel]
- except:
- raise UsageError("Can't delete variable '%s'" % todel)
- # reset
- elif 'z' in opts:
- for k in db.keys('autorestore/*'):
- del db[k]
-
- elif 'r' in opts:
- if args:
- for arg in args:
- try:
- obj = db['autorestore/' + arg]
- except KeyError:
- print("no stored variable %s" % arg)
- else:
- ip.user_ns[arg] = obj
- else:
- restore_data(ip)
-
- # run without arguments -> list variables & values
- elif not args:
- vars = db.keys('autorestore/*')
- vars.sort()
- if vars:
- size = max(map(len, vars))
- else:
- size = 0
-
- print('Stored variables and their in-db values:')
- fmt = '%-'+str(size)+'s -> %s'
- get = db.get
- for var in vars:
- justkey = os.path.basename(var)
- # print 30 first characters from every var
- print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
-
- # default action - store the variable
- else:
- # %store foo >file.txt or >>file.txt
- if len(args) > 1 and args[1].startswith('>'):
- fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
- if args[1].startswith('>>'):
- fil = open(fnam, 'a')
- else:
- fil = open(fnam, 'w')
- obj = ip.ev(args[0])
- print("Writing '%s' (%s) to file '%s'." % (args[0],
- obj.__class__.__name__, fnam))
-
-
- if not isinstance (obj, string_types):
- from pprint import pprint
- pprint(obj, fil)
- else:
- fil.write(obj)
- if not obj.endswith('\n'):
- fil.write('\n')
-
- fil.close()
- return
-
- # %store foo
- try:
- obj = ip.user_ns[args[0]]
- except KeyError:
- # it might be an alias
- name = args[0]
- try:
- cmd = ip.alias_manager.retrieve_alias(name)
- except ValueError:
- raise UsageError("Unknown variable '%s'" % name)
-
- staliases = db.get('stored_aliases',{})
- staliases[name] = cmd
- db['stored_aliases'] = staliases
- print("Alias stored: %s (%s)" % (name, cmd))
- return
-
- else:
- modname = getattr(inspect.getmodule(obj), '__name__', '')
- if modname == '__main__':
- print(textwrap.dedent("""\
-                Warning: %s is %s
-                Proper storage of interactively declared classes (or instances
-                of those classes) is not possible! Only instances
-                of classes in real modules on the file system can be %%store'd.
- """ % (args[0], obj) ))
- return
- #pickled = pickle.dumps(obj)
- db[ 'autorestore/' + args[0] ] = obj
- print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
-
-
-def load_ipython_extension(ip):
- """Load the extension in IPython."""
- ip.register_magics(StoreMagics)
-
+
+ def __init__(self, shell):
+ super(StoreMagics, self).__init__(shell=shell)
+ self.shell.configurables.append(self)
+ if self.autorestore:
+ restore_data(self.shell)
+
+ @line_magic
+ def store(self, parameter_s=''):
+ """Lightweight persistence for python variables.
+
+ Example::
+
+ In [1]: l = ['hello',10,'world']
+ In [2]: %store l
+ In [3]: exit
+
+ (IPython session is closed and started again...)
+
+ ville@badger:~$ ipython
+ In [1]: l
+ NameError: name 'l' is not defined
+ In [2]: %store -r
+ In [3]: l
+ Out[3]: ['hello', 10, 'world']
+
+ Usage:
+
+ * ``%store`` - Show list of all variables and their current
+ values
+ * ``%store spam`` - Store the *current* value of the variable spam
+ to disk
+ * ``%store -d spam`` - Remove the variable and its value from storage
+ * ``%store -z`` - Remove all variables from storage
+ * ``%store -r`` - Refresh all variables from store (overwrite
+ current vals)
+ * ``%store -r spam bar`` - Refresh specified variables from store
+ (delete current val)
+ * ``%store foo >a.txt`` - Store value of foo to new file a.txt
+ * ``%store foo >>a.txt`` - Append value of foo to file a.txt
+
+ It should be noted that if you change the value of a variable, you
+ need to %store it again if you want to persist the new value.
+
+ Note also that the variables will need to be pickleable; most basic
+ python types can be safely %store'd.
+
+ Also aliases can be %store'd across sessions.
+ """
+
+ opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
+ args = argsl.split(None,1)
+ ip = self.shell
+ db = ip.db
+ # delete
+ if 'd' in opts:
+ try:
+ todel = args[0]
+ except IndexError:
+ raise UsageError('You must provide the variable to forget')
+ else:
+ try:
+ del db['autorestore/' + todel]
+ except:
+ raise UsageError("Can't delete variable '%s'" % todel)
+ # reset
+ elif 'z' in opts:
+ for k in db.keys('autorestore/*'):
+ del db[k]
+
+ elif 'r' in opts:
+ if args:
+ for arg in args:
+ try:
+ obj = db['autorestore/' + arg]
+ except KeyError:
+ print("no stored variable %s" % arg)
+ else:
+ ip.user_ns[arg] = obj
+ else:
+ restore_data(ip)
+
+ # run without arguments -> list variables & values
+ elif not args:
+ vars = db.keys('autorestore/*')
+ vars.sort()
+ if vars:
+ size = max(map(len, vars))
+ else:
+ size = 0
+
+ print('Stored variables and their in-db values:')
+ fmt = '%-'+str(size)+'s -> %s'
+ get = db.get
+ for var in vars:
+ justkey = os.path.basename(var)
+            # print the first 50 characters of every value
+ print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
+
+ # default action - store the variable
+ else:
+ # %store foo >file.txt or >>file.txt
+ if len(args) > 1 and args[1].startswith('>'):
+ fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
+ if args[1].startswith('>>'):
+ fil = open(fnam, 'a')
+ else:
+ fil = open(fnam, 'w')
+ obj = ip.ev(args[0])
+ print("Writing '%s' (%s) to file '%s'." % (args[0],
+ obj.__class__.__name__, fnam))
+
+
+            if not isinstance(obj, string_types):
+ from pprint import pprint
+ pprint(obj, fil)
+ else:
+ fil.write(obj)
+ if not obj.endswith('\n'):
+ fil.write('\n')
+
+ fil.close()
+ return
+
+ # %store foo
+ try:
+ obj = ip.user_ns[args[0]]
+ except KeyError:
+ # it might be an alias
+ name = args[0]
+ try:
+ cmd = ip.alias_manager.retrieve_alias(name)
+ except ValueError:
+ raise UsageError("Unknown variable '%s'" % name)
+
+ staliases = db.get('stored_aliases',{})
+ staliases[name] = cmd
+ db['stored_aliases'] = staliases
+ print("Alias stored: %s (%s)" % (name, cmd))
+ return
+
+ else:
+ modname = getattr(inspect.getmodule(obj), '__name__', '')
+ if modname == '__main__':
+ print(textwrap.dedent("""\
+                Warning: %s is %s
+ Proper storage of interactively declared classes (or instances
+ of those classes) is not possible! Only instances
+ of classes in real modules on file system can be %%store'd.
+ """ % (args[0], obj) ))
+ return
+ #pickled = pickle.dumps(obj)
+ db[ 'autorestore/' + args[0] ] = obj
+ print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
+
+
+def load_ipython_extension(ip):
+ """Load the extension in IPython."""
+ ip.register_magics(StoreMagics)
+
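
For orientation, a minimal sketch of the round trip this ``%store`` magic implements, assuming a live IPython session (the variable name ``answer`` is illustrative); values are pickled into ``ip.db`` under ``autorestore/<name>``::

    from IPython import get_ipython

    ip = get_ipython()                           # the running InteractiveShell
    ip.run_line_magic('load_ext', 'storemagic')  # registers StoreMagics
    ip.user_ns['answer'] = 42
    ip.run_line_magic('store', 'answer')         # -> ip.db['autorestore/answer']
    del ip.user_ns['answer']
    ip.run_line_magic('store', '-r answer')      # reads it back into user_ns
    print(ip.user_ns['answer'])                  # 42

With ``c.StoreMagics.autorestore = True`` in the profile configuration, the restore step runs automatically at startup, as the ``__init__`` above shows.
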
diff --git a/contrib/python/ipython/py2/IPython/extensions/sympyprinting.py b/contrib/python/ipython/py2/IPython/extensions/sympyprinting.py
index 1428605ed2..7f9fb2ef98 100644
--- a/contrib/python/ipython/py2/IPython/extensions/sympyprinting.py
+++ b/contrib/python/ipython/py2/IPython/extensions/sympyprinting.py
@@ -1,32 +1,32 @@
-"""
-**DEPRECATED**
-
-A print function that pretty prints sympy Basic objects.
-
-:moduleauthor: Brian Granger
-
-Usage
-=====
-
-Once the extension is loaded, Sympy Basic objects are automatically
-pretty-printed.
-
-As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
-sympy.interactive.ipythonprinting; any modifications to account for changes to
-SymPy should be submitted to SymPy rather than changed here. This module is
-maintained here for backwards compatibility with old SymPy versions.
-
-"""
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008 The IPython Development Team
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import warnings
-
-def load_ipython_extension(ip):
- warnings.warn("The sympyprinting extension has moved to `sympy`, "
- "use `from sympy import init_printing; init_printing()`")
+"""
+**DEPRECATED**
+
+A print function that pretty prints sympy Basic objects.
+
+:moduleauthor: Brian Granger
+
+Usage
+=====
+
+Once the extension is loaded, Sympy Basic objects are automatically
+pretty-printed.
+
+As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
+sympy.interactive.ipythonprinting; any modifications to account for changes to
+SymPy should be submitted to SymPy rather than changed here. This module is
+maintained here for backwards compatibility with old SymPy versions.
+
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008 The IPython Development Team
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import warnings
+
+def load_ipython_extension(ip):
+ warnings.warn("The sympyprinting extension has moved to `sympy`, "
+ "use `from sympy import init_printing; init_printing()`")
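
The deprecation warning points at SymPy's own printing setup; a minimal sketch of the suggested migration (assumes ``sympy`` is installed)::

    # instead of %load_ext sympyprinting:
    from sympy import Symbol, init_printing

    init_printing()      # installs SymPy's pretty/LaTeX display hooks
    x = Symbol('x')
    x**2 + 1             # now rendered by SymPy's printers in IPython
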
diff --git a/contrib/python/ipython/py2/IPython/external/__init__.py b/contrib/python/ipython/py2/IPython/external/__init__.py
index 5e948bfe1d..3104c19462 100644
--- a/contrib/python/ipython/py2/IPython/external/__init__.py
+++ b/contrib/python/ipython/py2/IPython/external/__init__.py
@@ -1,5 +1,5 @@
-"""
-This package contains all third-party modules bundled with IPython.
-"""
-
-__all__ = ["simplegeneric"]
+"""
+This package contains all third-party modules bundled with IPython.
+"""
+
+__all__ = ["simplegeneric"]
diff --git a/contrib/python/ipython/py2/IPython/external/decorators/__init__.py b/contrib/python/ipython/py2/IPython/external/decorators/__init__.py
index 7aae6b74cd..dd8f52b711 100644
--- a/contrib/python/ipython/py2/IPython/external/decorators/__init__.py
+++ b/contrib/python/ipython/py2/IPython/external/decorators/__init__.py
@@ -1,9 +1,9 @@
-try:
- from numpy.testing.decorators import *
- from numpy.testing.noseclasses import KnownFailure
-except ImportError:
- from ._decorators import *
- try:
- from ._numpy_testing_noseclasses import KnownFailure
- except ImportError:
- pass
+try:
+ from numpy.testing.decorators import *
+ from numpy.testing.noseclasses import KnownFailure
+except ImportError:
+ from ._decorators import *
+ try:
+ from ._numpy_testing_noseclasses import KnownFailure
+ except ImportError:
+ pass
diff --git a/contrib/python/ipython/py2/IPython/external/decorators/_decorators.py b/contrib/python/ipython/py2/IPython/external/decorators/_decorators.py
index b0df539b0b..19de5e5cde 100644
--- a/contrib/python/ipython/py2/IPython/external/decorators/_decorators.py
+++ b/contrib/python/ipython/py2/IPython/external/decorators/_decorators.py
@@ -1,281 +1,281 @@
-"""
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward. Decorators that return a new
-function object need to use
-::
-
- nose.tools.make_decorator(original_function)(decorator)
-
-in returning the decorator, in order to preserve meta-data such as
-function name, setup and teardown functions and so on - see
-``nose.tools`` for more information.
-
-"""
-import warnings
-
-# IPython changes: make this work if numpy not available
-# Original code:
-#from numpy.testing.utils import \
-# WarningManager, WarningMessage
-# Our version:
-from ._numpy_testing_utils import WarningManager
-try:
- from ._numpy_testing_noseclasses import KnownFailureTest
-except:
- pass
-
-# End IPython changes
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consists of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to label as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
- The `numpy.testing` module includes ``import decorators as dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print 'Big, slow test'
-
- """
-
- t.slow = True
- return t
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Notes
- -----
- This decorator can't use the nose namespace, because it can be
- called from a non-test module. See also ``istest`` and ``nottest`` in
- ``nose.tools``.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing.decorators import setastest
-
- @setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
-
- def skip_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- # Allow for both boolean or callable skip conditions.
- if callable(skip_condition):
- skip_val = lambda : skip_condition()
- else:
- skip_val = lambda : skip_condition
-
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = '\n'+msg
-
- return "Skipping test: %s%s" % (func.__name__,out)
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise nose.SkipTest(get_msg(f,msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise nose.SkipTest(get_msg(f,msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if nose.util.isgenerator(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
-
- return nose.tools.make_decorator(f)(skipper)
-
- return skip_decorator
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureTest exception if given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureTest exception.
- Default is None.
-
- Returns
- -------
- decorator : function
-        Decorator, which, when applied to a function, causes KnownFailureTest
-        to be raised when `fail_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if callable(fail_condition):
- fail_val = lambda : fail_condition()
- else:
- fail_val = lambda : fail_condition
-
- def knownfail_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureTest(msg)
- else:
- return f(*args, **kwargs)
- return nose.tools.make_decorator(f)(knownfailer)
-
- return knownfail_decorator
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarning's, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- def _deprecated_imp(*args, **kwargs):
- # Poor man's replacement for the with statement
- ctx = WarningManager(record=True)
- l = ctx.__enter__()
- warnings.simplefilter('always')
- try:
- f(*args, **kwargs)
- if not len(l) > 0:
- raise AssertionError("No warning raised when calling %s"
- % f.__name__)
-            if l[0].category is not DeprecationWarning:
-                raise AssertionError("First warning for %s is not a "
-                    "DeprecationWarning (is %s)" % (f.__name__, l[0]))
- finally:
- ctx.__exit__()
-
- if callable(conditional):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return nose.tools.make_decorator(f)(_deprecated_imp)
- else:
- return f
- return deprecate_decorator
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+ nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+import warnings
+
+# IPython changes: make this work if numpy not available
+# Original code:
+#from numpy.testing.utils import \
+# WarningManager, WarningMessage
+# Our version:
+from ._numpy_testing_utils import WarningManager
+try:
+ from ._numpy_testing_noseclasses import KnownFailureTest
+except:
+ pass
+
+# End IPython changes
+
+def slow(t):
+ """
+ Label a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consists of
+ thousands of tests, so even a second is significant).
+
+ Parameters
+ ----------
+ t : callable
+ The test to label as slow.
+
+ Returns
+ -------
+ t : callable
+ The decorated test `t`.
+
+ Examples
+ --------
+ The `numpy.testing` module includes ``import decorators as dec``.
+ A test can be decorated as slow like this::
+
+ from numpy.testing import *
+
+ @dec.slow
+ def test_big(self):
+ print 'Big, slow test'
+
+ """
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ """
+ Signals to nose that this function is or is not a test.
+
+ Parameters
+ ----------
+ tf : bool
+ If True, specifies that the decorated callable is a test.
+ If False, specifies that the decorated callable is not a test.
+ Default is True.
+
+ Notes
+ -----
+ This decorator can't use the nose namespace, because it can be
+ called from a non-test module. See also ``istest`` and ``nottest`` in
+ ``nose.tools``.
+
+ Examples
+ --------
+ `setastest` can be used in the following way::
+
+ from numpy.testing.decorators import setastest
+
+ @setastest(False)
+ def func_with_test_in_name(arg1, arg2):
+ pass
+
+ """
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ """
+ Make function raise SkipTest exception if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ skip_condition : bool or callable
+ Flag to determine whether to skip the decorated test.
+ msg : str, optional
+ Message to give on raising a SkipTest exception. Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator which, when applied to a function, causes SkipTest
+ to be raised when `skip_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Allow for both boolean or callable skip conditions.
+ if callable(skip_condition):
+ skip_val = lambda : skip_condition()
+ else:
+ skip_val = lambda : skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None:
+ out = 'Test skipped due to test condition'
+ else:
+ out = '\n'+msg
+
+ return "Skipping test: %s%s" % (func.__name__,out)
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise nose.SkipTest(get_msg(f,msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise nose.SkipTest(get_msg(f,msg))
+ else:
+ for x in f(*args, **kwargs):
+ yield x
+
+ # Choose the right skipper to use when building the actual decorator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
+
+def knownfailureif(fail_condition, msg=None):
+ """
+ Make function raise KnownFailureTest exception if given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ fail_condition : bool or callable
+ Flag to determine whether to mark the decorated test as a known
+ failure (if True) or not (if False).
+ msg : str, optional
+ Message to give on raising a KnownFailureTest exception.
+ Default is None.
+
+ Returns
+ -------
+ decorator : function
+        Decorator, which, when applied to a function, causes KnownFailureTest
+        to be raised when `fail_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+ if msg is None:
+ msg = 'Test skipped due to known failure'
+
+ # Allow for both boolean or callable known failure conditions.
+ if callable(fail_condition):
+ fail_val = lambda : fail_condition()
+ else:
+ fail_val = lambda : fail_condition
+
+ def knownfail_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+ def knownfailer(*args, **kwargs):
+ if fail_val():
+ raise KnownFailureTest(msg)
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(knownfailer)
+
+ return knownfail_decorator
+
+def deprecated(conditional=True):
+ """
+ Filter deprecation warnings while running the test suite.
+
+ This decorator can be used to filter DeprecationWarning's, to avoid
+ printing them during the test suite run, while checking that the test
+ actually raises a DeprecationWarning.
+
+ Parameters
+ ----------
+ conditional : bool or callable, optional
+ Flag to determine whether to mark test as deprecated or not. If the
+ condition is a callable, it is used at runtime to dynamically make the
+ decision. Default is True.
+
+ Returns
+ -------
+ decorator : function
+ The `deprecated` decorator itself.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ def deprecate_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ def _deprecated_imp(*args, **kwargs):
+ # Poor man's replacement for the with statement
+ ctx = WarningManager(record=True)
+ l = ctx.__enter__()
+ warnings.simplefilter('always')
+ try:
+ f(*args, **kwargs)
+ if not len(l) > 0:
+ raise AssertionError("No warning raised when calling %s"
+ % f.__name__)
+            if l[0].category is not DeprecationWarning:
+                raise AssertionError("First warning for %s is not a "
+                    "DeprecationWarning (is %s)" % (f.__name__, l[0]))
+ finally:
+ ctx.__exit__()
+
+ if callable(conditional):
+ cond = conditional()
+ else:
+ cond = conditional
+ if cond:
+ return nose.tools.make_decorator(f)(_deprecated_imp)
+ else:
+ return f
+ return deprecate_decorator
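
A hedged sketch of how the decorators above compose in a nose-style test module (``nose`` must be importable; the test names are illustrative)::

    import sys

    from IPython.external.decorators import knownfailureif, skipif

    @skipif(sys.platform == 'win32', 'POSIX-only behaviour')
    def test_posix_join():
        assert '/'.join(['a', 'b']) == 'a/b'

    @knownfailureif(True, 'tracked upstream')
    def test_known_bug():
        # raises KnownFailureTest instead of counting as a failure
        assert 1 == 2
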
diff --git a/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_noseclasses.py b/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_noseclasses.py
index 2b3d2841a7..ca6ccd87bb 100644
--- a/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_noseclasses.py
+++ b/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_noseclasses.py
@@ -1,41 +1,41 @@
-# IPython: modified copy of numpy.testing.noseclasses, so
-# IPython.external._decorators works without numpy being installed.
-
-# These classes implement a "known failure" error class.
-
-import os
-
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-
-class KnownFailureTest(Exception):
- '''Raise this exception to mark a test as a known failing test.'''
- pass
-
-
-class KnownFailure(ErrorClassPlugin):
- '''Plugin that installs a KNOWNFAIL error class for the
-    KnownFailureTest exception. When KnownFailureTest is raised,
- the exception will be logged in the knownfail attribute of the
- result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
- exception will not be counted as an error or failure.'''
- enabled = True
- knownfail = ErrorClass(KnownFailureTest,
- label='KNOWNFAIL',
- isfailure=False)
-
- def options(self, parser, env=os.environ):
- env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
- parser.add_option('--no-knownfail', action='store_true',
- dest='noKnownFail', default=env.get(env_opt, False),
- help='Disable special handling of KnownFailureTest '
- 'exceptions')
-
- def configure(self, options, conf):
- if not self.can_configure:
- return
- self.conf = conf
- disable = getattr(options, 'noKnownFail', False)
- if disable:
- self.enabled = False
-
-
+# IPython: modified copy of numpy.testing.noseclasses, so
+# IPython.external._decorators works without numpy being installed.
+
+# These classes implement a "known failure" error class.
+
+import os
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+class KnownFailureTest(Exception):
+ '''Raise this exception to mark a test as a known failing test.'''
+ pass
+
+
+class KnownFailure(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+    KnownFailureTest exception. When KnownFailureTest is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.'''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureTest,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailureTest '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+
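
The plugin only takes effect when handed to the nose test runner; a minimal sketch (assumes ``nose`` is installed; the target name is illustrative)::

    import nose

    from IPython.external.decorators._numpy_testing_noseclasses import (
        KnownFailure,
    )

    # Tests raising KnownFailureTest are reported as 'K'/'KNOWNFAIL'
    # rather than as errors or failures.
    nose.run(argv=['nosetests', 'test_module'], addplugins=[KnownFailure()])
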
diff --git a/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_utils.py b/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_utils.py
index d12cba38ac..ad7bd0f981 100644
--- a/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_utils.py
+++ b/contrib/python/ipython/py2/IPython/external/decorators/_numpy_testing_utils.py
@@ -1,112 +1,112 @@
-# IPython: modified copy of numpy.testing.utils, so
-# IPython.external._decorators works without numpy being installed.
-"""
-Utility function to facilitate testing.
-"""
-
-import sys
-import warnings
-
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
-class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
-
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
-
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
- "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
-class WarningManager:
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
+# IPython: modified copy of numpy.testing.utils, so
+# IPython.external._decorators works without numpy being installed.
+"""
+Utility function to facilitate testing.
+"""
+
+import sys
+import warnings
+
+# The following two classes are copied from python 2.6 warnings module (context
+# manager)
+class WarningMessage(object):
+
+ """
+ Holds the result of a single showwarning() call.
+
+ Notes
+ -----
+ `WarningMessage` is copied from the Python 2.6 warnings module,
+ so it can be used in NumPy with older Python versions.
+
+ """
+
+ _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
+ "line")
+
+ def __init__(self, message, category, filename, lineno, file=None,
+ line=None):
+ local_values = locals()
+ for attr in self._WARNING_DETAILS:
+ setattr(self, attr, local_values[attr])
+ if category:
+ self._category_name = category.__name__
+ else:
+ self._category_name = None
+
+ def __str__(self):
+ return ("{message : %r, category : %r, filename : %r, lineno : %s, "
+ "line : %r}" % (self.message, self._category_name,
+ self.filename, self.lineno, self.line))
+
+class WarningManager:
+ """
+ A context manager that copies and restores the warnings filter upon
+ exiting the context.
+
+ The 'record' argument specifies whether warnings should be captured by a
+ custom implementation of ``warnings.showwarning()`` and be appended to a
+ list returned by the context manager. Otherwise None is returned by the
+ context manager. The objects appended to the list are arguments whose
+ attributes mirror the arguments to ``showwarning()``.
+
+ The 'module' argument is to specify an alternative module to the module
+ named 'warnings' and imported under that name. This argument is only useful
+ when testing the warnings module itself.
+
+ Notes
+ -----
+ `WarningManager` is a copy of the ``catch_warnings`` context manager
+ from the Python 2.6 warnings module, with slight modifications.
+ It is copied so it can be used in NumPy with older Python versions.
+
+ """
+ def __init__(self, record=False, module=None):
+ self._record = record
+ if module is None:
+ self._module = sys.modules['warnings']
+ else:
+ self._module = module
+ self._entered = False
+
+ def __enter__(self):
+ if self._entered:
+ raise RuntimeError("Cannot enter %r twice" % self)
+ self._entered = True
+ self._filters = self._module.filters
+ self._module.filters = self._filters[:]
+ self._showwarning = self._module.showwarning
+ if self._record:
+ log = []
+ def showwarning(*args, **kwargs):
+ log.append(WarningMessage(*args, **kwargs))
+ self._module.showwarning = showwarning
+ return log
+ else:
+ return None
+
def __exit__(self, type_, value, traceback):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-def assert_warns(warning_class, func, *args, **kw):
- """Fail unless a warning of class warning_class is thrown by callable when
- invoked with arguments args and keyword arguments kwargs.
-
- If a different type of warning is thrown, it will not be caught, and the
- test case will be deemed to have suffered an error.
- """
-
- # XXX: once we may depend on python >= 2.6, this can be replaced by the
- # warnings module context manager.
+ if not self._entered:
+ raise RuntimeError("Cannot exit %r without entering first" % self)
+ self._module.filters = self._filters
+ self._module.showwarning = self._showwarning
+
+def assert_warns(warning_class, func, *args, **kw):
+ """Fail unless a warning of class warning_class is thrown by callable when
+ invoked with arguments args and keyword arguments kwargs.
+
+ If a different type of warning is thrown, it will not be caught, and the
+ test case will be deemed to have suffered an error.
+ """
+
+ # XXX: once we may depend on python >= 2.6, this can be replaced by the
+ # warnings module context manager.
with WarningManager(record=True) as l:
warnings.simplefilter('always')
- func(*args, **kw)
- if not len(l) > 0:
- raise AssertionError("No warning raised when calling %s"
- % func.__name__)
-    if l[0].category is not warning_class:
-        raise AssertionError("First warning for %s is not a "
-            "%s (is %s)" % (func.__name__, warning_class, l[0]))
+ func(*args, **kw)
+ if not len(l) > 0:
+ raise AssertionError("No warning raised when calling %s"
+ % func.__name__)
+    if l[0].category is not warning_class:
+        raise AssertionError("First warning for %s is not a "
+            "%s (is %s)" % (func.__name__, warning_class, l[0]))
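
A short sketch of the ``assert_warns`` helper defined above (the warning text is illustrative)::

    import warnings

    from IPython.external.decorators._numpy_testing_utils import assert_warns

    def legacy():
        warnings.warn("old API", DeprecationWarning)

    assert_warns(DeprecationWarning, legacy)   # returns quietly on success
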
diff --git a/contrib/python/ipython/py2/IPython/external/mathjax.py b/contrib/python/ipython/py2/IPython/external/mathjax.py
index c614e46579..1b9b80905b 100644
--- a/contrib/python/ipython/py2/IPython/external/mathjax.py
+++ b/contrib/python/ipython/py2/IPython/external/mathjax.py
@@ -1,13 +1,13 @@
-#!/usr/bin/python
-"""
-`IPython.external.mathjax` is deprecated with IPython 4.0+
-
-mathjax is now installed by default with the notebook package
-
-"""
-
-import sys
-
-if __name__ == '__main__' :
- sys.exit("IPython.external.mathjax is deprecated, Mathjax is now installed by default with the notebook package")
-
+#!/usr/bin/python
+"""
+`IPython.external.mathjax` is deprecated with IPython 4.0+
+
+mathjax is now installed by default with the notebook package
+
+"""
+
+import sys
+
+if __name__ == '__main__' :
+ sys.exit("IPython.external.mathjax is deprecated, Mathjax is now installed by default with the notebook package")
+
diff --git a/contrib/python/ipython/py2/IPython/external/qt_for_kernel.py b/contrib/python/ipython/py2/IPython/external/qt_for_kernel.py
index fe1dc15dae..1a94e7e0a2 100644
--- a/contrib/python/ipython/py2/IPython/external/qt_for_kernel.py
+++ b/contrib/python/ipython/py2/IPython/external/qt_for_kernel.py
@@ -1,95 +1,95 @@
-""" Import Qt in a manner suitable for an IPython kernel.
-
-This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
-
-Import Priority:
-
-if Qt has been imported anywhere else:
- use that
-
-if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
- use PyQt4 @v1
-
-Next, consult the QT_API env variable
-
-if QT_API not set:
- ask matplotlib what it's using. If Qt4Agg or Qt5Agg, then use the
- version matplotlib is configured with
-
- else: (matplotlib said nothing)
- # this is the default path - nobody told us anything
- try in this order:
- PyQt default version, PySide, PyQt5
-else:
- use what QT_API says
-
-"""
-# NOTE: This is no longer an external, third-party module, and should be
-# considered part of IPython. For compatibility however, it is being kept in
-# IPython/external.
-
-import os
-import sys
-
-from IPython.utils.version import check_version
-from IPython.external.qt_loaders import (load_qt, loaded_api, QT_API_PYSIDE,
+""" Import Qt in a manner suitable for an IPython kernel.
+
+This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
+
+Import Priority:
+
+if Qt has been imported anywhere else:
+ use that
+
+if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
+ use PyQt4 @v1
+
+Next, consult the QT_API env variable
+
+if QT_API not set:
+ ask matplotlib what it's using. If Qt4Agg or Qt5Agg, then use the
+ version matplotlib is configured with
+
+ else: (matplotlib said nothing)
+ # this is the default path - nobody told us anything
+ try in this order:
+ PyQt default version, PySide, PyQt5
+else:
+ use what QT_API says
+
+"""
+# NOTE: This is no longer an external, third-party module, and should be
+# considered part of IPython. For compatibility however, it is being kept in
+# IPython/external.
+
+import os
+import sys
+
+from IPython.utils.version import check_version
+from IPython.external.qt_loaders import (load_qt, loaded_api, QT_API_PYSIDE,
QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5,
- QT_API_PYQTv1, QT_API_PYQT_DEFAULT)
-
+ QT_API_PYQTv1, QT_API_PYQT_DEFAULT)
+
_qt_apis = (QT_API_PYSIDE, QT_API_PYSIDE2, QT_API_PYQT, QT_API_PYQT5, QT_API_PYQTv1,
- QT_API_PYQT_DEFAULT)
-
-#Constraints placed on an imported matplotlib
-def matplotlib_options(mpl):
- if mpl is None:
- return
- backend = mpl.rcParams.get('backend', None)
- if backend == 'Qt4Agg':
- mpqt = mpl.rcParams.get('backend.qt4', None)
- if mpqt is None:
- return None
- if mpqt.lower() == 'pyside':
- return [QT_API_PYSIDE]
- elif mpqt.lower() == 'pyqt4':
- return [QT_API_PYQT_DEFAULT]
- elif mpqt.lower() == 'pyqt4v2':
- return [QT_API_PYQT]
- raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
- mpqt)
- elif backend == 'Qt5Agg':
- mpqt = mpl.rcParams.get('backend.qt5', None)
- if mpqt is None:
- return None
- if mpqt.lower() == 'pyqt5':
- return [QT_API_PYQT5]
- raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
- mpqt)
-
-def get_options():
- """Return a list of acceptable QT APIs, in decreasing order of
- preference
- """
- #already imported Qt somewhere. Use that
- loaded = loaded_api()
- if loaded is not None:
- return [loaded]
-
- mpl = sys.modules.get('matplotlib', None)
-
- if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
- #1.0.1 only supports PyQt4 v1
- return [QT_API_PYQT_DEFAULT]
-
- qt_api = os.environ.get('QT_API', None)
- if qt_api is None:
- #no ETS variable. Ask mpl, then use default fallback path
+ QT_API_PYQT_DEFAULT)
+
+#Constraints placed on an imported matplotlib
+def matplotlib_options(mpl):
+ if mpl is None:
+ return
+ backend = mpl.rcParams.get('backend', None)
+ if backend == 'Qt4Agg':
+ mpqt = mpl.rcParams.get('backend.qt4', None)
+ if mpqt is None:
+ return None
+ if mpqt.lower() == 'pyside':
+ return [QT_API_PYSIDE]
+ elif mpqt.lower() == 'pyqt4':
+ return [QT_API_PYQT_DEFAULT]
+ elif mpqt.lower() == 'pyqt4v2':
+ return [QT_API_PYQT]
+ raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
+ mpqt)
+ elif backend == 'Qt5Agg':
+ mpqt = mpl.rcParams.get('backend.qt5', None)
+ if mpqt is None:
+ return None
+ if mpqt.lower() == 'pyqt5':
+ return [QT_API_PYQT5]
+ raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
+ mpqt)
+
+def get_options():
+ """Return a list of acceptable QT APIs, in decreasing order of
+ preference
+ """
+ #already imported Qt somewhere. Use that
+ loaded = loaded_api()
+ if loaded is not None:
+ return [loaded]
+
+ mpl = sys.modules.get('matplotlib', None)
+
+ if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
+ #1.0.1 only supports PyQt4 v1
+ return [QT_API_PYQT_DEFAULT]
+
+ qt_api = os.environ.get('QT_API', None)
+ if qt_api is None:
+ #no ETS variable. Ask mpl, then use default fallback path
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE,
QT_API_PYQT5, QT_API_PYSIDE2]
- elif qt_api not in _qt_apis:
- raise RuntimeError("Invalid Qt API %r, valid values are: %r" %
- (qt_api, ', '.join(_qt_apis)))
- else:
- return [qt_api]
-
-api_opts = get_options()
-QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
+ elif qt_api not in _qt_apis:
+ raise RuntimeError("Invalid Qt API %r, valid values are: %r" %
+ (qt_api, ', '.join(_qt_apis)))
+ else:
+ return [qt_api]
+
+api_opts = get_options()
+QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
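
Because ``get_options()`` consults the ``QT_API`` environment variable before the default fallback order, the binding can be pinned up front; a sketch (``'pyqt5'`` is one of the values accepted in ``_qt_apis``)::

    import os

    os.environ['QT_API'] = 'pyqt5'   # must be set before the first import

    from IPython.external.qt_for_kernel import QT_API, QtCore, QtGui

    print(QT_API)                    # 'pyqt5', if PyQt5 is importable
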
diff --git a/contrib/python/ipython/py2/IPython/external/qt_loaders.py b/contrib/python/ipython/py2/IPython/external/qt_loaders.py
index e9912106e9..3b894fb2ab 100644
--- a/contrib/python/ipython/py2/IPython/external/qt_loaders.py
+++ b/contrib/python/ipython/py2/IPython/external/qt_loaders.py
@@ -1,27 +1,27 @@
-"""
-This module contains factory functions that attempt
-to return Qt submodules from the various python Qt bindings.
-
-It also protects against double-importing Qt with different
-bindings, which is unstable and likely to crash
-
-This is used primarily by qt and qt_for_kernel, and shouldn't
-be accessed directly from the outside
-"""
-import sys
-import types
-from functools import partial
-
-from IPython.utils.version import check_version
-
-# Available APIs.
-QT_API_PYQT = 'pyqt' # Force version 2
-QT_API_PYQT5 = 'pyqt5'
-QT_API_PYQTv1 = 'pyqtv1' # Force version 1
-QT_API_PYQT_DEFAULT = 'pyqtdefault' # use system default for version 1 vs. 2
-QT_API_PYSIDE = 'pyside'
+"""
+This module contains factory functions that attempt
+to return Qt submodules from the various python Qt bindings.
+
+It also protects against double-importing Qt with different
+bindings, which is unstable and likely to crash
+
+This is used primarily by qt and qt_for_kernel, and shouldn't
+be accessed directly from the outside
+"""
+import sys
+import types
+from functools import partial
+
+from IPython.utils.version import check_version
+
+# Available APIs.
+QT_API_PYQT = 'pyqt' # Force version 2
+QT_API_PYQT5 = 'pyqt5'
+QT_API_PYQTv1 = 'pyqtv1' # Force version 1
+QT_API_PYQT_DEFAULT = 'pyqtdefault' # use system default for version 1 vs. 2
+QT_API_PYSIDE = 'pyside'
QT_API_PYSIDE2 = 'pyside2'
-
+
api_to_module = {QT_API_PYSIDE2: 'PySide2',
QT_API_PYSIDE: 'PySide',
QT_API_PYQT: 'PyQt4',
@@ -29,124 +29,124 @@ api_to_module = {QT_API_PYSIDE2: 'PySide2',
QT_API_PYQT5: 'PyQt5',
QT_API_PYQT_DEFAULT: 'PyQt4',
}
-
-
-class ImportDenier(object):
- """Import Hook that will guard against bad Qt imports
- once IPython commits to a specific binding
- """
-
- def __init__(self):
- self.__forbidden = set()
-
- def forbid(self, module_name):
- sys.modules.pop(module_name, None)
- self.__forbidden.add(module_name)
-
- def find_module(self, fullname, path=None):
- if path:
- return
- if fullname in self.__forbidden:
- return self
-
- def load_module(self, fullname):
- raise ImportError("""
-    Importing %s is disabled by IPython, which has
-    already imported an incompatible Qt binding: %s
- """ % (fullname, loaded_api()))
-
-ID = ImportDenier()
+
+
+class ImportDenier(object):
+ """Import Hook that will guard against bad Qt imports
+ once IPython commits to a specific binding
+ """
+
+ def __init__(self):
+ self.__forbidden = set()
+
+ def forbid(self, module_name):
+ sys.modules.pop(module_name, None)
+ self.__forbidden.add(module_name)
+
+ def find_module(self, fullname, path=None):
+ if path:
+ return
+ if fullname in self.__forbidden:
+ return self
+
+ def load_module(self, fullname):
+ raise ImportError("""
+    Importing %s is disabled by IPython, which has
+    already imported an incompatible Qt binding: %s
+ """ % (fullname, loaded_api()))
+
+ID = ImportDenier()
sys.meta_path.insert(0, ID)
-
-
-def commit_api(api):
- """Commit to a particular API, and trigger ImportErrors on subsequent
- dangerous imports"""
-
+
+
+def commit_api(api):
+ """Commit to a particular API, and trigger ImportErrors on subsequent
+ dangerous imports"""
+
if api == QT_API_PYSIDE2:
ID.forbid('PySide')
ID.forbid('PyQt4')
ID.forbid('PyQt5')
- if api == QT_API_PYSIDE:
+ if api == QT_API_PYSIDE:
ID.forbid('PySide2')
- ID.forbid('PyQt4')
- ID.forbid('PyQt5')
- elif api == QT_API_PYQT5:
+ ID.forbid('PyQt4')
+ ID.forbid('PyQt5')
+ elif api == QT_API_PYQT5:
ID.forbid('PySide2')
- ID.forbid('PySide')
- ID.forbid('PyQt4')
- else: # There are three other possibilities, all representing PyQt4
- ID.forbid('PyQt5')
+ ID.forbid('PySide')
+ ID.forbid('PyQt4')
+ else: # There are three other possibilities, all representing PyQt4
+ ID.forbid('PyQt5')
ID.forbid('PySide2')
- ID.forbid('PySide')
-
-
-def loaded_api():
- """Return which API is loaded, if any
-
- If this returns anything besides None,
- importing any other Qt binding is unsafe.
-
- Returns
- -------
+ ID.forbid('PySide')
+
+
+def loaded_api():
+ """Return which API is loaded, if any
+
+ If this returns anything besides None,
+ importing any other Qt binding is unsafe.
+
+ Returns
+ -------
None, 'pyside2', 'pyside', 'pyqt', 'pyqt5', or 'pyqtv1'
- """
- if 'PyQt4.QtCore' in sys.modules:
- if qtapi_version() == 2:
- return QT_API_PYQT
- else:
- return QT_API_PYQTv1
- elif 'PySide.QtCore' in sys.modules:
- return QT_API_PYSIDE
+ """
+ if 'PyQt4.QtCore' in sys.modules:
+ if qtapi_version() == 2:
+ return QT_API_PYQT
+ else:
+ return QT_API_PYQTv1
+ elif 'PySide.QtCore' in sys.modules:
+ return QT_API_PYSIDE
elif 'PySide2.QtCore' in sys.modules:
return QT_API_PYSIDE2
- elif 'PyQt5.QtCore' in sys.modules:
- return QT_API_PYQT5
- return None
-
-
-def has_binding(api):
+ elif 'PyQt5.QtCore' in sys.modules:
+ return QT_API_PYQT5
+ return None
+
+
+def has_binding(api):
"""Safely check for PyQt4/5, PySide or PySide2, without importing submodules
-
+
Supports Python <= 3.3
- Parameters
- ----------
+ Parameters
+ ----------
api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
- Which module to check for
-
- Returns
- -------
- True if the relevant module appears to be importable
- """
- # we can't import an incomplete pyside and pyqt4
- # this will cause a crash in sip (#1431)
- # check for complete presence before importing
+ Which module to check for
+
+ Returns
+ -------
+ True if the relevant module appears to be importable
+ """
+ # we can't import an incomplete pyside and pyqt4
+ # this will cause a crash in sip (#1431)
+ # check for complete presence before importing
module_name = api_to_module[api]
-
- import imp
- try:
- #importing top level PyQt4/PySide module is ok...
- mod = __import__(module_name)
- #...importing submodules is not
- imp.find_module('QtCore', mod.__path__)
- imp.find_module('QtGui', mod.__path__)
- imp.find_module('QtSvg', mod.__path__)
+
+ import imp
+ try:
+ #importing top level PyQt4/PySide module is ok...
+ mod = __import__(module_name)
+ #...importing submodules is not
+ imp.find_module('QtCore', mod.__path__)
+ imp.find_module('QtGui', mod.__path__)
+ imp.find_module('QtSvg', mod.__path__)
if api in (QT_API_PYQT5, QT_API_PYSIDE2):
- # QT5 requires QtWidgets too
- imp.find_module('QtWidgets', mod.__path__)
-
- #we can also safely check PySide version
- if api == QT_API_PYSIDE:
- return check_version(mod.__version__, '1.0.3')
- else:
- return True
- except ImportError:
- return False
-
+ # QT5 requires QtWidgets too
+ imp.find_module('QtWidgets', mod.__path__)
+
+ #we can also safely check PySide version
+ if api == QT_API_PYSIDE:
+ return check_version(mod.__version__, '1.0.3')
+ else:
+ return True
+ except ImportError:
+ return False
+
def has_binding_new(api):
"""Safely check for PyQt4/5, PySide or PySide2, without importing submodules
-
+
Supports Python >= 3.4
Parameters
@@ -187,108 +187,108 @@ def has_binding_new(api):
if sys.version_info >= (3, 4):
has_binding = has_binding_new
-def qtapi_version():
- """Return which QString API has been set, if any
-
- Returns
- -------
- The QString API version (1 or 2), or None if not set
- """
- try:
- import sip
- except ImportError:
- return
- try:
- return sip.getapi('QString')
- except ValueError:
- return
-
-
-def can_import(api):
- """Safely query whether an API is importable, without importing it"""
- if not has_binding(api):
- return False
-
- current = loaded_api()
- if api == QT_API_PYQT_DEFAULT:
- return current in [QT_API_PYQT, QT_API_PYQTv1, None]
- else:
- return current in [api, None]
-
-
-def import_pyqt4(version=2):
- """
- Import PyQt4
-
- Parameters
- ----------
- version : 1, 2, or None
- Which QString/QVariant API to use. Set to None to use the system
- default
-
-    ImportErrors raised within this function are non-recoverable
- """
- # The new-style string API (version=2) automatically
- # converts QStrings to Unicode Python strings. Also, automatically unpacks
- # QVariants to their underlying objects.
- import sip
-
- if version is not None:
- sip.setapi('QString', version)
- sip.setapi('QVariant', version)
-
- from PyQt4 import QtGui, QtCore, QtSvg
-
- if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
- raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
- QtCore.PYQT_VERSION_STR)
-
- # Alias PyQt-specific functions for PySide compatibility.
- QtCore.Signal = QtCore.pyqtSignal
- QtCore.Slot = QtCore.pyqtSlot
-
- # query for the API version (in case version == None)
- version = sip.getapi('QString')
- api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
- return QtCore, QtGui, QtSvg, api
-
-
-def import_pyqt5():
- """
- Import PyQt5
-
-    ImportErrors raised within this function are non-recoverable
- """
- import sip
-
- from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
-
- # Alias PyQt-specific functions for PySide compatibility.
- QtCore.Signal = QtCore.pyqtSignal
- QtCore.Slot = QtCore.pyqtSlot
-
- # Join QtGui and QtWidgets for Qt4 compatibility.
- QtGuiCompat = types.ModuleType('QtGuiCompat')
- QtGuiCompat.__dict__.update(QtGui.__dict__)
- QtGuiCompat.__dict__.update(QtWidgets.__dict__)
-
- api = QT_API_PYQT5
- return QtCore, QtGuiCompat, QtSvg, api
-
-
-def import_pyside():
- """
- Import PySide
-
- ImportErrors raised within this function are non-recoverable
- """
- from PySide import QtGui, QtCore, QtSvg
- return QtCore, QtGui, QtSvg, QT_API_PYSIDE
-
+def qtapi_version():
+ """Return which QString API has been set, if any
+
+ Returns
+ -------
+ The QString API version (1 or 2), or None if not set
+ """
+ try:
+ import sip
+ except ImportError:
+ return
+ try:
+ return sip.getapi('QString')
+ except ValueError:
+ return
+
+
+def can_import(api):
+ """Safely query whether an API is importable, without importing it"""
+ if not has_binding(api):
+ return False
+
+ current = loaded_api()
+ if api == QT_API_PYQT_DEFAULT:
+ return current in [QT_API_PYQT, QT_API_PYQTv1, None]
+ else:
+ return current in [api, None]
+
+
+def import_pyqt4(version=2):
+ """
+ Import PyQt4
+
+ Parameters
+ ----------
+ version : 1, 2, or None
+ Which QString/QVariant API to use. Set to None to use the system
+ default
+
+    ImportErrors raised within this function are non-recoverable
+ """
+ # The new-style string API (version=2) automatically
+ # converts QStrings to Unicode Python strings. Also, automatically unpacks
+ # QVariants to their underlying objects.
+ import sip
+
+ if version is not None:
+ sip.setapi('QString', version)
+ sip.setapi('QVariant', version)
+
+ from PyQt4 import QtGui, QtCore, QtSvg
+
+ if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
+ raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
+ QtCore.PYQT_VERSION_STR)
+
+ # Alias PyQt-specific functions for PySide compatibility.
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+
+ # query for the API version (in case version == None)
+ version = sip.getapi('QString')
+ api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
+ return QtCore, QtGui, QtSvg, api
+
+
+def import_pyqt5():
+ """
+ Import PyQt5
+
+    ImportErrors raised within this function are non-recoverable
+ """
+ import sip
+
+ from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
+
+ # Alias PyQt-specific functions for PySide compatibility.
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+
+ # Join QtGui and QtWidgets for Qt4 compatibility.
+ QtGuiCompat = types.ModuleType('QtGuiCompat')
+ QtGuiCompat.__dict__.update(QtGui.__dict__)
+ QtGuiCompat.__dict__.update(QtWidgets.__dict__)
+
+ api = QT_API_PYQT5
+ return QtCore, QtGuiCompat, QtSvg, api
+
+
+def import_pyside():
+ """
+ Import PySide
+
+ ImportErrors raised within this function are non-recoverable
+ """
+ from PySide import QtGui, QtCore, QtSvg
+ return QtCore, QtGui, QtSvg, QT_API_PYSIDE
+
def import_pyside2():
"""
Import PySide2
-
+
ImportErrors raised within this function are non-recoverable
"""
from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
@@ -302,71 +302,71 @@ def import_pyside2():
return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
-def load_qt(api_options):
- """
- Attempt to import Qt, given a preference list
- of permissible bindings
-
- It is safe to call this function multiple times.
-
- Parameters
- ----------
- api_options: List of strings
+def load_qt(api_options):
+ """
+ Attempt to import Qt, given a preference list
+ of permissible bindings
+
+ It is safe to call this function multiple times.
+
+ Parameters
+ ----------
+ api_options: List of strings
The order of APIs to try. Valid items are 'pyside', 'pyside2',
- 'pyqt', 'pyqt5', 'pyqtv1' and 'pyqtdefault'
-
- Returns
- -------
-
- A tuple of QtCore, QtGui, QtSvg, QT_API
- The first three are the Qt modules. The last is the
- string indicating which module was loaded.
-
- Raises
- ------
- ImportError, if it isn't possible to import any requested
-    bindings (either because they aren't installed, or because
- an incompatible library has already been installed)
- """
+ 'pyqt', 'pyqt5', 'pyqtv1' and 'pyqtdefault'
+
+ Returns
+ -------
+
+ A tuple of QtCore, QtGui, QtSvg, QT_API
+ The first three are the Qt modules. The last is the
+ string indicating which module was loaded.
+
+ Raises
+ ------
+ ImportError, if it isn't possible to import any requested
+    bindings (either because they aren't installed, or because
+ an incompatible library has already been installed)
+ """
loaders = {
QT_API_PYSIDE2: import_pyside2,
QT_API_PYSIDE: import_pyside,
- QT_API_PYQT: import_pyqt4,
- QT_API_PYQT5: import_pyqt5,
- QT_API_PYQTv1: partial(import_pyqt4, version=1),
- QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None)
+ QT_API_PYQT: import_pyqt4,
+ QT_API_PYQT5: import_pyqt5,
+ QT_API_PYQTv1: partial(import_pyqt4, version=1),
+ QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None)
}
-
- for api in api_options:
-
- if api not in loaders:
- raise RuntimeError(
- "Invalid Qt API %r, valid values are: %s" %
- (api, ", ".join(["%r" % k for k in loaders.keys()])))
-
- if not can_import(api):
- continue
-
- #cannot safely recover from an ImportError during this
- result = loaders[api]()
- api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
- commit_api(api)
- return result
- else:
- raise ImportError("""
- Could not load requested Qt binding. Please ensure that
+
+ for api in api_options:
+
+ if api not in loaders:
+ raise RuntimeError(
+ "Invalid Qt API %r, valid values are: %s" %
+ (api, ", ".join(["%r" % k for k in loaders.keys()])))
+
+ if not can_import(api):
+ continue
+
+ #cannot safely recover from an ImportError during this
+ result = loaders[api]()
+ api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
+ commit_api(api)
+ return result
+ else:
+ raise ImportError("""
+ Could not load requested Qt binding. Please ensure that
PyQt4 >= 4.7, PyQt5, PySide >= 1.0.3 or PySide2 is available,
- and only one is imported per session.
-
- Currently-imported Qt library: %r
- PyQt4 available (requires QtCore, QtGui, QtSvg): %s
- PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
- PySide >= 1.0.3 installed: %s
+ and only one is imported per session.
+
+ Currently-imported Qt library: %r
+ PyQt4 available (requires QtCore, QtGui, QtSvg): %s
+ PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
+ PySide >= 1.0.3 installed: %s
PySide2 installed: %s
- Tried to load: %r
- """ % (loaded_api(),
- has_binding(QT_API_PYQT),
- has_binding(QT_API_PYQT5),
- has_binding(QT_API_PYSIDE),
+ Tried to load: %r
+ """ % (loaded_api(),
+ has_binding(QT_API_PYQT),
+ has_binding(QT_API_PYQT5),
+ has_binding(QT_API_PYSIDE),
has_binding(QT_API_PYSIDE2),
- api_options))
+ api_options))
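
A sketch of the contract ``load_qt`` exposes to callers such as ``qt_for_kernel`` (the preference list here is illustrative)::

    from IPython.external.qt_loaders import load_qt

    QtCore, QtGui, QtSvg, QT_API = load_qt(['pyqt5', 'pyside2', 'pyside'])
    # QT_API names the binding that was actually imported; the ImportDenier
    # meta_path hook now rejects imports of the competing bindings.
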
diff --git a/contrib/python/ipython/py2/IPython/frontend.py b/contrib/python/ipython/py2/IPython/frontend.py
index d60957b19b..9cc3eaff2f 100644
--- a/contrib/python/ipython/py2/IPython/frontend.py
+++ b/contrib/python/ipython/py2/IPython/frontend.py
@@ -1,29 +1,29 @@
-"""
-Shim to maintain backwards compatibility with old frontend imports.
-
-We have moved all contents of the old `frontend` subpackage into top-level
-subpackages (`html`, `qt` and `terminal`), and flattened the notebook into
-just `IPython.html`, formerly `IPython.frontend.html.notebook`.
-
-This will let code that was making `from IPython.frontend...` calls continue
-working, though a warning will be printed.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old frontend imports.
+
+We have moved all contents of the old `frontend` subpackage into top-level
+subpackages (`html`, `qt` and `terminal`), and flattened the notebook into
+just `IPython.html`, formerly `IPython.frontend.html.notebook`.
+
+This will let code that was making `from IPython.frontend...` calls continue
+working, though a warning will be printed.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The top-level `frontend` package has been deprecated since IPython 1.0. "
- "All its subpackages have been moved to the top `IPython` level.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.frontend.html.notebook'] = ShimModule(
- src='IPython.frontend.html.notebook', mirror='IPython.html')
-sys.modules['IPython.frontend'] = ShimModule(
- src='IPython.frontend', mirror='IPython')
+ "All its subpackages have been moved to the top `IPython` level.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.frontend.html.notebook'] = ShimModule(
+ src='IPython.frontend.html.notebook', mirror='IPython.html')
+sys.modules['IPython.frontend'] = ShimModule(
+ src='IPython.frontend', mirror='IPython')
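
The file above retires IPython.frontend by planting ShimModule instances directly in sys.modules, so stale imports are redirected instead of failing. Roughly, a shim is a module object that forwards attribute access to its mirror package; a simplified sketch in that spirit (not IPython's actual ShimModule implementation; old_pkg and new_pkg are placeholder names):

    import importlib
    import sys
    import types

    class Shim(types.ModuleType):
        def __init__(self, src, mirror):
            super(Shim, self).__init__(src)
            self._mirror = mirror

        def __getattr__(self, name):
            # Resolve attributes lazily against the mirror package.
            return getattr(importlib.import_module(self._mirror), name)

    # Once registered, `import old_pkg` returns the shim, whose
    # attributes are actually served by new_pkg.
    sys.modules['old_pkg'] = Shim('old_pkg', 'new_pkg')
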
diff --git a/contrib/python/ipython/py2/IPython/html.py b/contrib/python/ipython/py2/IPython/html.py
index 3e5595d8e6..050be5c599 100644
--- a/contrib/python/ipython/py2/IPython/html.py
+++ b/contrib/python/ipython/py2/IPython/html.py
@@ -1,28 +1,28 @@
-"""
-Shim to maintain backwards compatibility with old IPython.html imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.html imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.html` package has been deprecated since IPython 4.0. "
- "You should import from `notebook` instead. "
- "`IPython.html.widgets` has moved to `ipywidgets`.", ShimWarning)
-
-_widgets = sys.modules['IPython.html.widgets'] = ShimModule(
- src='IPython.html.widgets', mirror='ipywidgets')
-
-_html = ShimModule(
- src='IPython.html', mirror='notebook')
-
-# hook up widgets
-_html.widgets = _widgets
-sys.modules['IPython.html'] = _html
-
-if __name__ == '__main__':
- from notebook import notebookapp as app
- app.launch_new_instance()
+ "You should import from `notebook` instead. "
+ "`IPython.html.widgets` has moved to `ipywidgets`.", ShimWarning)
+
+_widgets = sys.modules['IPython.html.widgets'] = ShimModule(
+ src='IPython.html.widgets', mirror='ipywidgets')
+
+_html = ShimModule(
+ src='IPython.html', mirror='notebook')
+
+# hook up widgets
+_html.widgets = _widgets
+sys.modules['IPython.html'] = _html
+
+if __name__ == '__main__':
+ from notebook import notebookapp as app
+ app.launch_new_instance()
diff --git a/contrib/python/ipython/py2/IPython/kernel/__init__.py b/contrib/python/ipython/py2/IPython/kernel/__init__.py
index 036548fffa..70a05ed4aa 100644
--- a/contrib/python/ipython/py2/IPython/kernel/__init__.py
+++ b/contrib/python/ipython/py2/IPython/kernel/__init__.py
@@ -1,35 +1,35 @@
-"""
-Shim to maintain backwards compatibility with old IPython.kernel imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.kernel imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.kernel` package has been deprecated since IPython 4.0."
- "You should import from ipykernel or jupyter_client instead.", ShimWarning)
-
-
-# zmq subdir is gone
-sys.modules['IPython.kernel.zmq.session'] = ShimModule(
- src='IPython.kernel.zmq.session', mirror='jupyter_client.session')
-sys.modules['IPython.kernel.zmq'] = ShimModule(
- src='IPython.kernel.zmq', mirror='ipykernel')
-
-for pkg in ('comm', 'inprocess'):
- src = 'IPython.kernel.%s' % pkg
- sys.modules[src] = ShimModule(src=src, mirror='ipykernel.%s' % pkg)
-
-for pkg in ('ioloop', 'blocking'):
- src = 'IPython.kernel.%s' % pkg
- sys.modules[src] = ShimModule(src=src, mirror='jupyter_client.%s' % pkg)
-
-# required for `from IPython.kernel import PKG`
-from ipykernel import comm, inprocess
-from jupyter_client import ioloop, blocking
-# public API
-from ipykernel.connect import *
-from jupyter_client import *
+ "You should import from ipykernel or jupyter_client instead.", ShimWarning)
+
+
+# zmq subdir is gone
+sys.modules['IPython.kernel.zmq.session'] = ShimModule(
+ src='IPython.kernel.zmq.session', mirror='jupyter_client.session')
+sys.modules['IPython.kernel.zmq'] = ShimModule(
+ src='IPython.kernel.zmq', mirror='ipykernel')
+
+for pkg in ('comm', 'inprocess'):
+ src = 'IPython.kernel.%s' % pkg
+ sys.modules[src] = ShimModule(src=src, mirror='ipykernel.%s' % pkg)
+
+for pkg in ('ioloop', 'blocking'):
+ src = 'IPython.kernel.%s' % pkg
+ sys.modules[src] = ShimModule(src=src, mirror='jupyter_client.%s' % pkg)
+
+# required for `from IPython.kernel import PKG`
+from ipykernel import comm, inprocess
+from jupyter_client import ioloop, blocking
+# public API
+from ipykernel.connect import *
+from jupyter_client import *
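
With those aliases in place, legacy import paths keep resolving against the new packages. A usage sketch (assuming ipykernel and jupyter_client are installed):

    import IPython.kernel                 # runs the shim registrations above
    from IPython.kernel import inprocess  # served by the ipykernel.inprocess alias
    from IPython.kernel import manager    # a shim module re-exporting jupyter_client.manager
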
diff --git a/contrib/python/ipython/py2/IPython/kernel/__main__.py b/contrib/python/ipython/py2/IPython/kernel/__main__.py
index adafe73d02..d1f0cf5334 100644
--- a/contrib/python/ipython/py2/IPython/kernel/__main__.py
+++ b/contrib/python/ipython/py2/IPython/kernel/__main__.py
@@ -1,3 +1,3 @@
-if __name__ == '__main__':
- from ipykernel import kernelapp as app
- app.launch_new_instance()
+if __name__ == '__main__':
+ from ipykernel import kernelapp as app
+ app.launch_new_instance()
diff --git a/contrib/python/ipython/py2/IPython/kernel/adapter.py b/contrib/python/ipython/py2/IPython/kernel/adapter.py
index 8a52dbbc2b..3b8c046b2d 100644
--- a/contrib/python/ipython/py2/IPython/kernel/adapter.py
+++ b/contrib/python/ipython/py2/IPython/kernel/adapter.py
@@ -1 +1 @@
-from jupyter_client.adapter import *
+from jupyter_client.adapter import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/channels.py b/contrib/python/ipython/py2/IPython/kernel/channels.py
index f204db9ca3..8c7fe2a063 100644
--- a/contrib/python/ipython/py2/IPython/kernel/channels.py
+++ b/contrib/python/ipython/py2/IPython/kernel/channels.py
@@ -1 +1 @@
-from jupyter_client.channels import *
+from jupyter_client.channels import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/channelsabc.py b/contrib/python/ipython/py2/IPython/kernel/channelsabc.py
index c0a44511b1..88944012d4 100644
--- a/contrib/python/ipython/py2/IPython/kernel/channelsabc.py
+++ b/contrib/python/ipython/py2/IPython/kernel/channelsabc.py
@@ -1 +1 @@
-from jupyter_client.channelsabc import *
+from jupyter_client.channelsabc import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/client.py b/contrib/python/ipython/py2/IPython/kernel/client.py
index d9768e5bc4..a98690b74c 100644
--- a/contrib/python/ipython/py2/IPython/kernel/client.py
+++ b/contrib/python/ipython/py2/IPython/kernel/client.py
@@ -1 +1 @@
-from jupyter_client.client import *
+from jupyter_client.client import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/clientabc.py b/contrib/python/ipython/py2/IPython/kernel/clientabc.py
index e82cb19e2a..e0cf06c942 100644
--- a/contrib/python/ipython/py2/IPython/kernel/clientabc.py
+++ b/contrib/python/ipython/py2/IPython/kernel/clientabc.py
@@ -1 +1 @@
-from jupyter_client.clientabc import *
+from jupyter_client.clientabc import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/connect.py b/contrib/python/ipython/py2/IPython/kernel/connect.py
index be992cc709..5b6d40a5d3 100644
--- a/contrib/python/ipython/py2/IPython/kernel/connect.py
+++ b/contrib/python/ipython/py2/IPython/kernel/connect.py
@@ -1,2 +1,2 @@
-from ipykernel.connect import *
-from jupyter_client.connect import *
+from ipykernel.connect import *
+from jupyter_client.connect import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/kernelspec.py b/contrib/python/ipython/py2/IPython/kernel/kernelspec.py
index 8a64337920..123419b2f5 100644
--- a/contrib/python/ipython/py2/IPython/kernel/kernelspec.py
+++ b/contrib/python/ipython/py2/IPython/kernel/kernelspec.py
@@ -1 +1 @@
-from jupyter_client.kernelspec import *
+from jupyter_client.kernelspec import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/kernelspecapp.py b/contrib/python/ipython/py2/IPython/kernel/kernelspecapp.py
index 16f9f9eba9..28cd33abd3 100644
--- a/contrib/python/ipython/py2/IPython/kernel/kernelspecapp.py
+++ b/contrib/python/ipython/py2/IPython/kernel/kernelspecapp.py
@@ -1 +1 @@
-from jupyter_client.kernelspecapp import *
+from jupyter_client.kernelspecapp import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/launcher.py b/contrib/python/ipython/py2/IPython/kernel/launcher.py
index 0500ab198b..1953bc4809 100644
--- a/contrib/python/ipython/py2/IPython/kernel/launcher.py
+++ b/contrib/python/ipython/py2/IPython/kernel/launcher.py
@@ -1 +1 @@
-from jupyter_client.launcher import *
+from jupyter_client.launcher import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/manager.py b/contrib/python/ipython/py2/IPython/kernel/manager.py
index 9d9d84806f..c88097cff6 100644
--- a/contrib/python/ipython/py2/IPython/kernel/manager.py
+++ b/contrib/python/ipython/py2/IPython/kernel/manager.py
@@ -1 +1 @@
-from jupyter_client.manager import *
+from jupyter_client.manager import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/managerabc.py b/contrib/python/ipython/py2/IPython/kernel/managerabc.py
index f748bdf2ce..6b40827ff8 100644
--- a/contrib/python/ipython/py2/IPython/kernel/managerabc.py
+++ b/contrib/python/ipython/py2/IPython/kernel/managerabc.py
@@ -1 +1 @@
-from jupyter_client.managerabc import *
+from jupyter_client.managerabc import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/multikernelmanager.py b/contrib/python/ipython/py2/IPython/kernel/multikernelmanager.py
index 71fd8dbb34..ce576e27ea 100644
--- a/contrib/python/ipython/py2/IPython/kernel/multikernelmanager.py
+++ b/contrib/python/ipython/py2/IPython/kernel/multikernelmanager.py
@@ -1 +1 @@
-from jupyter_client.multikernelmanager import *
+from jupyter_client.multikernelmanager import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/restarter.py b/contrib/python/ipython/py2/IPython/kernel/restarter.py
index 2b1de99c8e..dc24117c3a 100644
--- a/contrib/python/ipython/py2/IPython/kernel/restarter.py
+++ b/contrib/python/ipython/py2/IPython/kernel/restarter.py
@@ -1 +1 @@
-from jupyter_client.restarter import *
+from jupyter_client.restarter import *
diff --git a/contrib/python/ipython/py2/IPython/kernel/threaded.py b/contrib/python/ipython/py2/IPython/kernel/threaded.py
index 97997eeb70..4a1072f7fe 100644
--- a/contrib/python/ipython/py2/IPython/kernel/threaded.py
+++ b/contrib/python/ipython/py2/IPython/kernel/threaded.py
@@ -1 +1 @@
-from jupyter_client.threaded import *
+from jupyter_client.threaded import *
diff --git a/contrib/python/ipython/py2/IPython/lib/__init__.py b/contrib/python/ipython/py2/IPython/lib/__init__.py
index 21e34d84ca..8eb89012df 100644
--- a/contrib/python/ipython/py2/IPython/lib/__init__.py
+++ b/contrib/python/ipython/py2/IPython/lib/__init__.py
@@ -1,21 +1,21 @@
-# encoding: utf-8
-"""
-Extra capabilities for IPython
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from IPython.lib.security import passwd
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
+# encoding: utf-8
+"""
+Extra capabilities for IPython
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from IPython.lib.security import passwd
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
diff --git a/contrib/python/ipython/py2/IPython/lib/backgroundjobs.py b/contrib/python/ipython/py2/IPython/lib/backgroundjobs.py
index 1acfe7df1e..b724126bbb 100644
--- a/contrib/python/ipython/py2/IPython/lib/backgroundjobs.py
+++ b/contrib/python/ipython/py2/IPython/lib/backgroundjobs.py
@@ -1,491 +1,491 @@
-# -*- coding: utf-8 -*-
-"""Manage background (threaded) jobs conveniently from an interactive shell.
-
-This module provides a BackgroundJobManager class. This is the main class
-meant for public usage, it implements an object which can create and manage
-new background jobs.
-
-It also provides the actual job classes managed by these BackgroundJobManager
-objects, see their docstrings below.
-
-
-This system was inspired by discussions with B. Granger and the
-BackgroundCommand class described in the book Python Scripting for
-Computational Science, by H. P. Langtangen:
-
-http://folk.uio.no/hpl/scripting
-
-(although ultimately no code from this text was used, as IPython's system is a
-separate implementation).
-
-An example notebook is provided in our documentation illustrating interactive
-use of the system.
-"""
-from __future__ import print_function
-
-#*****************************************************************************
-# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-# Code begins
-import sys
-import threading
-
-from IPython import get_ipython
-from IPython.core.ultratb import AutoFormattedTB
+# -*- coding: utf-8 -*-
+"""Manage background (threaded) jobs conveniently from an interactive shell.
+
+This module provides a BackgroundJobManager class. This is the main class
+meant for public usage, it implements an object which can create and manage
+new background jobs.
+
+It also provides the actual job classes managed by these BackgroundJobManager
+objects, see their docstrings below.
+
+
+This system was inspired by discussions with B. Granger and the
+BackgroundCommand class described in the book Python Scripting for
+Computational Science, by H. P. Langtangen:
+
+http://folk.uio.no/hpl/scripting
+
+(although ultimately no code from this text was used, as IPython's system is a
+separate implementation).
+
+An example notebook is provided in our documentation illustrating interactive
+use of the system.
+"""
+from __future__ import print_function
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+# Code begins
+import sys
+import threading
+
+from IPython import get_ipython
+from IPython.core.ultratb import AutoFormattedTB
from logging import error
-from IPython.utils.py3compat import string_types
-
-
-class BackgroundJobManager(object):
- """Class to manage a pool of backgrounded threaded jobs.
-
- Below, we assume that 'jobs' is a BackgroundJobManager instance.
-
- Usage summary (see the method docstrings for details):
-
- jobs.new(...) -> start a new job
-
- jobs() or jobs.status() -> print status summary of all jobs
-
- jobs[N] -> returns job number N.
-
- foo = jobs[N].result -> assign to variable foo the result of job N
-
- jobs[N].traceback() -> print the traceback of dead job N
-
- jobs.remove(N) -> remove (finished) job N
-
- jobs.flush() -> remove all finished jobs
-
- As a convenience feature, BackgroundJobManager instances provide the
- utility result and traceback methods which retrieve the corresponding
- information from the jobs list:
-
- jobs.result(N) <--> jobs[N].result
- jobs.traceback(N) <--> jobs[N].traceback()
-
- While this appears minor, it allows you to use tab completion
- interactively on the job manager instance.
- """
-
- def __init__(self):
- # Lists for job management, accessed via a property to ensure they're
- # up to date.x
- self._running = []
- self._completed = []
- self._dead = []
- # A dict of all jobs, so users can easily access any of them
- self.all = {}
- # For reporting
- self._comp_report = []
- self._dead_report = []
- # Store status codes locally for fast lookups
- self._s_created = BackgroundJobBase.stat_created_c
- self._s_running = BackgroundJobBase.stat_running_c
- self._s_completed = BackgroundJobBase.stat_completed_c
- self._s_dead = BackgroundJobBase.stat_dead_c
-
- @property
- def running(self):
- self._update_status()
- return self._running
-
- @property
- def dead(self):
- self._update_status()
- return self._dead
-
- @property
- def completed(self):
- self._update_status()
- return self._completed
-
- def new(self, func_or_exp, *args, **kwargs):
- """Add a new background job and start it in a separate thread.
-
- There are two types of jobs which can be created:
-
- 1. Jobs based on expressions which can be passed to an eval() call.
- The expression must be given as a string. For example:
-
- job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
-
- The given expression is passed to eval(), along with the optional
- global/local dicts provided. If no dicts are given, they are
- extracted automatically from the caller's frame.
-
- A Python statement is NOT a valid eval() expression. Basically, you
- can only use as an eval() argument something which can go on the right
- of an '=' sign and be assigned to a variable.
-
- For example,"print 'hello'" is not valid, but '2+3' is.
-
- 2. Jobs given a function object, optionally passing additional
- positional arguments:
-
- job_manager.new(myfunc, x, y)
-
- The function is called with the given arguments.
-
- If you need to pass keyword arguments to your function, you must
- supply them as a dict named kw:
-
- job_manager.new(myfunc, x, y, kw=dict(z=1))
-
- The reason for this assymmetry is that the new() method needs to
- maintain access to its own keywords, and this prevents name collisions
- between arguments to new() and arguments to your own functions.
-
- In both cases, the result is stored in the job.result field of the
- background job object.
-
- You can set `daemon` attribute of the thread by giving the keyword
- argument `daemon`.
-
- Notes and caveats:
-
- 1. All threads running share the same standard output. Thus, if your
- background jobs generate output, it will come out on top of whatever
- you are currently writing. For this reason, background jobs are best
- used with silent functions which simply return their output.
-
- 2. Threads also all work within the same global namespace, and this
- system does not lock interactive variables. So if you send job to the
- background which operates on a mutable object for a long time, and
- start modifying that same mutable object interactively (or in another
- backgrounded job), all sorts of bizarre behaviour will occur.
-
- 3. If a background job is spending a lot of time inside a C extension
- module which does not release the Python Global Interpreter Lock
- (GIL), this will block the IPython prompt. This is simply because the
- Python interpreter can only switch between threads at Python
- bytecodes. While the execution is inside C code, the interpreter must
- simply wait unless the extension module releases the GIL.
-
- 4. There is no way, due to limitations in the Python threads library,
- to kill a thread once it has started."""
-
- if callable(func_or_exp):
- kw = kwargs.get('kw',{})
- job = BackgroundJobFunc(func_or_exp,*args,**kw)
- elif isinstance(func_or_exp, string_types):
- if not args:
- frame = sys._getframe(1)
- glob, loc = frame.f_globals, frame.f_locals
- elif len(args)==1:
- glob = loc = args[0]
- elif len(args)==2:
- glob,loc = args
- else:
- raise ValueError(
- 'Expression jobs take at most 2 args (globals,locals)')
- job = BackgroundJobExpr(func_or_exp, glob, loc)
- else:
- raise TypeError('invalid args for new job')
-
- if kwargs.get('daemon', False):
- job.daemon = True
- job.num = len(self.all)+1 if self.all else 0
- self.running.append(job)
- self.all[job.num] = job
- print('Starting job # %s in a separate thread.' % job.num)
- job.start()
- return job
-
- def __getitem__(self, job_key):
- num = job_key if isinstance(job_key, int) else job_key.num
- return self.all[num]
-
- def __call__(self):
- """An alias to self.status(),
-
- This allows you to simply call a job manager instance much like the
- Unix `jobs` shell command."""
-
- return self.status()
-
- def _update_status(self):
- """Update the status of the job lists.
-
- This method moves finished jobs to one of two lists:
- - self.completed: jobs which completed successfully
- - self.dead: jobs which finished but died.
-
- It also copies those jobs to corresponding _report lists. These lists
- are used to report jobs completed/dead since the last update, and are
- then cleared by the reporting function after each call."""
-
- # Status codes
- srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
- # State lists, use the actual lists b/c the public names are properties
- # that call this very function on access
- running, completed, dead = self._running, self._completed, self._dead
-
- # Now, update all state lists
- for num, job in enumerate(running):
- stat = job.stat_code
- if stat == srun:
- continue
- elif stat == scomp:
- completed.append(job)
- self._comp_report.append(job)
- running[num] = False
- elif stat == sdead:
- dead.append(job)
- self._dead_report.append(job)
- running[num] = False
- # Remove dead/completed jobs from running list
- running[:] = filter(None, running)
-
- def _group_report(self,group,name):
- """Report summary for a given job group.
-
- Return True if the group had any elements."""
-
- if group:
- print('%s jobs:' % name)
- for job in group:
- print('%s : %s' % (job.num,job))
- print()
- return True
-
- def _group_flush(self,group,name):
- """Flush a given job group
-
- Return True if the group had any elements."""
-
- njobs = len(group)
- if njobs:
- plural = {1:''}.setdefault(njobs,'s')
- print('Flushing %s %s job%s.' % (njobs,name,plural))
- group[:] = []
- return True
-
- def _status_new(self):
- """Print the status of newly finished jobs.
-
- Return True if any new jobs are reported.
-
- This call resets its own state every time, so it only reports jobs
- which have finished since the last time it was called."""
-
- self._update_status()
- new_comp = self._group_report(self._comp_report, 'Completed')
- new_dead = self._group_report(self._dead_report,
- 'Dead, call jobs.traceback() for details')
- self._comp_report[:] = []
- self._dead_report[:] = []
- return new_comp or new_dead
-
- def status(self,verbose=0):
- """Print a status of all jobs currently being managed."""
-
- self._update_status()
- self._group_report(self.running,'Running')
- self._group_report(self.completed,'Completed')
- self._group_report(self.dead,'Dead')
- # Also flush the report queues
- self._comp_report[:] = []
- self._dead_report[:] = []
-
- def remove(self,num):
- """Remove a finished (completed or dead) job."""
-
- try:
- job = self.all[num]
- except KeyError:
- error('Job #%s not found' % num)
- else:
- stat_code = job.stat_code
- if stat_code == self._s_running:
- error('Job #%s is still running, it can not be removed.' % num)
- return
- elif stat_code == self._s_completed:
- self.completed.remove(job)
- elif stat_code == self._s_dead:
- self.dead.remove(job)
-
- def flush(self):
- """Flush all finished jobs (completed and dead) from lists.
-
- Running jobs are never flushed.
-
- It first calls _status_new(), to update info. If any jobs have
- completed since the last _status_new() call, the flush operation
- aborts."""
-
- # Remove the finished jobs from the master dict
- alljobs = self.all
- for job in self.completed+self.dead:
- del(alljobs[job.num])
-
- # Now flush these lists completely
- fl_comp = self._group_flush(self.completed, 'Completed')
- fl_dead = self._group_flush(self.dead, 'Dead')
- if not (fl_comp or fl_dead):
- print('No jobs to flush.')
-
- def result(self,num):
- """result(N) -> return the result of job N."""
- try:
- return self.all[num].result
- except KeyError:
- error('Job #%s not found' % num)
-
- def _traceback(self, job):
- num = job if isinstance(job, int) else job.num
- try:
- self.all[num].traceback()
- except KeyError:
- error('Job #%s not found' % num)
-
- def traceback(self, job=None):
- if job is None:
- self._update_status()
- for deadjob in self.dead:
- print("Traceback for: %r" % deadjob)
- self._traceback(deadjob)
- print()
- else:
- self._traceback(job)
-
-
-class BackgroundJobBase(threading.Thread):
- """Base class to build BackgroundJob classes.
-
- The derived classes must implement:
-
- - Their own __init__, since the one here raises NotImplementedError. The
- derived constructor must call self._init() at the end, to provide common
- initialization.
-
- - A strform attribute used in calls to __str__.
-
- - A call() method, which will make the actual execution call and must
- return a value to be held in the 'result' field of the job object.
- """
-
- # Class constants for status, in string and as numerical codes (when
- # updating jobs lists, we don't want to do string comparisons). This will
- # be done at every user prompt, so it has to be as fast as possible
- stat_created = 'Created'; stat_created_c = 0
- stat_running = 'Running'; stat_running_c = 1
- stat_completed = 'Completed'; stat_completed_c = 2
- stat_dead = 'Dead (Exception), call jobs.traceback() for details'
- stat_dead_c = -1
-
- def __init__(self):
- """Must be implemented in subclasses.
-
- Subclasses must call :meth:`_init` for standard initialisation.
- """
- raise NotImplementedError("This class can not be instantiated directly.")
-
- def _init(self):
- """Common initialization for all BackgroundJob objects"""
-
- for attr in ['call','strform']:
- assert hasattr(self,attr), "Missing attribute <%s>" % attr
-
- # The num tag can be set by an external job manager
- self.num = None
-
- self.status = BackgroundJobBase.stat_created
- self.stat_code = BackgroundJobBase.stat_created_c
- self.finished = False
- self.result = '<BackgroundJob has not completed>'
-
- # reuse the ipython traceback handler if we can get to it, otherwise
- # make a new one
- try:
- make_tb = get_ipython().InteractiveTB.text
- except:
- make_tb = AutoFormattedTB(mode = 'Context',
- color_scheme='NoColor',
- tb_offset = 1).text
- # Note that the actual API for text() requires the three args to be
- # passed in, so we wrap it in a simple lambda.
- self._make_tb = lambda : make_tb(None, None, None)
-
- # Hold a formatted traceback if one is generated.
- self._tb = None
-
- threading.Thread.__init__(self)
-
- def __str__(self):
- return self.strform
-
- def __repr__(self):
- return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
-
- def traceback(self):
- print(self._tb)
-
- def run(self):
- try:
- self.status = BackgroundJobBase.stat_running
- self.stat_code = BackgroundJobBase.stat_running_c
- self.result = self.call()
- except:
- self.status = BackgroundJobBase.stat_dead
- self.stat_code = BackgroundJobBase.stat_dead_c
- self.finished = None
- self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
- self._tb = self._make_tb()
- else:
- self.status = BackgroundJobBase.stat_completed
- self.stat_code = BackgroundJobBase.stat_completed_c
- self.finished = True
-
-
-class BackgroundJobExpr(BackgroundJobBase):
- """Evaluate an expression as a background job (uses a separate thread)."""
-
- def __init__(self, expression, glob=None, loc=None):
- """Create a new job from a string which can be fed to eval().
-
- global/locals dicts can be provided, which will be passed to the eval
- call."""
-
- # fail immediately if the given expression can't be compiled
- self.code = compile(expression,'<BackgroundJob compilation>','eval')
-
- glob = {} if glob is None else glob
- loc = {} if loc is None else loc
- self.expression = self.strform = expression
- self.glob = glob
- self.loc = loc
- self._init()
-
- def call(self):
- return eval(self.code,self.glob,self.loc)
-
-
-class BackgroundJobFunc(BackgroundJobBase):
- """Run a function call as a background job (uses a separate thread)."""
-
- def __init__(self, func, *args, **kwargs):
- """Create a new job from a callable object.
-
- Any positional arguments and keyword args given to this constructor
- after the initial callable are passed directly to it."""
-
- if not callable(func):
- raise TypeError(
- 'first argument to BackgroundJobFunc must be callable')
-
- self.func = func
- self.args = args
- self.kwargs = kwargs
- # The string form will only include the function passed, because
- # generating string representations of the arguments is a potentially
- # _very_ expensive operation (e.g. with large arrays).
- self.strform = str(func)
- self._init()
-
- def call(self):
- return self.func(*self.args, **self.kwargs)
+from IPython.utils.py3compat import string_types
+
+
+class BackgroundJobManager(object):
+ """Class to manage a pool of backgrounded threaded jobs.
+
+ Below, we assume that 'jobs' is a BackgroundJobManager instance.
+
+ Usage summary (see the method docstrings for details):
+
+ jobs.new(...) -> start a new job
+
+ jobs() or jobs.status() -> print status summary of all jobs
+
+ jobs[N] -> returns job number N.
+
+ foo = jobs[N].result -> assign to variable foo the result of job N
+
+ jobs[N].traceback() -> print the traceback of dead job N
+
+ jobs.remove(N) -> remove (finished) job N
+
+ jobs.flush() -> remove all finished jobs
+
+ As a convenience feature, BackgroundJobManager instances provide the
+ utility result and traceback methods which retrieve the corresponding
+ information from the jobs list:
+
+ jobs.result(N) <--> jobs[N].result
+ jobs.traceback(N) <--> jobs[N].traceback()
+
+ While this appears minor, it allows you to use tab completion
+ interactively on the job manager instance.
+ """
+
+ def __init__(self):
+ # Lists for job management, accessed via a property to ensure they're
+ # up to date.
+ self._running = []
+ self._completed = []
+ self._dead = []
+ # A dict of all jobs, so users can easily access any of them
+ self.all = {}
+ # For reporting
+ self._comp_report = []
+ self._dead_report = []
+ # Store status codes locally for fast lookups
+ self._s_created = BackgroundJobBase.stat_created_c
+ self._s_running = BackgroundJobBase.stat_running_c
+ self._s_completed = BackgroundJobBase.stat_completed_c
+ self._s_dead = BackgroundJobBase.stat_dead_c
+
+ @property
+ def running(self):
+ self._update_status()
+ return self._running
+
+ @property
+ def dead(self):
+ self._update_status()
+ return self._dead
+
+ @property
+ def completed(self):
+ self._update_status()
+ return self._completed
+
+ def new(self, func_or_exp, *args, **kwargs):
+ """Add a new background job and start it in a separate thread.
+
+ There are two types of jobs which can be created:
+
+ 1. Jobs based on expressions which can be passed to an eval() call.
+ The expression must be given as a string. For example:
+
+ job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
+
+ The given expression is passed to eval(), along with the optional
+ global/local dicts provided. If no dicts are given, they are
+ extracted automatically from the caller's frame.
+
+ A Python statement is NOT a valid eval() expression. Basically, you
+ can only use as an eval() argument something which can go on the right
+ of an '=' sign and be assigned to a variable.
+
+ For example,"print 'hello'" is not valid, but '2+3' is.
+
+ 2. Jobs given a function object, optionally passing additional
+ positional arguments:
+
+ job_manager.new(myfunc, x, y)
+
+ The function is called with the given arguments.
+
+ If you need to pass keyword arguments to your function, you must
+ supply them as a dict named kw:
+
+ job_manager.new(myfunc, x, y, kw=dict(z=1))
+
+ The reason for this asymmetry is that the new() method needs to
+ maintain access to its own keywords, and this prevents name collisions
+ between arguments to new() and arguments to your own functions.
+
+ In both cases, the result is stored in the job.result field of the
+ background job object.
+
+ You can set the `daemon` attribute of the thread by giving the keyword
+ argument `daemon`.
+
+ Notes and caveats:
+
+ 1. All threads running share the same standard output. Thus, if your
+ background jobs generate output, it will come out on top of whatever
+ you are currently writing. For this reason, background jobs are best
+ used with silent functions which simply return their output.
+
+ 2. Threads also all work within the same global namespace, and this
+ system does not lock interactive variables. So if you send a job to the
+ background which operates on a mutable object for a long time, and
+ start modifying that same mutable object interactively (or in another
+ backgrounded job), all sorts of bizarre behaviour will occur.
+
+ 3. If a background job is spending a lot of time inside a C extension
+ module which does not release the Python Global Interpreter Lock
+ (GIL), this will block the IPython prompt. This is simply because the
+ Python interpreter can only switch between threads at Python
+ bytecodes. While the execution is inside C code, the interpreter must
+ simply wait unless the extension module releases the GIL.
+
+ 4. There is no way, due to limitations in the Python threads library,
+ to kill a thread once it has started."""
+
+ if callable(func_or_exp):
+ kw = kwargs.get('kw',{})
+ job = BackgroundJobFunc(func_or_exp,*args,**kw)
+ elif isinstance(func_or_exp, string_types):
+ if not args:
+ frame = sys._getframe(1)
+ glob, loc = frame.f_globals, frame.f_locals
+ elif len(args)==1:
+ glob = loc = args[0]
+ elif len(args)==2:
+ glob,loc = args
+ else:
+ raise ValueError(
+ 'Expression jobs take at most 2 args (globals,locals)')
+ job = BackgroundJobExpr(func_or_exp, glob, loc)
+ else:
+ raise TypeError('invalid args for new job')
+
+ if kwargs.get('daemon', False):
+ job.daemon = True
+ job.num = len(self.all)+1 if self.all else 0
+ self.running.append(job)
+ self.all[job.num] = job
+ print('Starting job # %s in a separate thread.' % job.num)
+ job.start()
+ return job
+
+ def __getitem__(self, job_key):
+ num = job_key if isinstance(job_key, int) else job_key.num
+ return self.all[num]
+
+ def __call__(self):
+ """An alias to self.status(),
+
+ This allows you to simply call a job manager instance much like the
+ Unix `jobs` shell command."""
+
+ return self.status()
+
+ def _update_status(self):
+ """Update the status of the job lists.
+
+ This method moves finished jobs to one of two lists:
+ - self.completed: jobs which completed successfully
+ - self.dead: jobs which finished but died.
+
+ It also copies those jobs to corresponding _report lists. These lists
+ are used to report jobs completed/dead since the last update, and are
+ then cleared by the reporting function after each call."""
+
+ # Status codes
+ srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
+ # State lists, use the actual lists b/c the public names are properties
+ # that call this very function on access
+ running, completed, dead = self._running, self._completed, self._dead
+
+ # Now, update all state lists
+ for num, job in enumerate(running):
+ stat = job.stat_code
+ if stat == srun:
+ continue
+ elif stat == scomp:
+ completed.append(job)
+ self._comp_report.append(job)
+ running[num] = False
+ elif stat == sdead:
+ dead.append(job)
+ self._dead_report.append(job)
+ running[num] = False
+ # Remove dead/completed jobs from running list
+ running[:] = filter(None, running)
+
+ def _group_report(self,group,name):
+ """Report summary for a given job group.
+
+ Return True if the group had any elements."""
+
+ if group:
+ print('%s jobs:' % name)
+ for job in group:
+ print('%s : %s' % (job.num,job))
+ print()
+ return True
+
+ def _group_flush(self,group,name):
+ """Flush a given job group
+
+ Return True if the group had any elements."""
+
+ njobs = len(group)
+ if njobs:
+ plural = {1:''}.setdefault(njobs,'s')
+ print('Flushing %s %s job%s.' % (njobs,name,plural))
+ group[:] = []
+ return True
+
+ def _status_new(self):
+ """Print the status of newly finished jobs.
+
+ Return True if any new jobs are reported.
+
+ This call resets its own state every time, so it only reports jobs
+ which have finished since the last time it was called."""
+
+ self._update_status()
+ new_comp = self._group_report(self._comp_report, 'Completed')
+ new_dead = self._group_report(self._dead_report,
+ 'Dead, call jobs.traceback() for details')
+ self._comp_report[:] = []
+ self._dead_report[:] = []
+ return new_comp or new_dead
+
+ def status(self,verbose=0):
+ """Print a status of all jobs currently being managed."""
+
+ self._update_status()
+ self._group_report(self.running,'Running')
+ self._group_report(self.completed,'Completed')
+ self._group_report(self.dead,'Dead')
+ # Also flush the report queues
+ self._comp_report[:] = []
+ self._dead_report[:] = []
+
+ def remove(self,num):
+ """Remove a finished (completed or dead) job."""
+
+ try:
+ job = self.all[num]
+ except KeyError:
+ error('Job #%s not found' % num)
+ else:
+ stat_code = job.stat_code
+ if stat_code == self._s_running:
+ error('Job #%s is still running, it cannot be removed.' % num)
+ return
+ elif stat_code == self._s_completed:
+ self.completed.remove(job)
+ elif stat_code == self._s_dead:
+ self.dead.remove(job)
+
+ def flush(self):
+ """Flush all finished jobs (completed and dead) from lists.
+
+ Running jobs are never flushed.
+
+ The completed and dead properties accessed below refresh the job
+ lists first, so jobs that finished since the last status check are
+ flushed as well."""
+
+ # Remove the finished jobs from the master dict
+ alljobs = self.all
+ for job in self.completed+self.dead:
+ del(alljobs[job.num])
+
+ # Now flush these lists completely
+ fl_comp = self._group_flush(self.completed, 'Completed')
+ fl_dead = self._group_flush(self.dead, 'Dead')
+ if not (fl_comp or fl_dead):
+ print('No jobs to flush.')
+
+ def result(self,num):
+ """result(N) -> return the result of job N."""
+ try:
+ return self.all[num].result
+ except KeyError:
+ error('Job #%s not found' % num)
+
+ def _traceback(self, job):
+ num = job if isinstance(job, int) else job.num
+ try:
+ self.all[num].traceback()
+ except KeyError:
+ error('Job #%s not found' % num)
+
+ def traceback(self, job=None):
+ if job is None:
+ self._update_status()
+ for deadjob in self.dead:
+ print("Traceback for: %r" % deadjob)
+ self._traceback(deadjob)
+ print()
+ else:
+ self._traceback(job)
+
+
+class BackgroundJobBase(threading.Thread):
+ """Base class to build BackgroundJob classes.
+
+ The derived classes must implement:
+
+ - Their own __init__, since the one here raises NotImplementedError. The
+ derived constructor must call self._init() at the end, to provide common
+ initialization.
+
+ - A strform attribute used in calls to __str__.
+
+ - A call() method, which will make the actual execution call and must
+ return a value to be held in the 'result' field of the job object.
+ """
+
+ # Class constants for status, in string and as numerical codes (when
+ # updating jobs lists, we don't want to do string comparisons). This will
+ # be done at every user prompt, so it has to be as fast as possible
+ stat_created = 'Created'; stat_created_c = 0
+ stat_running = 'Running'; stat_running_c = 1
+ stat_completed = 'Completed'; stat_completed_c = 2
+ stat_dead = 'Dead (Exception), call jobs.traceback() for details'
+ stat_dead_c = -1
+
+ def __init__(self):
+ """Must be implemented in subclasses.
+
+ Subclasses must call :meth:`_init` for standard initialisation.
+ """
+ raise NotImplementedError("This class cannot be instantiated directly.")
+
+ def _init(self):
+ """Common initialization for all BackgroundJob objects"""
+
+ for attr in ['call','strform']:
+ assert hasattr(self,attr), "Missing attribute <%s>" % attr
+
+ # The num tag can be set by an external job manager
+ self.num = None
+
+ self.status = BackgroundJobBase.stat_created
+ self.stat_code = BackgroundJobBase.stat_created_c
+ self.finished = False
+ self.result = '<BackgroundJob has not completed>'
+
+ # reuse the ipython traceback handler if we can get to it, otherwise
+ # make a new one
+ try:
+ make_tb = get_ipython().InteractiveTB.text
+ except:
+ make_tb = AutoFormattedTB(mode = 'Context',
+ color_scheme='NoColor',
+ tb_offset = 1).text
+ # Note that the actual API for text() requires the three args to be
+ # passed in, so we wrap it in a simple lambda.
+ self._make_tb = lambda : make_tb(None, None, None)
+
+ # Hold a formatted traceback if one is generated.
+ self._tb = None
+
+ threading.Thread.__init__(self)
+
+ def __str__(self):
+ return self.strform
+
+ def __repr__(self):
+ return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
+
+ def traceback(self):
+ print(self._tb)
+
+ def run(self):
+ try:
+ self.status = BackgroundJobBase.stat_running
+ self.stat_code = BackgroundJobBase.stat_running_c
+ self.result = self.call()
+ except:
+ self.status = BackgroundJobBase.stat_dead
+ self.stat_code = BackgroundJobBase.stat_dead_c
+ self.finished = None
+ self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
+ self._tb = self._make_tb()
+ else:
+ self.status = BackgroundJobBase.stat_completed
+ self.stat_code = BackgroundJobBase.stat_completed_c
+ self.finished = True
+
+
+class BackgroundJobExpr(BackgroundJobBase):
+ """Evaluate an expression as a background job (uses a separate thread)."""
+
+ def __init__(self, expression, glob=None, loc=None):
+ """Create a new job from a string which can be fed to eval().
+
+ global/locals dicts can be provided, which will be passed to the eval
+ call."""
+
+ # fail immediately if the given expression can't be compiled
+ self.code = compile(expression,'<BackgroundJob compilation>','eval')
+
+ glob = {} if glob is None else glob
+ loc = {} if loc is None else loc
+ self.expression = self.strform = expression
+ self.glob = glob
+ self.loc = loc
+ self._init()
+
+ def call(self):
+ return eval(self.code,self.glob,self.loc)
+
+
+class BackgroundJobFunc(BackgroundJobBase):
+ """Run a function call as a background job (uses a separate thread)."""
+
+ def __init__(self, func, *args, **kwargs):
+ """Create a new job from a callable object.
+
+ Any positional arguments and keyword args given to this constructor
+ after the initial callable are passed directly to it."""
+
+ if not callable(func):
+ raise TypeError(
+ 'first argument to BackgroundJobFunc must be callable')
+
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+ # The string form will only include the function passed, because
+ # generating string representations of the arguments is a potentially
+ # _very_ expensive operation (e.g. with large arrays).
+ self.strform = str(func)
+ self._init()
+
+ def call(self):
+ return self.func(*self.args, **self.kwargs)
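
A short usage sketch of the manager defined above; slow_add is an illustrative function, not part of the module:

    import time
    from IPython.lib.backgroundjobs import BackgroundJobManager

    jobs = BackgroundJobManager()

    def slow_add(x, y):
        time.sleep(1)
        return x + y

    job = jobs.new(slow_add, 2, 3)  # function job, runs in its own thread
    jobs.new('2 + 3')               # expression job, eval()'d in the caller's namespaces
    time.sleep(1.5)
    jobs.status()                   # prints the Running/Completed/Dead summaries
    print(job.result)               # -> 5 once the job has completed
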
diff --git a/contrib/python/ipython/py2/IPython/lib/clipboard.py b/contrib/python/ipython/py2/IPython/lib/clipboard.py
index 713313da33..ac9b685c7d 100644
--- a/contrib/python/ipython/py2/IPython/lib/clipboard.py
+++ b/contrib/python/ipython/py2/IPython/lib/clipboard.py
@@ -1,72 +1,72 @@
-""" Utilities for accessing the platform's clipboard.
-"""
-
-import subprocess
-
-from IPython.core.error import TryNext
-import IPython.utils.py3compat as py3compat
-
-class ClipboardEmpty(ValueError):
- pass
-
-def win32_clipboard_get():
- """ Get the current clipboard's text on Windows.
-
- Requires Mark Hammond's pywin32 extensions.
- """
- try:
- import win32clipboard
- except ImportError:
- raise TryNext("Getting text from the clipboard requires the pywin32 "
- "extensions: http://sourceforge.net/projects/pywin32/")
- win32clipboard.OpenClipboard()
- try:
- text = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
- except (TypeError, win32clipboard.error):
- try:
- text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
- text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
- except (TypeError, win32clipboard.error):
- raise ClipboardEmpty
- finally:
- win32clipboard.CloseClipboard()
- return text
-
-def osx_clipboard_get():
- """ Get the clipboard's text on OS X.
- """
- p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
- stdout=subprocess.PIPE)
- text, stderr = p.communicate()
- # Text comes in with old Mac \r line endings. Change them to \n.
- text = text.replace(b'\r', b'\n')
- text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
- return text
-
-def tkinter_clipboard_get():
- """ Get the clipboard's text using Tkinter.
-
- This is the default on systems that are not Windows or OS X. It may
- interfere with other UI toolkits and should be replaced with an
- implementation that uses that toolkit.
- """
- try:
- from tkinter import Tk, TclError # Py 3
- except ImportError:
- try:
- from Tkinter import Tk, TclError # Py 2
- except ImportError:
- raise TryNext("Getting text from the clipboard on this platform "
- "requires Tkinter.")
- root = Tk()
- root.withdraw()
- try:
- text = root.clipboard_get()
- except TclError:
- raise ClipboardEmpty
- finally:
- root.destroy()
- text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
- return text
-
-
+""" Utilities for accessing the platform's clipboard.
+"""
+
+import subprocess
+
+from IPython.core.error import TryNext
+import IPython.utils.py3compat as py3compat
+
+class ClipboardEmpty(ValueError):
+ pass
+
+def win32_clipboard_get():
+ """ Get the current clipboard's text on Windows.
+
+ Requires Mark Hammond's pywin32 extensions.
+ """
+ try:
+ import win32clipboard
+ except ImportError:
+ raise TryNext("Getting text from the clipboard requires the pywin32 "
+ "extensions: http://sourceforge.net/projects/pywin32/")
+ win32clipboard.OpenClipboard()
+ try:
+ text = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
+ except (TypeError, win32clipboard.error):
+ try:
+ text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
+ text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
+ except (TypeError, win32clipboard.error):
+ raise ClipboardEmpty
+ finally:
+ win32clipboard.CloseClipboard()
+ return text
+
+def osx_clipboard_get():
+ """ Get the clipboard's text on OS X.
+ """
+ p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
+ stdout=subprocess.PIPE)
+ text, stderr = p.communicate()
+ # Text comes in with old Mac \r line endings. Change them to \n.
+ text = text.replace(b'\r', b'\n')
+ text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
+ return text
+
+def tkinter_clipboard_get():
+ """ Get the clipboard's text using Tkinter.
+
+ This is the default on systems that are not Windows or OS X. It may
+ interfere with other UI toolkits and should be replaced with an
+ implementation that uses that toolkit.
+ """
+ try:
+ from tkinter import Tk, TclError # Py 3
+ except ImportError:
+ try:
+ from Tkinter import Tk, TclError # Py 2
+ except ImportError:
+ raise TryNext("Getting text from the clipboard on this platform "
+ "requires Tkinter.")
+ root = Tk()
+ root.withdraw()
+ try:
+ text = root.clipboard_get()
+ except TclError:
+ raise ClipboardEmpty
+ finally:
+ root.destroy()
+ text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
+ return text
+
+
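
The three getters above are normally selected by platform (IPython does this through its hooks machinery); a minimal dispatcher in the same spirit, where the clipboard_get name is ours:

    import sys

    from IPython.lib.clipboard import (osx_clipboard_get, tkinter_clipboard_get,
                                       win32_clipboard_get)

    def clipboard_get():
        # Pick the getter matching the current platform.
        if sys.platform == 'win32':
            return win32_clipboard_get()
        elif sys.platform == 'darwin':
            return osx_clipboard_get()
        return tkinter_clipboard_get()
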
diff --git a/contrib/python/ipython/py2/IPython/lib/deepreload.py b/contrib/python/ipython/py2/IPython/lib/deepreload.py
index 72f2b5752f..76b493c0bb 100644
--- a/contrib/python/ipython/py2/IPython/lib/deepreload.py
+++ b/contrib/python/ipython/py2/IPython/lib/deepreload.py
@@ -1,362 +1,362 @@
-# -*- coding: utf-8 -*-
-"""
-Provides a reload() function that acts recursively.
-
-Python's normal :func:`python:reload` function only reloads the module that it's
-passed. The :func:`reload` function in this module also reloads everything
-imported from that module, which is useful when you're changing files deep
-inside a package.
-
-To use this as your default reload function, type this for Python 2::
-
- import __builtin__
- from IPython.lib import deepreload
- __builtin__.reload = deepreload.reload
-
-Or this for Python 3::
-
- import builtins
- from IPython.lib import deepreload
- builtins.reload = deepreload.reload
-
-A reference to the original :func:`python:reload` is stored in this module as
-:data:`original_reload`, so you can restore it later.
-
-This code is almost entirely based on knee.py, which is a Python
-re-implementation of hierarchical module import.
-"""
-from __future__ import print_function
-#*****************************************************************************
-# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-from contextlib import contextmanager
-import imp
-import sys
-
-from types import ModuleType
-from warnings import warn
-
-from IPython.utils.py3compat import builtin_mod, builtin_mod_name
-
-original_import = builtin_mod.__import__
-
-@contextmanager
-def replace_import_hook(new_import):
- saved_import = builtin_mod.__import__
- builtin_mod.__import__ = new_import
- try:
- yield
- finally:
- builtin_mod.__import__ = saved_import
-
-def get_parent(globals, level):
- """
- parent, name = get_parent(globals, level)
-
- Return the package that an import is being performed in. If globals comes
- from the module foo.bar.bat (not itself a package), this returns the
- sys.modules entry for foo.bar. If globals is from a package's __init__.py,
- the package's entry in sys.modules is returned.
-
- If globals doesn't come from a package or a module in a package, or a
- corresponding entry is not found in sys.modules, None is returned.
- """
- orig_level = level
-
- if not level or not isinstance(globals, dict):
- return None, ''
-
- pkgname = globals.get('__package__', None)
-
- if pkgname is not None:
- # __package__ is set, so use it
- if not hasattr(pkgname, 'rindex'):
- raise ValueError('__package__ set to non-string')
- if len(pkgname) == 0:
- if level > 0:
- raise ValueError('Attempted relative import in non-package')
- return None, ''
- name = pkgname
- else:
- # __package__ not set, so figure it out and set it
- if '__name__' not in globals:
- return None, ''
- modname = globals['__name__']
-
- if '__path__' in globals:
- # __path__ is set, so modname is already the package name
- globals['__package__'] = name = modname
- else:
- # Normal module, so work out the package name if any
- lastdot = modname.rfind('.')
- if lastdot < 0 < level:
- raise ValueError("Attempted relative import in non-package")
- if lastdot < 0:
- globals['__package__'] = None
- return None, ''
- globals['__package__'] = name = modname[:lastdot]
-
- dot = len(name)
- for x in range(level, 1, -1):
- try:
- dot = name.rindex('.', 0, dot)
- except ValueError:
- raise ValueError("attempted relative import beyond top-level "
- "package")
- name = name[:dot]
-
- try:
- parent = sys.modules[name]
- except:
- if orig_level < 1:
- warn("Parent module '%.200s' not found while handling absolute "
- "import" % name)
- parent = None
- else:
- raise SystemError("Parent module '%.200s' not loaded, cannot "
- "perform relative import" % name)
-
- # We expect, but can't guarantee, if parent != None, that:
- # - parent.__name__ == name
- # - parent.__dict__ is globals
- # If this is violated... Who cares?
- return parent, name
-
-def load_next(mod, altmod, name, buf):
- """
- mod, name, buf = load_next(mod, altmod, name, buf)
-
- altmod is either None or same as mod
- """
-
- if len(name) == 0:
- # completely empty module name should only happen in
- # 'from . import' (or '__import__("")')
- return mod, None, buf
-
- dot = name.find('.')
- if dot == 0:
- raise ValueError('Empty module name')
-
- if dot < 0:
- subname = name
- next = None
- else:
- subname = name[:dot]
- next = name[dot+1:]
-
- if buf != '':
- buf += '.'
- buf += subname
-
- result = import_submodule(mod, subname, buf)
- if result is None and mod != altmod:
- result = import_submodule(altmod, subname, subname)
- if result is not None:
- buf = subname
-
- if result is None:
- raise ImportError("No module named %.200s" % name)
-
- return result, next, buf
-
-# Need to keep track of what we've already reloaded to prevent cyclic evil
-found_now = {}
-
-def import_submodule(mod, subname, fullname):
- """m = import_submodule(mod, subname, fullname)"""
- # Require:
- # if mod == None: subname == fullname
- # else: mod.__name__ + "." + subname == fullname
-
- global found_now
- if fullname in found_now and fullname in sys.modules:
- m = sys.modules[fullname]
- else:
- print('Reloading', fullname)
- found_now[fullname] = 1
- oldm = sys.modules.get(fullname, None)
-
- if mod is None:
- path = None
- elif hasattr(mod, '__path__'):
- path = mod.__path__
- else:
- return None
-
- try:
- # This appears to be necessary on Python 3, because imp.find_module()
- # tries to import standard libraries (like io) itself, and we don't
- # want them to be processed by our deep_import_hook.
- with replace_import_hook(original_import):
- fp, filename, stuff = imp.find_module(subname, path)
- except ImportError:
- return None
-
- try:
- m = imp.load_module(fullname, fp, filename, stuff)
- except:
- # load_module probably removed name from modules because of
- # the error. Put back the original module object.
- if oldm:
- sys.modules[fullname] = oldm
- raise
- finally:
- if fp: fp.close()
-
- add_submodule(mod, m, fullname, subname)
-
- return m
-
-def add_submodule(mod, submod, fullname, subname):
- """mod.{subname} = submod"""
- if mod is None:
- return #Nothing to do here.
-
- if submod is None:
- submod = sys.modules[fullname]
-
- setattr(mod, subname, submod)
-
- return
-
-def ensure_fromlist(mod, fromlist, buf, recursive):
- """Handle 'from module import a, b, c' imports."""
- if not hasattr(mod, '__path__'):
- return
- for item in fromlist:
- if not hasattr(item, 'rindex'):
- raise TypeError("Item in ``from list'' not a string")
- if item == '*':
- if recursive:
- continue # avoid endless recursion
- try:
- all = mod.__all__
- except AttributeError:
- pass
- else:
- ret = ensure_fromlist(mod, all, buf, 1)
- if not ret:
- return 0
- elif not hasattr(mod, item):
- import_submodule(mod, item, buf + '.' + item)
-
-def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
- """Replacement for __import__()"""
- parent, buf = get_parent(globals, level)
-
- head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
-
- tail = head
- while name:
- tail, name, buf = load_next(tail, tail, name, buf)
-
- # If tail is None, both get_parent and load_next found
- # an empty module name: someone called __import__("") or
- # doctored faulty bytecode
- if tail is None:
- raise ValueError('Empty module name')
-
- if not fromlist:
- return head
-
- ensure_fromlist(tail, fromlist, buf, 0)
- return tail
-
-modules_reloading = {}
-
-def deep_reload_hook(m):
- """Replacement for reload()."""
- if not isinstance(m, ModuleType):
- raise TypeError("reload() argument must be module")
-
- name = m.__name__
-
- if name not in sys.modules:
- raise ImportError("reload(): module %.200s not in sys.modules" % name)
-
- global modules_reloading
- try:
- return modules_reloading[name]
- except:
- modules_reloading[name] = m
-
- dot = name.rfind('.')
- if dot < 0:
- subname = name
- path = None
- else:
- try:
- parent = sys.modules[name[:dot]]
- except KeyError:
- modules_reloading.clear()
- raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
- subname = name[dot+1:]
- path = getattr(parent, "__path__", None)
-
- try:
- # This appears to be necessary on Python 3, because imp.find_module()
- # tries to import standard libraries (like io) itself, and we don't
- # want them to be processed by our deep_import_hook.
- with replace_import_hook(original_import):
- fp, filename, stuff = imp.find_module(subname, path)
- finally:
- modules_reloading.clear()
-
- try:
- newm = imp.load_module(name, fp, filename, stuff)
- except:
- # load_module probably removed name from modules because of
- # the error. Put back the original module object.
- sys.modules[name] = m
- raise
- finally:
- if fp: fp.close()
-
- modules_reloading.clear()
- return newm
-
-# Save the original hooks
-try:
- original_reload = builtin_mod.reload
-except AttributeError:
- original_reload = imp.reload # Python 3
-
-# Replacement for reload()
+# -*- coding: utf-8 -*-
+"""
+Provides a reload() function that acts recursively.
+
+Python's normal :func:`python:reload` function only reloads the module that it's
+passed. The :func:`reload` function in this module also reloads everything
+imported from that module, which is useful when you're changing files deep
+inside a package.
+
+To use this as your default reload function, type this for Python 2::
+
+ import __builtin__
+ from IPython.lib import deepreload
+ __builtin__.reload = deepreload.reload
+
+Or this for Python 3::
+
+ import builtins
+ from IPython.lib import deepreload
+ builtins.reload = deepreload.reload
+
+A reference to the original :func:`python:reload` is stored in this module as
+:data:`original_reload`, so you can restore it later.
+
+This code is almost entirely based on knee.py, which is a Python
+re-implementation of hierarchical module import.
+"""
+from __future__ import print_function
+#*****************************************************************************
+# Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+from contextlib import contextmanager
+import imp
+import sys
+
+from types import ModuleType
+from warnings import warn
+
+from IPython.utils.py3compat import builtin_mod, builtin_mod_name
+
+original_import = builtin_mod.__import__
+
+@contextmanager
+def replace_import_hook(new_import):
+ saved_import = builtin_mod.__import__
+ builtin_mod.__import__ = new_import
+ try:
+ yield
+ finally:
+ builtin_mod.__import__ = saved_import
+
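
The context manager above temporarily swaps in a replacement for the builtin ``__import__`` and restores the original even if the body raises. A minimal sketch of using it directly (``noisy_import`` is a hypothetical hook, not part of this module)::

    def noisy_import(name, globals=None, locals=None, fromlist=None, level=-1):
        print('importing', name)
        return original_import(name, globals, locals, fromlist, level)

    with replace_import_hook(noisy_import):
        import json        # routed through noisy_import
    import os              # the original __import__ is back here
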
+def get_parent(globals, level):
+ """
+ parent, name = get_parent(globals, level)
+
+ Return the package that an import is being performed in. If globals comes
+ from the module foo.bar.bat (not itself a package), this returns the
+ sys.modules entry for foo.bar. If globals is from a package's __init__.py,
+ the package's entry in sys.modules is returned.
+
+ If globals doesn't come from a package or a module in a package, or a
+ corresponding entry is not found in sys.modules, None is returned.
+ """
+ orig_level = level
+
+ if not level or not isinstance(globals, dict):
+ return None, ''
+
+ pkgname = globals.get('__package__', None)
+
+ if pkgname is not None:
+ # __package__ is set, so use it
+ if not hasattr(pkgname, 'rindex'):
+ raise ValueError('__package__ set to non-string')
+ if len(pkgname) == 0:
+ if level > 0:
+ raise ValueError('Attempted relative import in non-package')
+ return None, ''
+ name = pkgname
+ else:
+ # __package__ not set, so figure it out and set it
+ if '__name__' not in globals:
+ return None, ''
+ modname = globals['__name__']
+
+ if '__path__' in globals:
+ # __path__ is set, so modname is already the package name
+ globals['__package__'] = name = modname
+ else:
+ # Normal module, so work out the package name if any
+ lastdot = modname.rfind('.')
+ if lastdot < 0 < level:
+ raise ValueError("Attempted relative import in non-package")
+ if lastdot < 0:
+ globals['__package__'] = None
+ return None, ''
+ globals['__package__'] = name = modname[:lastdot]
+
+ dot = len(name)
+ for x in range(level, 1, -1):
+ try:
+ dot = name.rindex('.', 0, dot)
+ except ValueError:
+ raise ValueError("attempted relative import beyond top-level "
+ "package")
+ name = name[:dot]
+
+ try:
+ parent = sys.modules[name]
+ except:
+ if orig_level < 1:
+ warn("Parent module '%.200s' not found while handling absolute "
+ "import" % name)
+ parent = None
+ else:
+ raise SystemError("Parent module '%.200s' not loaded, cannot "
+ "perform relative import" % name)
+
+ # We expect, but can't guarantee, if parent != None, that:
+ # - parent.__name__ == name
+ # - parent.__dict__ is globals
+ # If this is violated... Who cares?
+ return parent, name
+
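
To make the contract above concrete, a small sketch under the assumption that a hypothetical package ``foo.bar`` with module ``foo.bar.bat`` has already been imported::

    import sys
    bat_globals = sys.modules['foo.bar.bat'].__dict__
    parent, name = get_parent(bat_globals, level=1)
    # parent is sys.modules['foo.bar'] and name == 'foo.bar';
    # with level=2 the same call would walk one package further up.
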
+def load_next(mod, altmod, name, buf):
+ """
+ mod, name, buf = load_next(mod, altmod, name, buf)
+
+ altmod is either None or same as mod
+ """
+
+ if len(name) == 0:
+ # completely empty module name should only happen in
+ # 'from . import' (or '__import__("")')
+ return mod, None, buf
+
+ dot = name.find('.')
+ if dot == 0:
+ raise ValueError('Empty module name')
+
+ if dot < 0:
+ subname = name
+ next = None
+ else:
+ subname = name[:dot]
+ next = name[dot+1:]
+
+ if buf != '':
+ buf += '.'
+ buf += subname
+
+ result = import_submodule(mod, subname, buf)
+ if result is None and mod != altmod:
+ result = import_submodule(altmod, subname, subname)
+ if result is not None:
+ buf = subname
+
+ if result is None:
+ raise ImportError("No module named %.200s" % name)
+
+ return result, next, buf
+
+# Need to keep track of what we've already reloaded to prevent cyclic evil
+found_now = {}
+
+def import_submodule(mod, subname, fullname):
+ """m = import_submodule(mod, subname, fullname)"""
+ # Require:
+ # if mod == None: subname == fullname
+ # else: mod.__name__ + "." + subname == fullname
+
+ global found_now
+ if fullname in found_now and fullname in sys.modules:
+ m = sys.modules[fullname]
+ else:
+ print('Reloading', fullname)
+ found_now[fullname] = 1
+ oldm = sys.modules.get(fullname, None)
+
+ if mod is None:
+ path = None
+ elif hasattr(mod, '__path__'):
+ path = mod.__path__
+ else:
+ return None
+
+ try:
+ # This appears to be necessary on Python 3, because imp.find_module()
+ # tries to import standard libraries (like io) itself, and we don't
+ # want them to be processed by our deep_import_hook.
+ with replace_import_hook(original_import):
+ fp, filename, stuff = imp.find_module(subname, path)
+ except ImportError:
+ return None
+
+ try:
+ m = imp.load_module(fullname, fp, filename, stuff)
+ except:
+ # load_module probably removed name from modules because of
+ # the error. Put back the original module object.
+ if oldm:
+ sys.modules[fullname] = oldm
+ raise
+ finally:
+ if fp: fp.close()
+
+ add_submodule(mod, m, fullname, subname)
+
+ return m
+
+def add_submodule(mod, submod, fullname, subname):
+ """mod.{subname} = submod"""
+ if mod is None:
+ return #Nothing to do here.
+
+ if submod is None:
+ submod = sys.modules[fullname]
+
+ setattr(mod, subname, submod)
+
+ return
+
+def ensure_fromlist(mod, fromlist, buf, recursive):
+ """Handle 'from module import a, b, c' imports."""
+ if not hasattr(mod, '__path__'):
+ return
+ for item in fromlist:
+ if not hasattr(item, 'rindex'):
+ raise TypeError("Item in ``from list'' not a string")
+ if item == '*':
+ if recursive:
+ continue # avoid endless recursion
+ try:
+ all = mod.__all__
+ except AttributeError:
+ pass
+ else:
+ ret = ensure_fromlist(mod, all, buf, 1)
+ if not ret:
+ return 0
+ elif not hasattr(mod, item):
+ import_submodule(mod, item, buf + '.' + item)
+
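
In other words, a statement such as ``from pkg import a, b`` arrives here with ``fromlist=('a', 'b')``, and each name that is not already an attribute of the package is imported as a submodule. Roughly (``pkg`` is a hypothetical package)::

    pkg = import_submodule(None, 'pkg', 'pkg')
    ensure_fromlist(pkg, ('a', 'b'), 'pkg', 0)
    a, b = pkg.a, pkg.b   # both submodules are now loaded and attached
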
+def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
+ """Replacement for __import__()"""
+ parent, buf = get_parent(globals, level)
+
+ head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
+
+ tail = head
+ while name:
+ tail, name, buf = load_next(tail, tail, name, buf)
+
+ # If tail is None, both get_parent and load_next found
+ # an empty module name: someone called __import__("") or
+ # doctored faulty bytecode
+ if tail is None:
+ raise ValueError('Empty module name')
+
+ if not fromlist:
+ return head
+
+ ensure_fromlist(tail, fromlist, buf, 0)
+ return tail
+
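
The hook above only takes effect while it is installed as the builtin ``__import__``, which ``reload()`` below arranges via ``replace_import_hook``. A sketch of what that amounts to::

    with replace_import_hook(deep_import_hook):
        # every import in this block is resolved one dotted component
        # at a time through load_next()/import_submodule() above
        import xml.sax
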
+modules_reloading = {}
+
+def deep_reload_hook(m):
+ """Replacement for reload()."""
+ if not isinstance(m, ModuleType):
+ raise TypeError("reload() argument must be module")
+
+ name = m.__name__
+
+ if name not in sys.modules:
+ raise ImportError("reload(): module %.200s not in sys.modules" % name)
+
+ global modules_reloading
+ try:
+ return modules_reloading[name]
+    except KeyError:
+ modules_reloading[name] = m
+
+ dot = name.rfind('.')
+ if dot < 0:
+ subname = name
+ path = None
+ else:
+ try:
+ parent = sys.modules[name[:dot]]
+ except KeyError:
+ modules_reloading.clear()
+ raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
+ subname = name[dot+1:]
+ path = getattr(parent, "__path__", None)
+
+ try:
+ # This appears to be necessary on Python 3, because imp.find_module()
+ # tries to import standard libraries (like io) itself, and we don't
+ # want them to be processed by our deep_import_hook.
+ with replace_import_hook(original_import):
+ fp, filename, stuff = imp.find_module(subname, path)
+ finally:
+ modules_reloading.clear()
+
+ try:
+ newm = imp.load_module(name, fp, filename, stuff)
+ except:
+ # load_module probably removed name from modules because of
+ # the error. Put back the original module object.
+ sys.modules[name] = m
+ raise
+ finally:
+ if fp: fp.close()
+
+ modules_reloading.clear()
+ return newm
+
+# Save the original hooks
+try:
+ original_reload = builtin_mod.reload
+except AttributeError:
+ original_reload = imp.reload # Python 3
+
+# Replacement for reload()
def reload(module, exclude=('sys', 'os.path', builtin_mod_name, '__main__',
'numpy', 'numpy._globals')):
- """Recursively reload all modules used in the given module. Optionally
- takes a list of modules to exclude from reloading. The default exclude
- list contains sys, __main__, and __builtin__, to prevent, e.g., resetting
- display, exception, and io hooks.
- """
- global found_now
- for i in exclude:
- found_now[i] = 1
- try:
- with replace_import_hook(deep_import_hook):
- return deep_reload_hook(module)
- finally:
- found_now = {}
-
-
-def _dreload(module, **kwargs):
- """
- **deprecated**
-
- import reload explicitly from `IPython.lib.deepreload` to use it
-
- """
+ """Recursively reload all modules used in the given module. Optionally
+ takes a list of modules to exclude from reloading. The default exclude
+ list contains sys, __main__, and __builtin__, to prevent, e.g., resetting
+ display, exception, and io hooks.
+ """
+ global found_now
+ for i in exclude:
+ found_now[i] = 1
+ try:
+ with replace_import_hook(deep_import_hook):
+ return deep_reload_hook(module)
+ finally:
+ found_now = {}
+
+
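
A short usage sketch for the ``reload()`` defined above; ``mypkg`` and the extra ``mypkg.constants`` exclude entry are illustrative only::

    from IPython.lib import deepreload
    import mypkg

    # reload mypkg and everything it imports, keeping the default
    # excludes and additionally leaving mypkg.constants untouched
    deepreload.reload(mypkg, exclude=('sys', 'os.path', '__builtin__',
                                      '__main__', 'numpy', 'numpy._globals',
                                      'mypkg.constants'))
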
+def _dreload(module, **kwargs):
+ """
+ **deprecated**
+
+ import reload explicitly from `IPython.lib.deepreload` to use it
+
+ """
# this was marked as deprecated and for 5.0 removal, but
     # IPython.core.builtin_trap has a Deprecation warning for 6.0, so cannot
# remove that now.
- warn("""
+ warn("""
injecting `dreload` in interactive namespace is deprecated since IPython 4.0.
-Please import `reload` explicitly from `IPython.lib.deepreload`.
-""", DeprecationWarning, stacklevel=2)
- reload(module, **kwargs)
-
+Please import `reload` explicitly from `IPython.lib.deepreload`.
+""", DeprecationWarning, stacklevel=2)
+ reload(module, **kwargs)
+
diff --git a/contrib/python/ipython/py2/IPython/lib/demo.py b/contrib/python/ipython/py2/IPython/lib/demo.py
index d630db99f9..b0f3503ed7 100644
--- a/contrib/python/ipython/py2/IPython/lib/demo.py
+++ b/contrib/python/ipython/py2/IPython/lib/demo.py
@@ -1,583 +1,583 @@
-"""Module for interactive demos using IPython.
-
-This module implements a few classes for running Python scripts interactively
-in IPython for demonstrations. With very simple markup (a few tags in
-comments), you can control points where the script stops executing and returns
-control to IPython.
-
-
-Provided classes
-----------------
-
-The classes are (see their docstrings for further details):
-
- - Demo: pure python demos
-
- - IPythonDemo: demos with input to be processed by IPython as if it had been
- typed interactively (so magics work, as well as any other special syntax you
- may have added via input prefilters).
-
- - LineDemo: single-line version of the Demo class. These demos are executed
- one line at a time, and require no markup.
-
- - IPythonLineDemo: IPython version of the LineDemo class (the demo is
- executed a line at a time, but processed via IPython).
-
- - ClearMixin: mixin to make Demo classes with less visual clutter. It
- declares an empty marquee and a pre_cmd that clears the screen before each
- block (see Subclassing below).
-
- - ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
- classes.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.lib.demo
- :parts: 3
-
-Subclassing
------------
-
-The classes here all include a few methods meant to make customization by
-subclassing more convenient. Their docstrings below have some more details:
-
- - marquee(): generates a marquee to provide visible on-screen markers at each
- block start and end.
-
- - pre_cmd(): run right before the execution of each block.
-
- - post_cmd(): run right after the execution of each block. If the block
- raises an exception, this is NOT called.
-
-
-Operation
----------
-
-The file is run in its own empty namespace (though you can pass it a string of
-arguments as if in a command line environment, and it will see those as
-sys.argv). But at each stop, the global IPython namespace is updated with the
-current internal demo namespace, so you can work interactively with the data
-accumulated so far.
-
-By default, each block of code is printed (with syntax highlighting) before
-executing it and you have to confirm execution. This is intended to show the
-code to an audience first so you can discuss it, and only proceed with
-execution once you agree. There are a few tags which allow you to modify this
-behavior.
-
-The supported tags are:
-
-# <demo> stop
-
- Defines block boundaries, the points where IPython stops execution of the
- file and returns to the interactive prompt.
-
- You can optionally mark the stop tag with extra dashes before and after the
- word 'stop', to help visually distinguish the blocks in a text editor:
-
- # <demo> --- stop ---
-
-
-# <demo> silent
-
- Make a block execute silently (and hence automatically). Typically used in
- cases where you have some boilerplate or initialization code which you need
- executed but do not want to be seen in the demo.
-
-# <demo> auto
-
-  Make a block execute automatically, while still being printed. Useful for
- simple code which does not warrant discussion, since it avoids the extra
- manual confirmation.
-
-# <demo> auto_all
-
- This tag can _only_ be in the first block, and if given it overrides the
- individual auto tags to make the whole demo fully automatic (no block asks
- for confirmation). It can also be given at creation time (or the attribute
- set later) to override what's in the file.
-
-While _any_ python file can be run as a Demo instance, if there are no stop
-tags the whole file will run in a single block (no different than calling
-first %pycat and then %run). The minimal markup to make this useful is to
-place a set of stop tags; the other tags are only there to let you fine-tune
-the execution.
-
-This is probably best explained with the simple example file below. You can
-copy this into a file named ex_demo.py, and try running it via::
-
+"""Module for interactive demos using IPython.
+
+This module implements a few classes for running Python scripts interactively
+in IPython for demonstrations. With very simple markup (a few tags in
+comments), you can control points where the script stops executing and returns
+control to IPython.
+
+
+Provided classes
+----------------
+
+The classes are (see their docstrings for further details):
+
+ - Demo: pure python demos
+
+ - IPythonDemo: demos with input to be processed by IPython as if it had been
+ typed interactively (so magics work, as well as any other special syntax you
+ may have added via input prefilters).
+
+ - LineDemo: single-line version of the Demo class. These demos are executed
+ one line at a time, and require no markup.
+
+ - IPythonLineDemo: IPython version of the LineDemo class (the demo is
+ executed a line at a time, but processed via IPython).
+
+ - ClearMixin: mixin to make Demo classes with less visual clutter. It
+ declares an empty marquee and a pre_cmd that clears the screen before each
+ block (see Subclassing below).
+
+ - ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
+ classes.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.lib.demo
+ :parts: 3
+
+Subclassing
+-----------
+
+The classes here all include a few methods meant to make customization by
+subclassing more convenient. Their docstrings below have some more details:
+
+ - marquee(): generates a marquee to provide visible on-screen markers at each
+ block start and end.
+
+ - pre_cmd(): run right before the execution of each block.
+
+ - post_cmd(): run right after the execution of each block. If the block
+ raises an exception, this is NOT called.
+
+
+Operation
+---------
+
+The file is run in its own empty namespace (though you can pass it a string of
+arguments as if in a command line environment, and it will see those as
+sys.argv). But at each stop, the global IPython namespace is updated with the
+current internal demo namespace, so you can work interactively with the data
+accumulated so far.
+
+By default, each block of code is printed (with syntax highlighting) before
+executing it and you have to confirm execution. This is intended to show the
+code to an audience first so you can discuss it, and only proceed with
+execution once you agree. There are a few tags which allow you to modify this
+behavior.
+
+The supported tags are:
+
+# <demo> stop
+
+ Defines block boundaries, the points where IPython stops execution of the
+ file and returns to the interactive prompt.
+
+ You can optionally mark the stop tag with extra dashes before and after the
+ word 'stop', to help visually distinguish the blocks in a text editor:
+
+ # <demo> --- stop ---
+
+
+# <demo> silent
+
+ Make a block execute silently (and hence automatically). Typically used in
+ cases where you have some boilerplate or initialization code which you need
+ executed but do not want to be seen in the demo.
+
+# <demo> auto
+
+  Make a block execute automatically, while still being printed. Useful for
+ simple code which does not warrant discussion, since it avoids the extra
+ manual confirmation.
+
+# <demo> auto_all
+
+ This tag can _only_ be in the first block, and if given it overrides the
+ individual auto tags to make the whole demo fully automatic (no block asks
+ for confirmation). It can also be given at creation time (or the attribute
+ set later) to override what's in the file.
+
+While _any_ python file can be run as a Demo instance, if there are no stop
+tags the whole file will run in a single block (no different than calling
+first %pycat and then %run). The minimal markup to make this useful is to
+place a set of stop tags; the other tags are only there to let you fine-tune
+the execution.
+
+This is probably best explained with the simple example file below. You can
+copy this into a file named ex_demo.py, and try running it via::
+
from IPython.lib.demo import Demo
- d = Demo('ex_demo.py')
- d()
-
-Each time you call the demo object, it runs the next block. The demo object
-has a few useful methods for navigation, like again(), edit(), jump(), seek()
-and back(). It can be reset for a new run via reset() or reloaded from disk
-(in case you've edited the source) via reload(). See their docstrings below.
-
-Note: To make this simpler to explore, a file called "demo-exercizer.py" has
-been added to the "docs/examples/core" directory. Just cd to this directory in
-an IPython session, and type::
-
- %run demo-exercizer.py
-
-and then follow the directions.
-
-Example
--------
-
-The following is a very simple example of a valid demo file.
-
-::
-
- #################### EXAMPLE DEMO <ex_demo.py> ###############################
- '''A simple interactive demo to illustrate the use of IPython's Demo class.'''
-
- print 'Hello, welcome to an interactive IPython demo.'
-
- # The mark below defines a block boundary, which is a point where IPython will
- # stop execution and return to the interactive prompt. The dashes are actually
- # optional and used only as a visual aid to clearly separate blocks while
- # editing the demo code.
- # <demo> stop
-
- x = 1
- y = 2
-
- # <demo> stop
-
-    # the mark below marks this block as silent
- # <demo> silent
-
- print 'This is a silent block, which gets executed but not printed.'
-
- # <demo> stop
- # <demo> auto
- print 'This is an automatic block.'
- print 'It is executed without asking for confirmation, but printed.'
- z = x+y
-
-    print 'z=',z
-
- # <demo> stop
- # This is just another normal block.
- print 'z is now:', z
-
- print 'bye!'
- ################### END EXAMPLE DEMO <ex_demo.py> ############################
-"""
-
-from __future__ import unicode_literals
-
-#*****************************************************************************
-# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#
-#*****************************************************************************
-from __future__ import print_function
-
-import os
-import re
-import shlex
-import sys
-
-from IPython.utils import io
-from IPython.utils.text import marquee
-from IPython.utils import openpy
-from IPython.utils import py3compat
-__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
-
-class DemoError(Exception): pass
-
-def re_mark(mark):
- return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
-
-class Demo(object):
-
- re_stop = re_mark('-*\s?stop\s?-*')
- re_silent = re_mark('silent')
- re_auto = re_mark('auto')
- re_auto_all = re_mark('auto_all')
-
- def __init__(self,src,title='',arg_str='',auto_all=None):
- """Make a new demo object. To run the demo, simply call the object.
-
- See the module docstring for full details and an example (you can use
- IPython.Demo? in IPython to see it).
-
- Inputs:
-
- - src is either a file, or file-like object, or a
- string that can be resolved to a filename.
-
- Optional inputs:
-
- - title: a string to use as the demo name. Of most use when the demo
- you are making comes from an object that has no filename, or if you
- want an alternate denotation distinct from the filename.
-
- - arg_str(''): a string of arguments, internally converted to a list
- just like sys.argv, so the demo script can see a similar
- environment.
-
- - auto_all(None): global flag to run all blocks automatically without
- confirmation. This attribute overrides the block-level tags and
- applies to the whole demo. It is an attribute of the object, and
- can be changed at runtime simply by reassigning it to a boolean
- value.
- """
- if hasattr(src, "read"):
- # It seems to be a file or a file-like object
- self.fname = "from a file-like object"
- if title == '':
- self.title = "from a file-like object"
- else:
- self.title = title
- else:
- # Assume it's a string or something that can be converted to one
- self.fname = src
- if title == '':
- (filepath, filename) = os.path.split(src)
- self.title = filename
- else:
- self.title = title
- self.sys_argv = [src] + shlex.split(arg_str)
- self.auto_all = auto_all
- self.src = src
-
- # get a few things from ipython. While it's a bit ugly design-wise,
- # it ensures that things like color scheme and the like are always in
- # sync with the ipython mode being used. This class is only meant to
- # be used inside ipython anyways, so it's OK.
- ip = get_ipython() # this is in builtins whenever IPython is running
- self.ip_ns = ip.user_ns
- self.ip_colorize = ip.pycolorize
- self.ip_showtb = ip.showtraceback
- self.ip_run_cell = ip.run_cell
- self.shell = ip
-
- # load user data and initialize data structures
- self.reload()
-
- def fload(self):
- """Load file object."""
- # read data and parse into blocks
- if hasattr(self, 'fobj') and self.fobj is not None:
- self.fobj.close()
- if hasattr(self.src, "read"):
- # It seems to be a file or a file-like object
- self.fobj = self.src
- else:
- # Assume it's a string or something that can be converted to one
- self.fobj = openpy.open(self.fname)
-
- def reload(self):
- """Reload source from disk and initialize state."""
- self.fload()
-
- self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
- src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
- self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
- self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
-
- # if auto_all is not given (def. None), we read it from the file
- if self.auto_all is None:
- self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
- else:
- self.auto_all = bool(self.auto_all)
-
- # Clean the sources from all markup so it doesn't get displayed when
- # running the demo
- src_blocks = []
- auto_strip = lambda s: self.re_auto.sub('',s)
- for i,b in enumerate(src_b):
- if self._auto[i]:
- src_blocks.append(auto_strip(b))
- else:
- src_blocks.append(b)
- # remove the auto_all marker
- src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
-
- self.nblocks = len(src_blocks)
- self.src_blocks = src_blocks
-
- # also build syntax-highlighted source
- self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
-
- # ensure clean namespace and seek offset
- self.reset()
-
- def reset(self):
- """Reset the namespace and seek pointer to restart the demo"""
- self.user_ns = {}
- self.finished = False
- self.block_index = 0
-
- def _validate_index(self,index):
- if index<0 or index>=self.nblocks:
- raise ValueError('invalid block index %s' % index)
-
- def _get_index(self,index):
- """Get the current block index, validating and checking status.
-
- Returns None if the demo is finished"""
-
- if index is None:
- if self.finished:
+ d = Demo('ex_demo.py')
+ d()
+
+Each time you call the demo object, it runs the next block. The demo object
+has a few useful methods for navigation, like again(), edit(), jump(), seek()
+and back(). It can be reset for a new run via reset() or reloaded from disk
+(in case you've edited the source) via reload(). See their docstrings below.
+
+Note: To make this simpler to explore, a file called "demo-exercizer.py" has
+been added to the "docs/examples/core" directory. Just cd to this directory in
+an IPython session, and type::
+
+ %run demo-exercizer.py
+
+and then follow the directions.
+
+Example
+-------
+
+The following is a very simple example of a valid demo file.
+
+::
+
+ #################### EXAMPLE DEMO <ex_demo.py> ###############################
+ '''A simple interactive demo to illustrate the use of IPython's Demo class.'''
+
+ print 'Hello, welcome to an interactive IPython demo.'
+
+ # The mark below defines a block boundary, which is a point where IPython will
+ # stop execution and return to the interactive prompt. The dashes are actually
+ # optional and used only as a visual aid to clearly separate blocks while
+ # editing the demo code.
+ # <demo> stop
+
+ x = 1
+ y = 2
+
+ # <demo> stop
+
+    # the mark below marks this block as silent
+ # <demo> silent
+
+ print 'This is a silent block, which gets executed but not printed.'
+
+ # <demo> stop
+ # <demo> auto
+ print 'This is an automatic block.'
+ print 'It is executed without asking for confirmation, but printed.'
+ z = x+y
+
+    print 'z=',z
+
+ # <demo> stop
+ # This is just another normal block.
+ print 'z is now:', z
+
+ print 'bye!'
+ ################### END EXAMPLE DEMO <ex_demo.py> ############################
+"""
+
+from __future__ import unicode_literals
+
+#*****************************************************************************
+# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#
+#*****************************************************************************
+from __future__ import print_function
+
+import os
+import re
+import shlex
+import sys
+
+from IPython.utils import io
+from IPython.utils.text import marquee
+from IPython.utils import openpy
+from IPython.utils import py3compat
+__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
+
+class DemoError(Exception): pass
+
+def re_mark(mark):
+ return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
+
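
For reference, the pattern built by ``re_mark`` matches a whole line holding exactly one tag comment; a quick sanity check of its behaviour::

    pat = re_mark('silent')
    assert pat.search('# <demo> silent\n')
    assert pat.search('  #  <demo>  silent  \n')    # surrounding whitespace is fine
    assert not pat.search('x = 1  # <demo> silent') # must be the whole line
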
+class Demo(object):
+
+ re_stop = re_mark('-*\s?stop\s?-*')
+ re_silent = re_mark('silent')
+ re_auto = re_mark('auto')
+ re_auto_all = re_mark('auto_all')
+
+ def __init__(self,src,title='',arg_str='',auto_all=None):
+ """Make a new demo object. To run the demo, simply call the object.
+
+ See the module docstring for full details and an example (you can use
+ IPython.Demo? in IPython to see it).
+
+ Inputs:
+
+ - src is either a file, or file-like object, or a
+ string that can be resolved to a filename.
+
+ Optional inputs:
+
+ - title: a string to use as the demo name. Of most use when the demo
+ you are making comes from an object that has no filename, or if you
+ want an alternate denotation distinct from the filename.
+
+ - arg_str(''): a string of arguments, internally converted to a list
+ just like sys.argv, so the demo script can see a similar
+ environment.
+
+ - auto_all(None): global flag to run all blocks automatically without
+ confirmation. This attribute overrides the block-level tags and
+ applies to the whole demo. It is an attribute of the object, and
+ can be changed at runtime simply by reassigning it to a boolean
+ value.
+ """
+ if hasattr(src, "read"):
+ # It seems to be a file or a file-like object
+ self.fname = "from a file-like object"
+ if title == '':
+ self.title = "from a file-like object"
+ else:
+ self.title = title
+ else:
+ # Assume it's a string or something that can be converted to one
+ self.fname = src
+ if title == '':
+ (filepath, filename) = os.path.split(src)
+ self.title = filename
+ else:
+ self.title = title
+ self.sys_argv = [src] + shlex.split(arg_str)
+ self.auto_all = auto_all
+ self.src = src
+
+ # get a few things from ipython. While it's a bit ugly design-wise,
+ # it ensures that things like color scheme and the like are always in
+ # sync with the ipython mode being used. This class is only meant to
+ # be used inside ipython anyways, so it's OK.
+ ip = get_ipython() # this is in builtins whenever IPython is running
+ self.ip_ns = ip.user_ns
+ self.ip_colorize = ip.pycolorize
+ self.ip_showtb = ip.showtraceback
+ self.ip_run_cell = ip.run_cell
+ self.shell = ip
+
+ # load user data and initialize data structures
+ self.reload()
+
+ def fload(self):
+ """Load file object."""
+ # read data and parse into blocks
+ if hasattr(self, 'fobj') and self.fobj is not None:
+ self.fobj.close()
+ if hasattr(self.src, "read"):
+ # It seems to be a file or a file-like object
+ self.fobj = self.src
+ else:
+ # Assume it's a string or something that can be converted to one
+ self.fobj = openpy.open(self.fname)
+
+ def reload(self):
+ """Reload source from disk and initialize state."""
+ self.fload()
+
+ self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
+ src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
+ self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
+ self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
+
+ # if auto_all is not given (def. None), we read it from the file
+ if self.auto_all is None:
+ self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
+ else:
+ self.auto_all = bool(self.auto_all)
+
+ # Clean the sources from all markup so it doesn't get displayed when
+ # running the demo
+ src_blocks = []
+ auto_strip = lambda s: self.re_auto.sub('',s)
+ for i,b in enumerate(src_b):
+ if self._auto[i]:
+ src_blocks.append(auto_strip(b))
+ else:
+ src_blocks.append(b)
+ # remove the auto_all marker
+ src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
+
+ self.nblocks = len(src_blocks)
+ self.src_blocks = src_blocks
+
+ # also build syntax-highlighted source
+ self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
+
+ # ensure clean namespace and seek offset
+ self.reset()
+
+ def reset(self):
+ """Reset the namespace and seek pointer to restart the demo"""
+ self.user_ns = {}
+ self.finished = False
+ self.block_index = 0
+
+ def _validate_index(self,index):
+ if index<0 or index>=self.nblocks:
+ raise ValueError('invalid block index %s' % index)
+
+ def _get_index(self,index):
+ """Get the current block index, validating and checking status.
+
+ Returns None if the demo is finished"""
+
+ if index is None:
+ if self.finished:
print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
- return None
- index = self.block_index
- else:
- self._validate_index(index)
- return index
-
- def seek(self,index):
- """Move the current seek pointer to the given block.
-
- You can use negative indices to seek from the end, with identical
- semantics to those of Python lists."""
- if index<0:
- index = self.nblocks + index
- self._validate_index(index)
- self.block_index = index
- self.finished = False
-
- def back(self,num=1):
- """Move the seek pointer back num blocks (default is 1)."""
- self.seek(self.block_index-num)
-
- def jump(self,num=1):
- """Jump a given number of blocks relative to the current one.
-
- The offset can be positive or negative, defaults to 1."""
- self.seek(self.block_index+num)
-
- def again(self):
- """Move the seek pointer back one block and re-execute."""
- self.back(1)
- self()
-
- def edit(self,index=None):
- """Edit a block.
-
- If no number is given, use the last block executed.
-
- This edits the in-memory copy of the demo, it does NOT modify the
- original source file. If you want to do that, simply open the file in
- an editor and use reload() when you make changes to the file. This
- method is meant to let you change a block during a demonstration for
- explanatory purposes, without damaging your original script."""
-
- index = self._get_index(index)
- if index is None:
- return
- # decrease the index by one (unless we're at the very beginning), so
-        # that the default demo.edit() call opens up the block we've last run
- if index>0:
- index -= 1
-
- filename = self.shell.mktempfile(self.src_blocks[index])
- self.shell.hooks.editor(filename,1)
- with open(filename, 'r') as f:
- new_block = f.read()
- # update the source and colored block
- self.src_blocks[index] = new_block
- self.src_blocks_colored[index] = self.ip_colorize(new_block)
- self.block_index = index
- # call to run with the newly edited index
- self()
-
- def show(self,index=None):
- """Show a single block on screen"""
-
- index = self._get_index(index)
- if index is None:
- return
-
- print(self.marquee('<%s> block # %s (%s remaining)' %
+ return None
+ index = self.block_index
+ else:
+ self._validate_index(index)
+ return index
+
+ def seek(self,index):
+ """Move the current seek pointer to the given block.
+
+ You can use negative indices to seek from the end, with identical
+ semantics to those of Python lists."""
+ if index<0:
+ index = self.nblocks + index
+ self._validate_index(index)
+ self.block_index = index
+ self.finished = False
+
+ def back(self,num=1):
+ """Move the seek pointer back num blocks (default is 1)."""
+ self.seek(self.block_index-num)
+
+ def jump(self,num=1):
+ """Jump a given number of blocks relative to the current one.
+
+ The offset can be positive or negative, defaults to 1."""
+ self.seek(self.block_index+num)
+
+ def again(self):
+ """Move the seek pointer back one block and re-execute."""
+ self.back(1)
+ self()
+
+ def edit(self,index=None):
+ """Edit a block.
+
+ If no number is given, use the last block executed.
+
+ This edits the in-memory copy of the demo, it does NOT modify the
+ original source file. If you want to do that, simply open the file in
+ an editor and use reload() when you make changes to the file. This
+ method is meant to let you change a block during a demonstration for
+ explanatory purposes, without damaging your original script."""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+ # decrease the index by one (unless we're at the very beginning), so
+        # that the default demo.edit() call opens up the block we've last run
+ if index>0:
+ index -= 1
+
+ filename = self.shell.mktempfile(self.src_blocks[index])
+ self.shell.hooks.editor(filename,1)
+ with open(filename, 'r') as f:
+ new_block = f.read()
+ # update the source and colored block
+ self.src_blocks[index] = new_block
+ self.src_blocks_colored[index] = self.ip_colorize(new_block)
+ self.block_index = index
+ # call to run with the newly edited index
+ self()
+
+ def show(self,index=None):
+ """Show a single block on screen"""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+
+ print(self.marquee('<%s> block # %s (%s remaining)' %
(self.title,index,self.nblocks-index-1)))
print(self.src_blocks_colored[index])
- sys.stdout.flush()
-
- def show_all(self):
- """Show entire demo on screen, block by block"""
-
- fname = self.title
- title = self.title
- nblocks = self.nblocks
- silent = self._silent
- marquee = self.marquee
- for index,block in enumerate(self.src_blocks_colored):
- if silent[index]:
- print(marquee('<%s> SILENT block # %s (%s remaining)' %
+ sys.stdout.flush()
+
+ def show_all(self):
+ """Show entire demo on screen, block by block"""
+
+ fname = self.title
+ title = self.title
+ nblocks = self.nblocks
+ silent = self._silent
+ marquee = self.marquee
+ for index,block in enumerate(self.src_blocks_colored):
+ if silent[index]:
+ print(marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
- else:
- print(marquee('<%s> block # %s (%s remaining)' %
+ else:
+ print(marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
print(block, end=' ')
- sys.stdout.flush()
-
- def run_cell(self,source):
- """Execute a string with one or more lines of code"""
-
- exec(source, self.user_ns)
-
- def __call__(self,index=None):
- """run a block of the demo.
-
- If index is given, it should be an integer >=1 and <= nblocks. This
- means that the calling convention is one off from typical Python
- lists. The reason for the inconsistency is that the demo always
-        prints 'Block n/N', and N is the total, so it would be very odd to use
- zero-indexing here."""
-
- index = self._get_index(index)
- if index is None:
- return
- try:
- marquee = self.marquee
- next_block = self.src_blocks[index]
- self.block_index += 1
- if self._silent[index]:
- print(marquee('Executing silent block # %s (%s remaining)' %
+ sys.stdout.flush()
+
+ def run_cell(self,source):
+ """Execute a string with one or more lines of code"""
+
+ exec(source, self.user_ns)
+
+ def __call__(self,index=None):
+ """run a block of the demo.
+
+ If index is given, it should be an integer >=1 and <= nblocks. This
+ means that the calling convention is one off from typical Python
+ lists. The reason for the inconsistency is that the demo always
+        prints 'Block n/N', and N is the total, so it would be very odd to use
+ zero-indexing here."""
+
+ index = self._get_index(index)
+ if index is None:
+ return
+ try:
+ marquee = self.marquee
+ next_block = self.src_blocks[index]
+ self.block_index += 1
+ if self._silent[index]:
+ print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
- else:
- self.pre_cmd()
- self.show(index)
- if self.auto_all or self._auto[index]:
+ else:
+ self.pre_cmd()
+ self.show(index)
+ if self.auto_all or self._auto[index]:
print(marquee('output:'))
- else:
+ else:
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
- ans = py3compat.input().strip()
- if ans:
+ ans = py3compat.input().strip()
+ if ans:
print(marquee('Block NOT executed'))
- return
- try:
- save_argv = sys.argv
- sys.argv = self.sys_argv
- self.run_cell(next_block)
- self.post_cmd()
- finally:
- sys.argv = save_argv
-
- except:
- self.ip_showtb(filename=self.fname)
- else:
- self.ip_ns.update(self.user_ns)
-
- if self.block_index == self.nblocks:
- mq1 = self.marquee('END OF DEMO')
- if mq1:
+ return
+ try:
+ save_argv = sys.argv
+ sys.argv = self.sys_argv
+ self.run_cell(next_block)
+ self.post_cmd()
+ finally:
+ sys.argv = save_argv
+
+ except:
+ self.ip_showtb(filename=self.fname)
+ else:
+ self.ip_ns.update(self.user_ns)
+
+ if self.block_index == self.nblocks:
+ mq1 = self.marquee('END OF DEMO')
+ if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
- self.finished = True
-
- # These methods are meant to be overridden by subclasses who may wish to
-    # customize the behavior of their demos.
- def marquee(self,txt='',width=78,mark='*'):
- """Return the input string centered in a 'marquee'."""
- return marquee(txt,width,mark)
-
- def pre_cmd(self):
- """Method called before executing each block."""
- pass
-
- def post_cmd(self):
- """Method called after executing each block."""
- pass
-
-
-class IPythonDemo(Demo):
- """Class for interactive demos with IPython's input processing applied.
-
- This subclasses Demo, but instead of executing each block by the Python
- interpreter (via exec), it actually calls IPython on it, so that any input
- filters which may be in place are applied to the input block.
-
- If you have an interactive environment which exposes special input
- processing, you can use this class instead to write demo scripts which
- operate exactly as if you had typed them interactively. The default Demo
- class requires the input to be valid, pure Python code.
- """
-
- def run_cell(self,source):
- """Execute a string with one or more lines of code"""
-
- self.shell.run_cell(source)
-
-class LineDemo(Demo):
- """Demo where each line is executed as a separate block.
-
- The input script should be valid Python code.
-
- This class doesn't require any markup at all, and it's meant for simple
- scripts (with no nesting or any kind of indentation) which consist of
- multiple lines of input to be executed, one at a time, as if they had been
- typed in the interactive prompt.
-
-    Note: the input cannot have *any* indentation, which means that only
-    single lines of input are accepted; not even function definitions are
- valid."""
-
- def reload(self):
- """Reload source from disk and initialize state."""
- # read data and parse into blocks
- self.fload()
- lines = self.fobj.readlines()
- src_b = [l for l in lines if l.strip()]
- nblocks = len(src_b)
- self.src = ''.join(lines)
- self._silent = [False]*nblocks
- self._auto = [True]*nblocks
- self.auto_all = True
- self.nblocks = nblocks
- self.src_blocks = src_b
-
- # also build syntax-highlighted source
-        self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
-
- # ensure clean namespace and seek offset
- self.reset()
-
-
-class IPythonLineDemo(IPythonDemo,LineDemo):
- """Variant of the LineDemo class whose input is processed by IPython."""
- pass
-
-
-class ClearMixin(object):
- """Use this mixin to make Demo classes with less visual clutter.
-
- Demos using this mixin will clear the screen before every block and use
- blank marquees.
-
- Note that in order for the methods defined here to actually override those
- of the classes it's mixed with, it must go /first/ in the inheritance
- tree. For example:
-
- class ClearIPDemo(ClearMixin,IPythonDemo): pass
-
- will provide an IPythonDemo class with the mixin's features.
- """
-
- def marquee(self,txt='',width=78,mark='*'):
- """Blank marquee that returns '' no matter what the input."""
- return ''
-
- def pre_cmd(self):
- """Method called before executing each block.
-
- This one simply clears the screen."""
- from IPython.utils.terminal import term_clear
- term_clear()
-
-class ClearDemo(ClearMixin,Demo):
- pass
-
-
-class ClearIPDemo(ClearMixin,IPythonDemo):
- pass
+ self.finished = True
+
+ # These methods are meant to be overridden by subclasses who may wish to
+    # customize the behavior of their demos.
+ def marquee(self,txt='',width=78,mark='*'):
+ """Return the input string centered in a 'marquee'."""
+ return marquee(txt,width,mark)
+
+ def pre_cmd(self):
+ """Method called before executing each block."""
+ pass
+
+ def post_cmd(self):
+ """Method called after executing each block."""
+ pass
+
+
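
Tying the Subclassing notes from the module docstring together, a small sketch of a customized demo; the class and banner below are illustrative, not part of this module::

    import time

    class TimedDemo(Demo):
        '''Demo that stamps each block with the wall-clock time.'''

        def marquee(self, txt='', width=78, mark='-'):
            return marquee(txt, width, mark)    # thinner separator lines

        def pre_cmd(self):
            print('--- block started at %s ---' % time.strftime('%H:%M:%S'))

    d = TimedDemo('ex_demo.py')
    d()   # run the first block with the custom banner and timestamp
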
+class IPythonDemo(Demo):
+ """Class for interactive demos with IPython's input processing applied.
+
+ This subclasses Demo, but instead of executing each block by the Python
+ interpreter (via exec), it actually calls IPython on it, so that any input
+ filters which may be in place are applied to the input block.
+
+ If you have an interactive environment which exposes special input
+ processing, you can use this class instead to write demo scripts which
+ operate exactly as if you had typed them interactively. The default Demo
+ class requires the input to be valid, pure Python code.
+ """
+
+ def run_cell(self,source):
+ """Execute a string with one or more lines of code"""
+
+ self.shell.run_cell(source)
+
+class LineDemo(Demo):
+ """Demo where each line is executed as a separate block.
+
+ The input script should be valid Python code.
+
+ This class doesn't require any markup at all, and it's meant for simple
+ scripts (with no nesting or any kind of indentation) which consist of
+ multiple lines of input to be executed, one at a time, as if they had been
+ typed in the interactive prompt.
+
+    Note: the input cannot have *any* indentation, which means that only
+    single lines of input are accepted; not even function definitions are
+ valid."""
+
+ def reload(self):
+ """Reload source from disk and initialize state."""
+ # read data and parse into blocks
+ self.fload()
+ lines = self.fobj.readlines()
+ src_b = [l for l in lines if l.strip()]
+ nblocks = len(src_b)
+ self.src = ''.join(lines)
+ self._silent = [False]*nblocks
+ self._auto = [True]*nblocks
+ self.auto_all = True
+ self.nblocks = nblocks
+ self.src_blocks = src_b
+
+ # also build syntax-highlighted source
+        self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
+
+ # ensure clean namespace and seek offset
+ self.reset()
+
+
+class IPythonLineDemo(IPythonDemo,LineDemo):
+ """Variant of the LineDemo class whose input is processed by IPython."""
+ pass
+
+
+class ClearMixin(object):
+ """Use this mixin to make Demo classes with less visual clutter.
+
+ Demos using this mixin will clear the screen before every block and use
+ blank marquees.
+
+ Note that in order for the methods defined here to actually override those
+ of the classes it's mixed with, it must go /first/ in the inheritance
+ tree. For example:
+
+ class ClearIPDemo(ClearMixin,IPythonDemo): pass
+
+ will provide an IPythonDemo class with the mixin's features.
+ """
+
+ def marquee(self,txt='',width=78,mark='*'):
+ """Blank marquee that returns '' no matter what the input."""
+ return ''
+
+ def pre_cmd(self):
+ """Method called before executing each block.
+
+ This one simply clears the screen."""
+ from IPython.utils.terminal import term_clear
+ term_clear()
+
+class ClearDemo(ClearMixin,Demo):
+ pass
+
+
+class ClearIPDemo(ClearMixin,IPythonDemo):
+ pass
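
A one-line usage note for the mixin-enabled classes above, reusing the ``ex_demo.py`` file from the module docstring::

    from IPython.lib.demo import ClearDemo
    ClearDemo('ex_demo.py')()   # clears the screen before every block
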
diff --git a/contrib/python/ipython/py2/IPython/lib/display.py b/contrib/python/ipython/py2/IPython/lib/display.py
index bf55015ff4..9221e2e062 100644
--- a/contrib/python/ipython/py2/IPython/lib/display.py
+++ b/contrib/python/ipython/py2/IPython/lib/display.py
@@ -1,558 +1,558 @@
-"""Various display related classes.
-
-Authors : MinRK, gregcaporaso, dannystaple
-"""
-from os.path import exists, isfile, splitext, abspath, join, isdir
-from os import walk, sep
-
-from IPython.core.display import DisplayObject
-
-__all__ = ['Audio', 'IFrame', 'YouTubeVideo', 'VimeoVideo', 'ScribdDocument',
- 'FileLink', 'FileLinks']
-
-
-class Audio(DisplayObject):
- """Create an audio object.
-
- When this object is returned by an input cell or passed to the
- display function, it will result in Audio controls being displayed
- in the frontend (only works in the notebook).
-
- Parameters
- ----------
- data : numpy array, list, unicode, str or bytes
- Can be one of
-
- * Numpy 1d array containing the desired waveform (mono)
- * Numpy 2d array containing waveforms for each channel.
- Shape=(NCHAN, NSAMPLES). For the standard channel order, see
- http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
- * List of float or integer representing the waveform (mono)
- * String containing the filename
- * Bytestring containing raw PCM data or
- * URL pointing to a file on the web.
-
- If the array option is used the waveform will be normalized.
-
- If a filename or url is used the format support will be browser
- dependent.
- url : unicode
- A URL to download the data from.
- filename : unicode
- Path to a local file to load the data from.
- embed : boolean
+"""Various display related classes.
+
+Authors : MinRK, gregcaporaso, dannystaple
+"""
+from os.path import exists, isfile, splitext, abspath, join, isdir
+from os import walk, sep
+
+from IPython.core.display import DisplayObject
+
+__all__ = ['Audio', 'IFrame', 'YouTubeVideo', 'VimeoVideo', 'ScribdDocument',
+ 'FileLink', 'FileLinks']
+
+
+class Audio(DisplayObject):
+ """Create an audio object.
+
+ When this object is returned by an input cell or passed to the
+ display function, it will result in Audio controls being displayed
+ in the frontend (only works in the notebook).
+
+ Parameters
+ ----------
+ data : numpy array, list, unicode, str or bytes
+ Can be one of
+
+ * Numpy 1d array containing the desired waveform (mono)
+ * Numpy 2d array containing waveforms for each channel.
+ Shape=(NCHAN, NSAMPLES). For the standard channel order, see
+ http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
+ * List of float or integer representing the waveform (mono)
+ * String containing the filename
+ * Bytestring containing raw PCM data or
+ * URL pointing to a file on the web.
+
+ If the array option is used the waveform will be normalized.
+
+ If a filename or url is used the format support will be browser
+ dependent.
+ url : unicode
+ A URL to download the data from.
+ filename : unicode
+ Path to a local file to load the data from.
+ embed : boolean
Should the audio data be embedded using a data URI (True) or should
- the original source be referenced. Set this to True if you want the
-        audio to be playable later with no internet connection in the notebook.
-
- Default is `True`, unless the keyword argument `url` is set, then
- default value is `False`.
- rate : integer
- The sampling rate of the raw data.
-        Only required when the data parameter is used as an array
- autoplay : bool
- Set to True if the audio should immediately start playing.
- Default is `False`.
-
- Examples
- --------
- ::
-
- # Generate a sound
- import numpy as np
- framerate = 44100
- t = np.linspace(0,5,framerate*5)
-        data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
- Audio(data,rate=framerate)
-
- # Can also do stereo or more channels
- dataleft = np.sin(2*np.pi*220*t)
- dataright = np.sin(2*np.pi*224*t)
- Audio([dataleft, dataright],rate=framerate)
-
- Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # From URL
- Audio(url="http://www.w3schools.com/html/horse.ogg")
-
- Audio('/path/to/sound.wav') # From file
- Audio(filename='/path/to/sound.ogg')
-
-        Audio(b'RAW_WAV_DATA..') # From bytes
-        Audio(data=b'RAW_WAV_DATA..')
-
- """
- _read_flags = 'rb'
-
- def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False):
- if filename is None and url is None and data is None:
-            raise ValueError("No audio data found. Expecting filename, url, or data.")
- if embed is False and url is None:
- raise ValueError("No url found. Expecting url when embed=False")
-
- if url is not None and embed is not True:
- self.embed = False
- else:
- self.embed = True
- self.autoplay = autoplay
- super(Audio, self).__init__(data=data, url=url, filename=filename)
-
- if self.data is not None and not isinstance(self.data, bytes):
- self.data = self._make_wav(data,rate)
-
- def reload(self):
- """Reload the raw data from file or URL."""
- import mimetypes
- if self.embed:
- super(Audio, self).reload()
-
- if self.filename is not None:
- self.mimetype = mimetypes.guess_type(self.filename)[0]
- elif self.url is not None:
- self.mimetype = mimetypes.guess_type(self.url)[0]
- else:
- self.mimetype = "audio/wav"
-
- def _make_wav(self, data, rate):
- """ Transform a numpy array to a PCM bytestring """
- import struct
- from io import BytesIO
- import wave
-
- try:
- import numpy as np
-
- data = np.array(data, dtype=float)
- if len(data.shape) == 1:
- nchan = 1
- elif len(data.shape) == 2:
-                # In wave files, channels are interleaved. E.g.,
- # "L1R1L2R2..." for stereo. See
- # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
- # for channel ordering
- nchan = data.shape[0]
- data = data.T.ravel()
- else:
- raise ValueError('Array audio input must be a 1D or 2D array')
- scaled = np.int16(data/np.max(np.abs(data))*32767).tolist()
- except ImportError:
- # check that it is a "1D" list
- idata = iter(data) # fails if not an iterable
- try:
- iter(idata.next())
- raise TypeError('Only lists of mono audio are '
- 'supported if numpy is not installed')
- except TypeError:
- # this means it's not a nested list, which is what we want
- pass
- maxabsvalue = float(max([abs(x) for x in data]))
- scaled = [int(x/maxabsvalue*32767) for x in data]
- nchan = 1
-
- fp = BytesIO()
- waveobj = wave.open(fp,mode='wb')
- waveobj.setnchannels(nchan)
- waveobj.setframerate(rate)
- waveobj.setsampwidth(2)
- waveobj.setcomptype('NONE','NONE')
- waveobj.writeframes(b''.join([struct.pack('<h',x) for x in scaled]))
- val = fp.getvalue()
- waveobj.close()
-
- return val
-
- def _data_and_metadata(self):
- """shortcut for returning metadata with url information, if defined"""
- md = {}
- if self.url:
- md['url'] = self.url
- if md:
- return self.data, md
- else:
- return self.data
-
- def _repr_html_(self):
- src = """
- <audio controls="controls" {autoplay}>
- <source src="{src}" type="{type}" />
- Your browser does not support the audio element.
- </audio>
- """
- return src.format(src=self.src_attr(),type=self.mimetype, autoplay=self.autoplay_attr())
-
- def src_attr(self):
- import base64
- if self.embed and (self.data is not None):
-            data = base64.b64encode(self.data).decode('ascii')
- return """data:{type};base64,{base64}""".format(type=self.mimetype,
- base64=data)
- elif self.url is not None:
- return self.url
- else:
- return ""
-
- def autoplay_attr(self):
- if(self.autoplay):
- return 'autoplay="autoplay"'
- else:
- return ''
-
-class IFrame(object):
- """
- Generic class to embed an iframe in an IPython notebook
- """
-
- iframe = """
- <iframe
- width="{width}"
- height="{height}"
- src="{src}{params}"
- frameborder="0"
- allowfullscreen
- ></iframe>
- """
-
- def __init__(self, src, width, height, **kwargs):
- self.src = src
- self.width = width
- self.height = height
- self.params = kwargs
-
- def _repr_html_(self):
- """return the embed iframe"""
- if self.params:
- try:
- from urllib.parse import urlencode # Py 3
- except ImportError:
- from urllib import urlencode
- params = "?" + urlencode(self.params)
- else:
- params = ""
- return self.iframe.format(src=self.src,
- width=self.width,
- height=self.height,
- params=params)
-
-class YouTubeVideo(IFrame):
- """Class for embedding a YouTube Video in an IPython session, based on its video id.
-
- e.g. to embed the video from https://www.youtube.com/watch?v=foo , you would
- do::
-
- vid = YouTubeVideo("foo")
- display(vid)
-
- To start from 30 seconds::
-
- vid = YouTubeVideo("abc", start=30)
- display(vid)
-
- To calculate seconds from time as hours, minutes, seconds use
- :class:`datetime.timedelta`::
-
- start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
-
- Other parameters can be provided as documented at
+ the original source be referenced. Set this to True if you want the
+        audio to be playable later with no internet connection in the notebook.
+
+ Default is `True`, unless the keyword argument `url` is set, then
+ default value is `False`.
+ rate : integer
+ The sampling rate of the raw data.
+        Only required when the data parameter is used as an array
+ autoplay : bool
+ Set to True if the audio should immediately start playing.
+ Default is `False`.
+
+ Examples
+ --------
+ ::
+
+ # Generate a sound
+ import numpy as np
+ framerate = 44100
+ t = np.linspace(0,5,framerate*5)
+        data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
+ Audio(data,rate=framerate)
+
+ # Can also do stereo or more channels
+ dataleft = np.sin(2*np.pi*220*t)
+ dataright = np.sin(2*np.pi*224*t)
+ Audio([dataleft, dataright],rate=framerate)
+
+ Audio("http://www.nch.com.au/acm/8k16bitpcm.wav") # From URL
+ Audio(url="http://www.w3schools.com/html/horse.ogg")
+
+ Audio('/path/to/sound.wav') # From file
+ Audio(filename='/path/to/sound.ogg')
+
+ Audio(b'RAW_WAV_DATA..') # From bytes
+ Audio(data=b'RAW_WAV_DATA..')
+
+ """
+ _read_flags = 'rb'
+
+ def __init__(self, data=None, filename=None, url=None, embed=None, rate=None, autoplay=False):
+ if filename is None and url is None and data is None:
+ raise ValueError("No image data found. Expecting filename, url, or data.")
+ if embed is False and url is None:
+ raise ValueError("No url found. Expecting url when embed=False")
+
+ if url is not None and embed is not True:
+ self.embed = False
+ else:
+ self.embed = True
+ self.autoplay = autoplay
+ super(Audio, self).__init__(data=data, url=url, filename=filename)
+
+ if self.data is not None and not isinstance(self.data, bytes):
+ self.data = self._make_wav(data,rate)
+
+ def reload(self):
+ """Reload the raw data from file or URL."""
+ import mimetypes
+ if self.embed:
+ super(Audio, self).reload()
+
+ if self.filename is not None:
+ self.mimetype = mimetypes.guess_type(self.filename)[0]
+ elif self.url is not None:
+ self.mimetype = mimetypes.guess_type(self.url)[0]
+ else:
+ self.mimetype = "audio/wav"
+
+ def _make_wav(self, data, rate):
+ """ Transform a numpy array to a PCM bytestring """
+ import struct
+ from io import BytesIO
+ import wave
+
+ try:
+ import numpy as np
+
+ data = np.array(data, dtype=float)
+ if len(data.shape) == 1:
+ nchan = 1
+ elif len(data.shape) == 2:
+ # In wave files, channels are interleaved. E.g.,
+ # "L1R1L2R2..." for stereo. See
+ # http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
+ # for channel ordering
+ nchan = data.shape[0]
+ data = data.T.ravel()
+ else:
+ raise ValueError('Array audio input must be a 1D or 2D array')
+ scaled = np.int16(data/np.max(np.abs(data))*32767).tolist()
+ except ImportError:
+ # check that it is a "1D" list
+ idata = iter(data) # fails if not an iterable
+ try:
+ iter(idata.next())
+ raise TypeError('Only lists of mono audio are '
+ 'supported if numpy is not installed')
+ except TypeError:
+ # this means it's not a nested list, which is what we want
+ pass
+ maxabsvalue = float(max([abs(x) for x in data]))
+ scaled = [int(x/maxabsvalue*32767) for x in data]
+ nchan = 1
+
+ fp = BytesIO()
+ waveobj = wave.open(fp,mode='wb')
+ waveobj.setnchannels(nchan)
+ waveobj.setframerate(rate)
+ waveobj.setsampwidth(2)
+ waveobj.setcomptype('NONE','NONE')
+ waveobj.writeframes(b''.join([struct.pack('<h',x) for x in scaled]))
+ val = fp.getvalue()
+ waveobj.close()
+
+ return val
+
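+ # A hedged sketch (not part of the original module) of what _make_wav
+ # produces; the names `sr` and `tone` are illustrative only:
+ #
+ #     import numpy as np
+ #     sr = 8000
+ #     tone = np.sin(2 * np.pi * 440 * np.linspace(0, 1, sr))
+ #     wav = Audio(tone, rate=sr).data  # bytes of a mono 16-bit WAV file
+ #     assert wav[:4] == b'RIFF'
+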
+ def _data_and_metadata(self):
+ """shortcut for returning metadata with url information, if defined"""
+ md = {}
+ if self.url:
+ md['url'] = self.url
+ if md:
+ return self.data, md
+ else:
+ return self.data
+
+ def _repr_html_(self):
+ src = """
+ <audio controls="controls" {autoplay}>
+ <source src="{src}" type="{type}" />
+ Your browser does not support the audio element.
+ </audio>
+ """
+ return src.format(src=self.src_attr(),type=self.mimetype, autoplay=self.autoplay_attr())
+
+ def src_attr(self):
+ import base64
+ if self.embed and (self.data is not None):
+ data = base64.b64encode(self.data).decode('ascii')
+ return """data:{type};base64,{base64}""".format(type=self.mimetype,
+ base64=data)
+ elif self.url is not None:
+ return self.url
+ else:
+ return ""
+
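+ # For embedded audio, src_attr() yields a data URI that browsers decode
+ # inline; a hedged sketch (the byte string below is an illustrative
+ # stand-in, not a valid WAV file):
+ #
+ #     a = Audio(b'RIFF....WAVEfmt ')
+ #     a.src_attr().startswith('data:audio/wav;base64,')  # -> True
+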
+ def autoplay_attr(self):
+ if(self.autoplay):
+ return 'autoplay="autoplay"'
+ else:
+ return ''
+
+class IFrame(object):
+ """
+ Generic class to embed an iframe in an IPython notebook
+ """
+
+ iframe = """
+ <iframe
+ width="{width}"
+ height="{height}"
+ src="{src}{params}"
+ frameborder="0"
+ allowfullscreen
+ ></iframe>
+ """
+
+ def __init__(self, src, width, height, **kwargs):
+ self.src = src
+ self.width = width
+ self.height = height
+ self.params = kwargs
+
+ def _repr_html_(self):
+ """return the embed iframe"""
+ if self.params:
+ try:
+ from urllib.parse import urlencode # Py 3
+ except ImportError:
+ from urllib import urlencode
+ params = "?" + urlencode(self.params)
+ else:
+ params = ""
+ return self.iframe.format(src=self.src,
+ width=self.width,
+ height=self.height,
+ params=params)
+
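+# A minimal usage sketch for IFrame (the URL and the extra `t` keyword are
+# hypothetical; keyword arguments become URL query parameters):
+#
+#     frame = IFrame('https://example.org/player', width=600, height=300, t=42)
+#     frame._repr_html_()  # <iframe ... src="https://example.org/player?t=42">
+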
+class YouTubeVideo(IFrame):
+ """Class for embedding a YouTube Video in an IPython session, based on its video id.
+
+ e.g. to embed the video from https://www.youtube.com/watch?v=foo , you would
+ do::
+
+ vid = YouTubeVideo("foo")
+ display(vid)
+
+ To start from 30 seconds::
+
+ vid = YouTubeVideo("abc", start=30)
+ display(vid)
+
+ To calculate seconds from time as hours, minutes, seconds use
+ :class:`datetime.timedelta`::
+
+ start=int(timedelta(hours=1, minutes=46, seconds=40).total_seconds())
+
+ Other parameters can be provided as documented at
https://developers.google.com/youtube/player_parameters#Parameters
-
- When converting the notebook using nbconvert, a jpeg representation of the video
- will be inserted in the document.
- """
-
- def __init__(self, id, width=400, height=300, **kwargs):
- self.id=id
- src = "https://www.youtube.com/embed/{0}".format(id)
- super(YouTubeVideo, self).__init__(src, width, height, **kwargs)
-
- def _repr_jpeg_(self):
- try:
- from urllib.request import urlopen # Py3
- except ImportError:
- from urllib2 import urlopen
- try:
- return urlopen("https://img.youtube.com/vi/{id}/hqdefault.jpg".format(id=self.id)).read()
- except IOError:
- return None
-
-class VimeoVideo(IFrame):
- """
- Class for embedding a Vimeo video in an IPython session, based on its video id.
- """
-
- def __init__(self, id, width=400, height=300, **kwargs):
- src="https://player.vimeo.com/video/{0}".format(id)
- super(VimeoVideo, self).__init__(src, width, height, **kwargs)
-
-class ScribdDocument(IFrame):
- """
- Class for embedding a Scribd document in an IPython session
-
- Use the start_page params to specify a starting point in the document
- Use the view_mode params to specify display type one off scroll | slideshow | book
-
- e.g to Display Wes' foundational paper about PANDAS in book mode from page 3
-
- ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
- """
-
- def __init__(self, id, width=400, height=300, **kwargs):
- src="https://www.scribd.com/embeds/{0}/content".format(id)
- super(ScribdDocument, self).__init__(src, width, height, **kwargs)
-
-class FileLink(object):
- """Class for embedding a local file link in an IPython session, based on path
-
- e.g. to embed a link that was generated in the IPython notebook as my/data.txt
-
- you would do::
-
- local_file = FileLink("my/data.txt")
- display(local_file)
-
- or in the HTML notebook, just::
-
- FileLink("my/data.txt")
- """
-
- html_link_str = "<a href='%s' target='_blank'>%s</a>"
-
- def __init__(self,
- path,
- url_prefix='',
- result_html_prefix='',
- result_html_suffix='<br>'):
- """
- Parameters
- ----------
- path : str
- path to the file or directory that should be formatted
+
+ When converting the notebook using nbconvert, a jpeg representation of the video
+ will be inserted in the document.
+ """
+
+ def __init__(self, id, width=400, height=300, **kwargs):
+ self.id=id
+ src = "https://www.youtube.com/embed/{0}".format(id)
+ super(YouTubeVideo, self).__init__(src, width, height, **kwargs)
+
+ def _repr_jpeg_(self):
+ try:
+ from urllib.request import urlopen # Py3
+ except ImportError:
+ from urllib2 import urlopen
+ try:
+ return urlopen("https://img.youtube.com/vi/{id}/hqdefault.jpg".format(id=self.id)).read()
+ except IOError:
+ return None
+
+class VimeoVideo(IFrame):
+ """
+ Class for embedding a Vimeo video in an IPython session, based on its video id.
+ """
+
+ def __init__(self, id, width=400, height=300, **kwargs):
+ src="https://player.vimeo.com/video/{0}".format(id)
+ super(VimeoVideo, self).__init__(src, width, height, **kwargs)
+
+class ScribdDocument(IFrame):
+ """
+ Class for embedding a Scribd document in an IPython session
+
+ Use the start_page param to specify a starting point in the document.
+ Use the view_mode param to specify the display type: one of scroll | slideshow | book.
+
+ e.g. to display Wes' foundational paper about pandas in book mode, starting at page 3:
+
+ ScribdDocument(71048089, width=800, height=400, start_page=3, view_mode="book")
+ """
+
+ def __init__(self, id, width=400, height=300, **kwargs):
+ src="https://www.scribd.com/embeds/{0}/content".format(id)
+ super(ScribdDocument, self).__init__(src, width, height, **kwargs)
+
+class FileLink(object):
+ """Class for embedding a local file link in an IPython session, based on path
+
+ e.g. to embed a link that was generated in the IPython notebook as my/data.txt
+
+ you would do::
+
+ local_file = FileLink("my/data.txt")
+ display(local_file)
+
+ or in the HTML notebook, just::
+
+ FileLink("my/data.txt")
+ """
+
+ html_link_str = "<a href='%s' target='_blank'>%s</a>"
+
+ def __init__(self,
+ path,
+ url_prefix='',
+ result_html_prefix='',
+ result_html_suffix='<br>'):
+ """
+ Parameters
+ ----------
+ path : str
+ path to the file or directory that should be formatted
url_prefix : str
- prefix to be prepended to all files to form a working link [default:
+ prefix to be prepended to all files to form a working link [default:
'']
- result_html_prefix : str
+ result_html_prefix : str
text to append to the beginning of the link [default: '']
- result_html_suffix : str
- text to append at the end of link [default: '<br>']
- """
- if isdir(path):
- raise ValueError("Cannot display a directory using FileLink. "
- "Use FileLinks to display '%s'." % path)
- self.path = path
- self.url_prefix = url_prefix
- self.result_html_prefix = result_html_prefix
- self.result_html_suffix = result_html_suffix
-
- def _format_path(self):
- fp = ''.join([self.url_prefix,self.path])
- return ''.join([self.result_html_prefix,
- self.html_link_str % (fp, self.path),
- self.result_html_suffix])
-
- def _repr_html_(self):
- """return html link to file
- """
- if not exists(self.path):
- return ("Path (<tt>%s</tt>) doesn't exist. "
- "It may still be in the process of "
- "being generated, or you may have the "
- "incorrect path." % self.path)
-
- return self._format_path()
-
- def __repr__(self):
- """return absolute path to file
- """
- return abspath(self.path)
-
-class FileLinks(FileLink):
- """Class for embedding local file links in an IPython session, based on path
-
- e.g. to embed links to files that were generated in the IPython notebook
- under ``my/data``, you would do::
-
- local_files = FileLinks("my/data")
- display(local_files)
-
- or in the HTML notebook, just::
-
- FileLinks("my/data")
- """
- def __init__(self,
- path,
- url_prefix='',
- included_suffixes=None,
- result_html_prefix='',
- result_html_suffix='<br>',
- notebook_display_formatter=None,
- terminal_display_formatter=None,
- recursive=True):
- """
- See :class:`FileLink` for the ``path``, ``url_prefix``,
- ``result_html_prefix`` and ``result_html_suffix`` parameters.
-
- included_suffixes : list
- Filename suffixes to include when formatting output [default: include
- all files]
-
- notebook_display_formatter : function
- Used to format links for display in the notebook. See discussion of
- formatter functions below.
-
- terminal_display_formatter : function
- Used to format links for display in the terminal. See discussion of
- formatter functions below.
-
- Formatter functions must be of the form::
-
- f(dirname, fnames, included_suffixes)
-
- dirname : str
- The name of a directory
- fnames : list
- The files in that directory
- included_suffixes : list
- The file suffixes that should be included in the output (passing None
- meansto include all suffixes in the output in the built-in formatters)
- recursive : boolean
- Whether to recurse into subdirectories. Default is True.
-
- The function should return a list of lines that will be printed in the
- notebook (if passing notebook_display_formatter) or the terminal (if
- passing terminal_display_formatter). This function is iterated over for
- each directory in self.path. Default formatters are in place, can be
- passed here to support alternative formatting.
-
- """
- if isfile(path):
- raise ValueError("Cannot display a file using FileLinks. "
- "Use FileLink to display '%s'." % path)
- self.included_suffixes = included_suffixes
- # remove trailing slashs for more consistent output formatting
- path = path.rstrip('/')
-
- self.path = path
- self.url_prefix = url_prefix
- self.result_html_prefix = result_html_prefix
- self.result_html_suffix = result_html_suffix
-
- self.notebook_display_formatter = \
- notebook_display_formatter or self._get_notebook_display_formatter()
- self.terminal_display_formatter = \
- terminal_display_formatter or self._get_terminal_display_formatter()
-
- self.recursive = recursive
-
- def _get_display_formatter(self,
- dirname_output_format,
- fname_output_format,
- fp_format,
- fp_cleaner=None):
- """ generate built-in formatter function
-
- this is used to define both the notebook and terminal built-in
- formatters as they only differ by some wrapper text for each entry
-
- dirname_output_format: string to use for formatting directory
- names, dirname will be substituted for a single "%s" which
- must appear in this string
- fname_output_format: string to use for formatting file names,
- if a single "%s" appears in the string, fname will be substituted
- if two "%s" appear in the string, the path to fname will be
- substituted for the first and fname will be substituted for the
- second
- fp_format: string to use for formatting filepaths, must contain
- exactly two "%s" and the dirname will be subsituted for the first
- and fname will be substituted for the second
- """
- def f(dirname, fnames, included_suffixes=None):
- result = []
- # begin by figuring out which filenames, if any,
- # are going to be displayed
- display_fnames = []
- for fname in fnames:
- if (isfile(join(dirname,fname)) and
- (included_suffixes is None or
- splitext(fname)[1] in included_suffixes)):
- display_fnames.append(fname)
-
- if len(display_fnames) == 0:
- # if there are no filenames to display, don't print anything
- # (not even the directory name)
- pass
- else:
- # otherwise print the formatted directory name followed by
- # the formatted filenames
- dirname_output_line = dirname_output_format % dirname
- result.append(dirname_output_line)
- for fname in display_fnames:
- fp = fp_format % (dirname,fname)
- if fp_cleaner is not None:
- fp = fp_cleaner(fp)
- try:
- # output can include both a filepath and a filename...
- fname_output_line = fname_output_format % (fp, fname)
- except TypeError:
- # ... or just a single filepath
- fname_output_line = fname_output_format % fname
- result.append(fname_output_line)
- return result
- return f
-
- def _get_notebook_display_formatter(self,
- spacer="&nbsp;&nbsp;"):
- """ generate function to use for notebook formatting
- """
- dirname_output_format = \
- self.result_html_prefix + "%s/" + self.result_html_suffix
- fname_output_format = \
- self.result_html_prefix + spacer + self.html_link_str + self.result_html_suffix
- fp_format = self.url_prefix + '%s/%s'
- if sep == "\\":
- # Working on a platform where the path separator is "\", so
- # must convert these to "/" for generating a URI
- def fp_cleaner(fp):
- # Replace all occurences of backslash ("\") with a forward
- # slash ("/") - this is necessary on windows when a path is
- # provided as input, but we must link to a URI
- return fp.replace('\\','/')
- else:
- fp_cleaner = None
-
- return self._get_display_formatter(dirname_output_format,
- fname_output_format,
- fp_format,
- fp_cleaner)
-
- def _get_terminal_display_formatter(self,
- spacer=" "):
- """ generate function to use for terminal formatting
- """
- dirname_output_format = "%s/"
- fname_output_format = spacer + "%s"
- fp_format = '%s/%s'
-
- return self._get_display_formatter(dirname_output_format,
- fname_output_format,
- fp_format)
-
- def _format_path(self):
- result_lines = []
- if self.recursive:
- walked_dir = list(walk(self.path))
- else:
- walked_dir = [next(walk(self.path))]
- walked_dir.sort()
- for dirname, subdirs, fnames in walked_dir:
- result_lines += self.notebook_display_formatter(dirname, fnames, self.included_suffixes)
- return '\n'.join(result_lines)
-
- def __repr__(self):
- """return newline-separated absolute paths
- """
- result_lines = []
- if self.recursive:
- walked_dir = list(walk(self.path))
- else:
- walked_dir = [next(walk(self.path))]
- walked_dir.sort()
- for dirname, subdirs, fnames in walked_dir:
- result_lines += self.terminal_display_formatter(dirname, fnames, self.included_suffixes)
- return '\n'.join(result_lines)
+ result_html_suffix : str
+ text to append at the end of link [default: '<br>']
+ """
+ if isdir(path):
+ raise ValueError("Cannot display a directory using FileLink. "
+ "Use FileLinks to display '%s'." % path)
+ self.path = path
+ self.url_prefix = url_prefix
+ self.result_html_prefix = result_html_prefix
+ self.result_html_suffix = result_html_suffix
+
+ def _format_path(self):
+ fp = ''.join([self.url_prefix,self.path])
+ return ''.join([self.result_html_prefix,
+ self.html_link_str % (fp, self.path),
+ self.result_html_suffix])
+
+ def _repr_html_(self):
+ """return html link to file
+ """
+ if not exists(self.path):
+ return ("Path (<tt>%s</tt>) doesn't exist. "
+ "It may still be in the process of "
+ "being generated, or you may have the "
+ "incorrect path." % self.path)
+
+ return self._format_path()
+
+ def __repr__(self):
+ """return absolute path to file
+ """
+ return abspath(self.path)
+
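+# A hedged example of FileLink customization (the path is hypothetical and
+# must exist for the HTML link to render):
+#
+#     link = FileLink('results/run1.csv', url_prefix='files/',
+#                     result_html_prefix='Output: ')
+#     # _repr_html_() then yields:
+#     # Output: <a href='files/results/run1.csv' target='_blank'>results/run1.csv</a><br>
+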
+class FileLinks(FileLink):
+ """Class for embedding local file links in an IPython session, based on path
+
+ e.g. to embed links to files that were generated in the IPython notebook
+ under ``my/data``, you would do::
+
+ local_files = FileLinks("my/data")
+ display(local_files)
+
+ or in the HTML notebook, just::
+
+ FileLinks("my/data")
+ """
+ def __init__(self,
+ path,
+ url_prefix='',
+ included_suffixes=None,
+ result_html_prefix='',
+ result_html_suffix='<br>',
+ notebook_display_formatter=None,
+ terminal_display_formatter=None,
+ recursive=True):
+ """
+ See :class:`FileLink` for the ``path``, ``url_prefix``,
+ ``result_html_prefix`` and ``result_html_suffix`` parameters.
+
+ included_suffixes : list
+ Filename suffixes to include when formatting output [default: include
+ all files]
+
+ notebook_display_formatter : function
+ Used to format links for display in the notebook. See discussion of
+ formatter functions below.
+
+ terminal_display_formatter : function
+ Used to format links for display in the terminal. See discussion of
+ formatter functions below.
+
+ Formatter functions must be of the form::
+
+ f(dirname, fnames, included_suffixes)
+
+ dirname : str
+ The name of a directory
+ fnames : list
+ The files in that directory
+ included_suffixes : list
+ The file suffixes that should be included in the output (passing None
+ means to include all suffixes in the output in the built-in formatters)
+ recursive : boolean
+ Whether to recurse into subdirectories. Default is True.
+
+ The function should return a list of lines that will be printed in the
+ notebook (if passing notebook_display_formatter) or the terminal (if
+ passing terminal_display_formatter). This function is iterated over for
+ each directory in self.path. Default formatters are in place; alternatives
+ can be passed here to support custom formatting (a brief sketch follows
+ this method).
+
+ """
+ if isfile(path):
+ raise ValueError("Cannot display a file using FileLinks. "
+ "Use FileLink to display '%s'." % path)
+ self.included_suffixes = included_suffixes
+ # remove trailing slashes for more consistent output formatting
+ path = path.rstrip('/')
+
+ self.path = path
+ self.url_prefix = url_prefix
+ self.result_html_prefix = result_html_prefix
+ self.result_html_suffix = result_html_suffix
+
+ self.notebook_display_formatter = \
+ notebook_display_formatter or self._get_notebook_display_formatter()
+ self.terminal_display_formatter = \
+ terminal_display_formatter or self._get_terminal_display_formatter()
+
+ self.recursive = recursive
+
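+ # A hedged sketch of a user-supplied formatter matching the
+ # f(dirname, fnames, included_suffixes) contract documented above; the
+ # name `flat_formatter` is made up for illustration:
+ #
+ #     def flat_formatter(dirname, fnames, included_suffixes=None):
+ #         return ['%s/%s' % (dirname, f) for f in fnames]
+ #
+ #     FileLinks('my/data', notebook_display_formatter=flat_formatter)
+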
+ def _get_display_formatter(self,
+ dirname_output_format,
+ fname_output_format,
+ fp_format,
+ fp_cleaner=None):
+ """ generate built-in formatter function
+
+ this is used to define both the notebook and terminal built-in
+ formatters as they only differ by some wrapper text for each entry
+
+ dirname_output_format: string to use for formatting directory
+ names, dirname will be substituted for a single "%s" which
+ must appear in this string
+ fname_output_format: string to use for formatting file names,
+ if a single "%s" appears in the string, fname will be substituted
+ if two "%s" appear in the string, the path to fname will be
+ substituted for the first and fname will be substituted for the
+ second
+ fp_format: string to use for formatting filepaths, must contain
+ exactly two "%s" and the dirname will be substituted for the first
+ and fname will be substituted for the second
+ """
+ def f(dirname, fnames, included_suffixes=None):
+ result = []
+ # begin by figuring out which filenames, if any,
+ # are going to be displayed
+ display_fnames = []
+ for fname in fnames:
+ if (isfile(join(dirname,fname)) and
+ (included_suffixes is None or
+ splitext(fname)[1] in included_suffixes)):
+ display_fnames.append(fname)
+
+ if len(display_fnames) == 0:
+ # if there are no filenames to display, don't print anything
+ # (not even the directory name)
+ pass
+ else:
+ # otherwise print the formatted directory name followed by
+ # the formatted filenames
+ dirname_output_line = dirname_output_format % dirname
+ result.append(dirname_output_line)
+ for fname in display_fnames:
+ fp = fp_format % (dirname,fname)
+ if fp_cleaner is not None:
+ fp = fp_cleaner(fp)
+ try:
+ # output can include both a filepath and a filename...
+ fname_output_line = fname_output_format % (fp, fname)
+ except TypeError:
+ # ... or just a single filepath
+ fname_output_line = fname_output_format % fname
+ result.append(fname_output_line)
+ return result
+ return f
+
+ def _get_notebook_display_formatter(self,
+ spacer="&nbsp;&nbsp;"):
+ """ generate function to use for notebook formatting
+ """
+ dirname_output_format = \
+ self.result_html_prefix + "%s/" + self.result_html_suffix
+ fname_output_format = \
+ self.result_html_prefix + spacer + self.html_link_str + self.result_html_suffix
+ fp_format = self.url_prefix + '%s/%s'
+ if sep == "\\":
+ # Working on a platform where the path separator is "\", so
+ # must convert these to "/" for generating a URI
+ def fp_cleaner(fp):
+ # Replace all occurrences of backslash ("\") with a forward
+ # slash ("/") - this is necessary on windows when a path is
+ # provided as input, but we must link to a URI
+ return fp.replace('\\','/')
+ else:
+ fp_cleaner = None
+
+ return self._get_display_formatter(dirname_output_format,
+ fname_output_format,
+ fp_format,
+ fp_cleaner)
+
+ def _get_terminal_display_formatter(self,
+ spacer=" "):
+ """ generate function to use for terminal formatting
+ """
+ dirname_output_format = "%s/"
+ fname_output_format = spacer + "%s"
+ fp_format = '%s/%s'
+
+ return self._get_display_formatter(dirname_output_format,
+ fname_output_format,
+ fp_format)
+
+ def _format_path(self):
+ result_lines = []
+ if self.recursive:
+ walked_dir = list(walk(self.path))
+ else:
+ walked_dir = [next(walk(self.path))]
+ walked_dir.sort()
+ for dirname, subdirs, fnames in walked_dir:
+ result_lines += self.notebook_display_formatter(dirname, fnames, self.included_suffixes)
+ return '\n'.join(result_lines)
+
+ def __repr__(self):
+ """return newline-separated absolute paths
+ """
+ result_lines = []
+ if self.recursive:
+ walked_dir = list(walk(self.path))
+ else:
+ walked_dir = [next(walk(self.path))]
+ walked_dir.sort()
+ for dirname, subdirs, fnames in walked_dir:
+ result_lines += self.terminal_display_formatter(dirname, fnames, self.included_suffixes)
+ return '\n'.join(result_lines)
diff --git a/contrib/python/ipython/py2/IPython/lib/editorhooks.py b/contrib/python/ipython/py2/IPython/lib/editorhooks.py
index 31964fbe72..392557b509 100644
--- a/contrib/python/ipython/py2/IPython/lib/editorhooks.py
+++ b/contrib/python/ipython/py2/IPython/lib/editorhooks.py
@@ -1,129 +1,129 @@
-""" 'editor' hooks for common editors that work well with ipython
-
-They should honor the line number argument, at least.
-
-Contributions are *very* welcome.
-"""
-from __future__ import print_function
-
-import os
-import pipes
-import shlex
-import subprocess
-import sys
-
-from IPython import get_ipython
-from IPython.core.error import TryNext
-from IPython.utils import py3compat
-
-
-def install_editor(template, wait=False):
- """Installs the editor that is called by IPython for the %edit magic.
-
- This overrides the default editor, which is generally set by your EDITOR
- environment variable or is notepad (windows) or vi (linux). By supplying a
- template string `run_template`, you can control how the editor is invoked
- by IPython -- (e.g. the format in which it accepts command line options)
-
- Parameters
- ----------
- template : basestring
- run_template acts as a template for how your editor is invoked by
- the shell. It should contain '{filename}', which will be replaced on
- invokation with the file name, and '{line}', $line by line number
- (or 0) to invoke the file with.
- wait : bool
- If `wait` is true, wait until the user presses enter before returning,
- to facilitate non-blocking editors that exit immediately after
- the call.
- """
-
- # not all editors support $line, so we'll leave out this check
- # for substitution in ['$file', '$line']:
- # if not substitution in run_template:
- # raise ValueError(('run_template should contain %s'
- # ' for string substitution. You supplied "%s"' % (substitution,
- # run_template)))
-
- def call_editor(self, filename, line=0):
- if line is None:
- line = 0
- cmd = template.format(filename=pipes.quote(filename), line=line)
- print(">", cmd)
- # pipes.quote doesn't work right on Windows, but it does after splitting
- if sys.platform.startswith('win'):
- cmd = shlex.split(cmd)
- proc = subprocess.Popen(cmd, shell=True)
+""" 'editor' hooks for common editors that work well with ipython
+
+They should honor the line number argument, at least.
+
+Contributions are *very* welcome.
+"""
+from __future__ import print_function
+
+import os
+import pipes
+import shlex
+import subprocess
+import sys
+
+from IPython import get_ipython
+from IPython.core.error import TryNext
+from IPython.utils import py3compat
+
+
+def install_editor(template, wait=False):
+ """Installs the editor that is called by IPython for the %edit magic.
+
+ This overrides the default editor, which is generally set by your EDITOR
+ environment variable or is notepad (windows) or vi (linux). By supplying a
+ template string `run_template`, you can control how the editor is invoked
+ by IPython -- (e.g. the format in which it accepts command line options)
+
+ Parameters
+ ----------
+ template : basestring
+ run_template acts as a template for how your editor is invoked by
+ the shell. It should contain '{filename}', which will be replaced on
+ invocation with the file name, and '{line}', which will be replaced with
+ the line number (or 0) at which to open the file.
+ wait : bool
+ If `wait` is true, wait until the user presses enter before returning,
+ to facilitate non-blocking editors that exit immediately after
+ the call.
+ """
+
+ # not all editors support $line, so we'll leave out this check
+ # for substitution in ['$file', '$line']:
+ # if not substitution in run_template:
+ # raise ValueError(('run_template should contain %s'
+ # ' for string substitution. You supplied "%s"' % (substitution,
+ # run_template)))
+
+ def call_editor(self, filename, line=0):
+ if line is None:
+ line = 0
+ cmd = template.format(filename=pipes.quote(filename), line=line)
+ print(">", cmd)
+ # pipes.quote doesn't work right on Windows, but it does after splitting
+ if sys.platform.startswith('win'):
+ cmd = shlex.split(cmd)
+ proc = subprocess.Popen(cmd, shell=True)
if proc.wait() != 0:
- raise TryNext()
- if wait:
- py3compat.input("Press Enter when done editing:")
-
- get_ipython().set_hook('editor', call_editor)
- get_ipython().editor = template
-
-
-# in these, exe is always the path/name of the executable. Useful
-# if you don't have the editor directory in your path
-def komodo(exe=u'komodo'):
- """ Activestate Komodo [Edit] """
- install_editor(exe + u' -l {line} {filename}', wait=True)
-
-
-def scite(exe=u"scite"):
- """ SciTE or Sc1 """
- install_editor(exe + u' {filename} -goto:{line}')
-
-
-def notepadplusplus(exe=u'notepad++'):
- """ Notepad++ http://notepad-plus.sourceforge.net """
- install_editor(exe + u' -n{line} {filename}')
-
-
-def jed(exe=u'jed'):
- """ JED, the lightweight emacsish editor """
- install_editor(exe + u' +{line} {filename}')
-
-
-def idle(exe=u'idle'):
- """ Idle, the editor bundled with python
-
- Parameters
- ----------
- exe : str, None
- If none, should be pretty smart about finding the executable.
- """
- if exe is None:
- import idlelib
- p = os.path.dirname(idlelib.__filename__)
- # i'm not sure if this actually works. Is this idle.py script
- # guarenteed to be executable?
- exe = os.path.join(p, 'idle.py')
- install_editor(exe + u' {filename}')
-
-
-def mate(exe=u'mate'):
- """ TextMate, the missing editor"""
- # wait=True is not required since we're using the -w flag to mate
- install_editor(exe + u' -w -l {line} {filename}')
-
-
-# ##########################################
-# these are untested, report any problems
-# ##########################################
-
-
-def emacs(exe=u'emacs'):
- install_editor(exe + u' +{line} {filename}')
-
-
-def gnuclient(exe=u'gnuclient'):
- install_editor(exe + u' -nw +{line} {filename}')
-
-
-def crimson_editor(exe=u'cedt.exe'):
- install_editor(exe + u' /L:{line} {filename}')
-
-
-def kate(exe=u'kate'):
- install_editor(exe + u' -u -l {line} {filename}')
+ raise TryNext()
+ if wait:
+ py3compat.input("Press Enter when done editing:")
+
+ get_ipython().set_hook('editor', call_editor)
+ get_ipython().editor = template
+
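+# A hedged example: installing a hypothetical editor whose command line
+# takes "+LINE FILE" arguments, following the template contract above:
+#
+#     install_editor(u'myeditor +{line} {filename}', wait=True)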
+
+# in these, exe is always the path/name of the executable. Useful
+# if you don't have the editor directory in your path
+def komodo(exe=u'komodo'):
+ """ Activestate Komodo [Edit] """
+ install_editor(exe + u' -l {line} {filename}', wait=True)
+
+
+def scite(exe=u"scite"):
+ """ SciTE or Sc1 """
+ install_editor(exe + u' {filename} -goto:{line}')
+
+
+def notepadplusplus(exe=u'notepad++'):
+ """ Notepad++ http://notepad-plus.sourceforge.net """
+ install_editor(exe + u' -n{line} {filename}')
+
+
+def jed(exe=u'jed'):
+ """ JED, the lightweight emacsish editor """
+ install_editor(exe + u' +{line} {filename}')
+
+
+def idle(exe=u'idle'):
+ """ Idle, the editor bundled with python
+
+ Parameters
+ ----------
+ exe : str, None
+ If None, tries to locate the executable automatically.
+ """
+ if exe is None:
+ import idlelib
+ p = os.path.dirname(idlelib.__file__)
+ # it is not certain this actually works: is this idle.py script
+ # guaranteed to be executable?
+ exe = os.path.join(p, 'idle.py')
+ install_editor(exe + u' {filename}')
+
+
+def mate(exe=u'mate'):
+ """ TextMate, the missing editor"""
+ # wait=True is not required since we're using the -w flag to mate
+ install_editor(exe + u' -w -l {line} {filename}')
+
+
+# ##########################################
+# these are untested, report any problems
+# ##########################################
+
+
+def emacs(exe=u'emacs'):
+ install_editor(exe + u' +{line} {filename}')
+
+
+def gnuclient(exe=u'gnuclient'):
+ install_editor(exe + u' -nw +{line} {filename}')
+
+
+def crimson_editor(exe=u'cedt.exe'):
+ install_editor(exe + u' /L:{line} {filename}')
+
+
+def kate(exe=u'kate'):
+ install_editor(exe + u' -u -l {line} {filename}')
diff --git a/contrib/python/ipython/py2/IPython/lib/guisupport.py b/contrib/python/ipython/py2/IPython/lib/guisupport.py
index 57020b30b1..5e13d4343c 100644
--- a/contrib/python/ipython/py2/IPython/lib/guisupport.py
+++ b/contrib/python/ipython/py2/IPython/lib/guisupport.py
@@ -1,83 +1,83 @@
-# coding: utf-8
-"""
-Support for creating GUI apps and starting event loops.
-
-IPython's GUI integration allows interative plotting and GUI usage in IPython
-session. IPython has two different types of GUI integration:
-
-1. The terminal based IPython supports GUI event loops through Python's
- PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
- whenever raw_input is waiting for a user to type code. We implement GUI
- support in the terminal by setting PyOS_InputHook to a function that
- iterates the event loop for a short while. It is important to note that
- in this situation, the real GUI event loop is NOT run in the normal
- manner, so you can't use the normal means to detect that it is running.
-2. In the two process IPython kernel/frontend, the GUI event loop is run in
- the kernel. In this case, the event loop is run in the normal manner by
- calling the function or method of the GUI toolkit that starts the event
- loop.
-
-In addition to starting the GUI event loops in one of these two ways, IPython
-will *always* create an appropriate GUI application object when GUi
-integration is enabled.
-
-If you want your GUI apps to run in IPython you need to do two things:
-
-1. Test to see if there is already an existing main application object. If
- there is, you should use it. If there is not an existing application object
- you should create one.
-2. Test to see if the GUI event loop is running. If it is, you should not
- start it. If the event loop is not running you may start it.
-
-This module contains functions for each toolkit that perform these things
-in a consistent manner. Because of how PyOS_InputHook runs the event loop
-you cannot detect if the event loop is running using the traditional calls
-(such as ``wx.GetApp.IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
-set These methods will return a false negative. That is, they will say the
-event loop is not running, when is actually is. To work around this limitation
-we proposed the following informal protocol:
-
-* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
- attribute of the main application object to ``True``. This should be done
- regardless of how the event loop is actually run.
-* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
- attribute of the main application object to ``False``.
-* If you want to see if the event loop is running, you *must* use ``hasattr``
- to see if ``_in_event_loop`` attribute has been set. If it is set, you
- *must* use its value. If it has not been set, you can query the toolkit
- in the normal manner.
-* If you want GUI support and no one else has created an application or
- started the event loop you *must* do this. We don't want projects to
- attempt to defer these things to someone else if they themselves need it.
-
-The functions below implement this logic for each GUI toolkit. If you need
-to create custom application subclasses, you will likely have to modify this
-code for your own purposes. This code can be copied into your own project
-so you don't have to depend on IPython.
-
-"""
-
+# coding: utf-8
+"""
+Support for creating GUI apps and starting event loops.
+
+IPython's GUI integration allows interactive plotting and GUI usage in an
+IPython session. IPython has two different types of GUI integration:
+
+1. The terminal based IPython supports GUI event loops through Python's
+ PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
+ whenever raw_input is waiting for a user to type code. We implement GUI
+ support in the terminal by setting PyOS_InputHook to a function that
+ iterates the event loop for a short while. It is important to note that
+ in this situation, the real GUI event loop is NOT run in the normal
+ manner, so you can't use the normal means to detect that it is running.
+2. In the two process IPython kernel/frontend, the GUI event loop is run in
+ the kernel. In this case, the event loop is run in the normal manner by
+ calling the function or method of the GUI toolkit that starts the event
+ loop.
+
+In addition to starting the GUI event loops in one of these two ways, IPython
+will *always* create an appropriate GUI application object when GUI
+integration is enabled.
+
+If you want your GUI apps to run in IPython you need to do two things:
+
+1. Test to see if there is already an existing main application object. If
+ there is, you should use it. If there is not an existing application object
+ you should create one.
+2. Test to see if the GUI event loop is running. If it is, you should not
+ start it. If the event loop is not running you may start it.
+
+This module contains functions for each toolkit that perform these things
+in a consistent manner. Because of how PyOS_InputHook runs the event loop
+you cannot detect if the event loop is running using the traditional calls
+(such as ``wx.GetApp.IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
+set, these methods will return a false negative. That is, they will say the
+event loop is not running when it actually is. To work around this limitation
+we proposed the following informal protocol:
+
+* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
+ attribute of the main application object to ``True``. This should be done
+ regardless of how the event loop is actually run.
+* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
+ attribute of the main application object to ``False``.
+* If you want to see if the event loop is running, you *must* use ``hasattr``
+ to see if ``_in_event_loop`` attribute has been set. If it is set, you
+ *must* use its value. If it has not been set, you can query the toolkit
+ in the normal manner.
+* If you want GUI support and no one else has created an application or
+ started the event loop you *must* do this. We don't want projects to
+ attempt to defer these things to someone else if they themselves need it.
+
+The functions below implement this logic for each GUI toolkit. If you need
+to create custom application subclasses, you will likely have to modify this
+code for your own purposes. This code can be copied into your own project
+so you don't have to depend on IPython.
+
+"""
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
+
from IPython.core.getipython import get_ipython
-
-#-----------------------------------------------------------------------------
-# wx
-#-----------------------------------------------------------------------------
-
-def get_app_wx(*args, **kwargs):
- """Create a new wx app or return an exiting one."""
- import wx
- app = wx.GetApp()
- if app is None:
- if 'redirect' not in kwargs:
- kwargs['redirect'] = False
- app = wx.PySimpleApp(*args, **kwargs)
- return app
-
-def is_event_loop_running_wx(app=None):
- """Is the wx event loop running."""
+
+#-----------------------------------------------------------------------------
+# wx
+#-----------------------------------------------------------------------------
+
+def get_app_wx(*args, **kwargs):
+ """Create a new wx app or return an exiting one."""
+ import wx
+ app = wx.GetApp()
+ if app is None:
+ if 'redirect' not in kwargs:
+ kwargs['redirect'] = False
+ app = wx.PySimpleApp(*args, **kwargs)
+ return app
+
+def is_event_loop_running_wx(app=None):
+ """Is the wx event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
@@ -87,69 +87,69 @@ def is_event_loop_running_wx(app=None):
# to check if the event loop is running, unlike Qt.
# Old way: check Wx application
- if app is None:
- app = get_app_wx()
- if hasattr(app, '_in_event_loop'):
- return app._in_event_loop
- else:
- return app.IsMainLoopRunning()
-
-def start_event_loop_wx(app=None):
- """Start the wx event loop in a consistent manner."""
- if app is None:
- app = get_app_wx()
- if not is_event_loop_running_wx(app):
- app._in_event_loop = True
- app.MainLoop()
- app._in_event_loop = False
- else:
- app._in_event_loop = True
-
-#-----------------------------------------------------------------------------
-# qt4
-#-----------------------------------------------------------------------------
-
-def get_app_qt4(*args, **kwargs):
- """Create a new qt4 app or return an existing one."""
- from IPython.external.qt_for_kernel import QtGui
- app = QtGui.QApplication.instance()
- if app is None:
- if not args:
- args = ([''],)
- app = QtGui.QApplication(*args, **kwargs)
- return app
-
-def is_event_loop_running_qt4(app=None):
- """Is the qt4 event loop running."""
+ if app is None:
+ app = get_app_wx()
+ if hasattr(app, '_in_event_loop'):
+ return app._in_event_loop
+ else:
+ return app.IsMainLoopRunning()
+
+def start_event_loop_wx(app=None):
+ """Start the wx event loop in a consistent manner."""
+ if app is None:
+ app = get_app_wx()
+ if not is_event_loop_running_wx(app):
+ app._in_event_loop = True
+ app.MainLoop()
+ app._in_event_loop = False
+ else:
+ app._in_event_loop = True
+
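+# Sketch of the documented protocol applied to wx (assumes wxPython is
+# installed; illustrative only):
+#
+#     app = get_app_wx()
+#     if not is_event_loop_running_wx(app):
+#         start_event_loop_wx(app)  # brackets MainLoop with _in_event_loop
+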
+#-----------------------------------------------------------------------------
+# qt4
+#-----------------------------------------------------------------------------
+
+def get_app_qt4(*args, **kwargs):
+ """Create a new qt4 app or return an existing one."""
+ from IPython.external.qt_for_kernel import QtGui
+ app = QtGui.QApplication.instance()
+ if app is None:
+ if not args:
+ args = ([''],)
+ app = QtGui.QApplication(*args, **kwargs)
+ return app
+
+def is_event_loop_running_qt4(app=None):
+ """Is the qt4 event loop running."""
# New way: check attribute on shell instance
ip = get_ipython()
if ip is not None:
return ip.active_eventloop and ip.active_eventloop.startswith('qt')
# Old way: check attribute on QApplication singleton
- if app is None:
- app = get_app_qt4([''])
- if hasattr(app, '_in_event_loop'):
- return app._in_event_loop
- else:
- # Does qt4 provide a other way to detect this?
- return False
-
-def start_event_loop_qt4(app=None):
- """Start the qt4 event loop in a consistent manner."""
- if app is None:
- app = get_app_qt4([''])
- if not is_event_loop_running_qt4(app):
- app._in_event_loop = True
- app.exec_()
- app._in_event_loop = False
- else:
- app._in_event_loop = True
-
-#-----------------------------------------------------------------------------
-# Tk
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# gtk
-#-----------------------------------------------------------------------------
+ if app is None:
+ app = get_app_qt4([''])
+ if hasattr(app, '_in_event_loop'):
+ return app._in_event_loop
+ else:
+ # Does qt4 provide another way to detect this?
+ return False
+
+def start_event_loop_qt4(app=None):
+ """Start the qt4 event loop in a consistent manner."""
+ if app is None:
+ app = get_app_qt4([''])
+ if not is_event_loop_running_qt4(app):
+ app._in_event_loop = True
+ app.exec_()
+ app._in_event_loop = False
+ else:
+ app._in_event_loop = True
+
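+# The qt4 helpers follow the same pattern (QtGui resolved via
+# IPython.external.qt_for_kernel); illustrative only:
+#
+#     app = get_app_qt4()
+#     start_event_loop_qt4(app)  # only marks _in_event_loop if already running
+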
+#-----------------------------------------------------------------------------
+# Tk
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# gtk
+#-----------------------------------------------------------------------------
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhook.py b/contrib/python/ipython/py2/IPython/lib/inputhook.py
index 38ccda8831..e6e8f2dbbc 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhook.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhook.py
@@ -1,666 +1,666 @@
-# coding: utf-8
-"""
+# coding: utf-8
+"""
Deprecated since IPython 5.0
-Inputhook management for GUI event loop integration.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-try:
- import ctypes
-except ImportError:
- ctypes = None
-except SystemError: # IronPython issue, 2/8/2014
- ctypes = None
-import os
-import platform
-import sys
-from distutils.version import LooseVersion as V
-
+Inputhook management for GUI event loop integration.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+except SystemError: # IronPython issue, 2/8/2014
+ ctypes = None
+import os
+import platform
+import sys
+from distutils.version import LooseVersion as V
+
from warnings import warn
-
+
warn("`IPython.lib.inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
-#-----------------------------------------------------------------------------
-# Constants
-#-----------------------------------------------------------------------------
-
-# Constants for identifying the GUI toolkits.
-GUI_WX = 'wx'
-GUI_QT = 'qt'
-GUI_QT4 = 'qt4'
-GUI_GTK = 'gtk'
-GUI_TK = 'tk'
-GUI_OSX = 'osx'
-GUI_GLUT = 'glut'
-GUI_PYGLET = 'pyglet'
-GUI_GTK3 = 'gtk3'
-GUI_NONE = 'none' # i.e. disable
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
-def _stdin_ready_posix():
- """Return True if there's something to read on stdin (posix version)."""
- infds, outfds, erfds = select.select([sys.stdin],[],[],0)
- return bool(infds)
-
-def _stdin_ready_nt():
- """Return True if there's something to read on stdin (nt version)."""
- return msvcrt.kbhit()
-
-def _stdin_ready_other():
- """Return True, assuming there's something to read on stdin."""
- return True
-
-def _use_appnope():
- """Should we use appnope for dealing with OS X app nap?
-
- Checks if we are on OS X 10.9 or greater.
- """
- return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
-
-def _ignore_CTRL_C_posix():
- """Ignore CTRL+C (SIGINT)."""
- signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-def _allow_CTRL_C_posix():
- """Take CTRL+C into account (SIGINT)."""
- signal.signal(signal.SIGINT, signal.default_int_handler)
-
-def _ignore_CTRL_C_other():
- """Ignore CTRL+C (not implemented)."""
- pass
-
-def _allow_CTRL_C_other():
- """Take CTRL+C into account (not implemented)."""
- pass
-
-if os.name == 'posix':
- import select
- import signal
- stdin_ready = _stdin_ready_posix
- ignore_CTRL_C = _ignore_CTRL_C_posix
- allow_CTRL_C = _allow_CTRL_C_posix
-elif os.name == 'nt':
- import msvcrt
- stdin_ready = _stdin_ready_nt
- ignore_CTRL_C = _ignore_CTRL_C_other
- allow_CTRL_C = _allow_CTRL_C_other
-else:
- stdin_ready = _stdin_ready_other
- ignore_CTRL_C = _ignore_CTRL_C_other
- allow_CTRL_C = _allow_CTRL_C_other
-
-
-#-----------------------------------------------------------------------------
-# Main InputHookManager class
-#-----------------------------------------------------------------------------
-
-
-class InputHookManager(object):
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# Constants for identifying the GUI toolkits.
+GUI_WX = 'wx'
+GUI_QT = 'qt'
+GUI_QT4 = 'qt4'
+GUI_GTK = 'gtk'
+GUI_TK = 'tk'
+GUI_OSX = 'osx'
+GUI_GLUT = 'glut'
+GUI_PYGLET = 'pyglet'
+GUI_GTK3 = 'gtk3'
+GUI_NONE = 'none' # i.e. disable
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
+def _stdin_ready_posix():
+ """Return True if there's something to read on stdin (posix version)."""
+ infds, outfds, erfds = select.select([sys.stdin],[],[],0)
+ return bool(infds)
+
+def _stdin_ready_nt():
+ """Return True if there's something to read on stdin (nt version)."""
+ return msvcrt.kbhit()
+
+def _stdin_ready_other():
+ """Return True, assuming there's something to read on stdin."""
+ return True
+
+def _use_appnope():
+ """Should we use appnope for dealing with OS X app nap?
+
+ Checks if we are on OS X 10.9 or greater.
+ """
+ return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
+
+def _ignore_CTRL_C_posix():
+ """Ignore CTRL+C (SIGINT)."""
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+def _allow_CTRL_C_posix():
+ """Take CTRL+C into account (SIGINT)."""
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+def _ignore_CTRL_C_other():
+ """Ignore CTRL+C (not implemented)."""
+ pass
+
+def _allow_CTRL_C_other():
+ """Take CTRL+C into account (not implemented)."""
+ pass
+
+if os.name == 'posix':
+ import select
+ import signal
+ stdin_ready = _stdin_ready_posix
+ ignore_CTRL_C = _ignore_CTRL_C_posix
+ allow_CTRL_C = _allow_CTRL_C_posix
+elif os.name == 'nt':
+ import msvcrt
+ stdin_ready = _stdin_ready_nt
+ ignore_CTRL_C = _ignore_CTRL_C_other
+ allow_CTRL_C = _allow_CTRL_C_other
+else:
+ stdin_ready = _stdin_ready_other
+ ignore_CTRL_C = _ignore_CTRL_C_other
+ allow_CTRL_C = _allow_CTRL_C_other
+
+
+#-----------------------------------------------------------------------------
+# Main InputHookManager class
+#-----------------------------------------------------------------------------
+
+
+class InputHookManager(object):
"""DEPRECATED since IPython 5.0
-
+
Manage PyOS_InputHook for different GUI toolkits.
- This class installs various hooks under ``PyOSInputHook`` to handle
- GUI event loop integration.
- """
-
- def __init__(self):
- if ctypes is None:
- warn("IPython GUI event loop requires ctypes, %gui will not be available")
- else:
- self.PYFUNC = ctypes.PYFUNCTYPE(ctypes.c_int)
- self.guihooks = {}
- self.aliases = {}
- self.apps = {}
- self._reset()
-
- def _reset(self):
- self._callback_pyfunctype = None
- self._callback = None
- self._installed = False
- self._current_gui = None
-
- def get_pyos_inputhook(self):
+ This class installs various hooks under ``PyOS_InputHook`` to handle
+ GUI event loop integration.
+ """
+
+ def __init__(self):
+ if ctypes is None:
+ warn("IPython GUI event loop requires ctypes, %gui will not be available")
+ else:
+ self.PYFUNC = ctypes.PYFUNCTYPE(ctypes.c_int)
+ self.guihooks = {}
+ self.aliases = {}
+ self.apps = {}
+ self._reset()
+
+ def _reset(self):
+ self._callback_pyfunctype = None
+ self._callback = None
+ self._installed = False
+ self._current_gui = None
+
+ def get_pyos_inputhook(self):
"""DEPRECATED since IPython 5.0
Return the current PyOS_InputHook as a ctypes.c_void_p."""
warn("`get_pyos_inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- return ctypes.c_void_p.in_dll(ctypes.pythonapi,"PyOS_InputHook")
-
- def get_pyos_inputhook_as_func(self):
+ return ctypes.c_void_p.in_dll(ctypes.pythonapi,"PyOS_InputHook")
+
+ def get_pyos_inputhook_as_func(self):
"""DEPRECATED since IPython 5.0
Return the current PyOS_InputHook as a ctypes.PYFUNCTYPE."""
warn("`get_pyos_inputhook_as_func` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- return self.PYFUNC.in_dll(ctypes.pythonapi,"PyOS_InputHook")
-
- def set_inputhook(self, callback):
+ return self.PYFUNC.in_dll(ctypes.pythonapi,"PyOS_InputHook")
+
+ def set_inputhook(self, callback):
"""DEPRECATED since IPython 5.0
Set PyOS_InputHook to callback and return the previous one."""
- # On platforms with 'readline' support, it's all too likely to
- # have a KeyboardInterrupt signal delivered *even before* an
- # initial ``try:`` clause in the callback can be executed, so
- # we need to disable CTRL+C in this situation.
- ignore_CTRL_C()
- self._callback = callback
- self._callback_pyfunctype = self.PYFUNC(callback)
- pyos_inputhook_ptr = self.get_pyos_inputhook()
- original = self.get_pyos_inputhook_as_func()
- pyos_inputhook_ptr.value = \
- ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
- self._installed = True
- return original
-
- def clear_inputhook(self, app=None):
+ # On platforms with 'readline' support, it's all too likely to
+ # have a KeyboardInterrupt signal delivered *even before* an
+ # initial ``try:`` clause in the callback can be executed, so
+ # we need to disable CTRL+C in this situation.
+ ignore_CTRL_C()
+ self._callback = callback
+ self._callback_pyfunctype = self.PYFUNC(callback)
+ pyos_inputhook_ptr = self.get_pyos_inputhook()
+ original = self.get_pyos_inputhook_as_func()
+ pyos_inputhook_ptr.value = \
+ ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
+ self._installed = True
+ return original
+
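+ # Illustrative only (deprecated API): a trivial hook that returns
+ # immediately each time Python waits for terminal input; the module-level
+ # `inputhook_manager` instance named in register()'s docstring is assumed:
+ #
+ #     def _noop():
+ #         return 0
+ #     inputhook_manager.set_inputhook(_noop)
+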
+ def clear_inputhook(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Set PyOS_InputHook to NULL and return the previous one.
- Parameters
- ----------
- app : optional, ignored
- This parameter is allowed only so that clear_inputhook() can be
- called with a similar interface as all the ``enable_*`` methods. But
- the actual value of the parameter is ignored. This uniform interface
- makes it easier to have user-level entry points in the main IPython
- app like :meth:`enable_gui`."""
+ Parameters
+ ----------
+ app : optional, ignored
+ This parameter is allowed only so that clear_inputhook() can be
+ called with a similar interface as all the ``enable_*`` methods. But
+ the actual value of the parameter is ignored. This uniform interface
+ makes it easier to have user-level entry points in the main IPython
+ app like :meth:`enable_gui`."""
warn("`clear_inputhook` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- pyos_inputhook_ptr = self.get_pyos_inputhook()
- original = self.get_pyos_inputhook_as_func()
- pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
- allow_CTRL_C()
- self._reset()
- return original
-
- def clear_app_refs(self, gui=None):
+ pyos_inputhook_ptr = self.get_pyos_inputhook()
+ original = self.get_pyos_inputhook_as_func()
+ pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
+ allow_CTRL_C()
+ self._reset()
+ return original
+
+ def clear_app_refs(self, gui=None):
"""DEPRECATED since IPython 5.0
-
+
Clear IPython's internal reference to an application instance.
- Whenever we create an app for a user on qt4 or wx, we hold a
- reference to the app. This is needed because in some cases bad things
- can happen if a user doesn't hold a reference themselves. This
- method is provided to clear the references we are holding.
-
- Parameters
- ----------
- gui : None or str
- If None, clear all app references. If ('wx', 'qt4') clear
- the app for that toolkit. References are not held for gtk or tk
- as those toolkits don't have the notion of an app.
- """
+ Whenever we create an app for a user on qt4 or wx, we hold a
+ reference to the app. This is needed because in some cases bad things
+ can happen if a user doesn't hold a reference themselves. This
+ method is provided to clear the references we are holding.
+
+ Parameters
+ ----------
+ gui : None or str
+ If None, clear all app references. If one of ('wx', 'qt4'), clear
+ the app for that toolkit. References are not held for gtk or tk
+ as those toolkits don't have the notion of an app.
+ """
warn("`clear_app_refs` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- if gui is None:
- self.apps = {}
- elif gui in self.apps:
- del self.apps[gui]
-
- def register(self, toolkitname, *aliases):
+ if gui is None:
+ self.apps = {}
+ elif gui in self.apps:
+ del self.apps[gui]
+
+ def register(self, toolkitname, *aliases):
"""DEPRECATED since IPython 5.0
Register a class to provide the event loop for a given GUI.
-
- This is intended to be used as a class decorator. It should be passed
- the names with which to register this GUI integration. The classes
- themselves should subclass :class:`InputHookBase`.
-
- ::
-
- @inputhook_manager.register('qt')
- class QtInputHook(InputHookBase):
- def enable(self, app=None):
- ...
- """
+
+ This is intended to be used as a class decorator. It should be passed
+ the names with which to register this GUI integration. The classes
+ themselves should subclass :class:`InputHookBase`.
+
+ ::
+
+ @inputhook_manager.register('qt')
+ class QtInputHook(InputHookBase):
+ def enable(self, app=None):
+ ...
+ """
warn("`register` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- def decorator(cls):
- if ctypes is not None:
- inst = cls(self)
- self.guihooks[toolkitname] = inst
- for a in aliases:
- self.aliases[a] = toolkitname
- return cls
- return decorator
-
- def current_gui(self):
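+        # The decorator instantiates the hook class immediately (when
+        # ctypes is available) and files it under the primary toolkit
+        # name; aliases simply map back to that name.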
+ def decorator(cls):
+ if ctypes is not None:
+ inst = cls(self)
+ self.guihooks[toolkitname] = inst
+ for a in aliases:
+ self.aliases[a] = toolkitname
+ return cls
+ return decorator
+
+ def current_gui(self):
"""DEPRECATED since IPython 5.0
Return a string indicating the currently active GUI or None."""
warn("`current_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- return self._current_gui
-
- def enable_gui(self, gui=None, app=None):
+ return self._current_gui
+
+ def enable_gui(self, gui=None, app=None):
"""DEPRECATED since IPython 5.0
-
+
Switch amongst GUI input hooks by name.
- This is a higher level method than :meth:`set_inputhook` - it uses the
- GUI name to look up a registered object which enables the input hook
- for that GUI.
-
- Parameters
- ----------
- gui : optional, string or None
- If None (or 'none'), clears input hook, otherwise it must be one
- of the recognized GUI names (see ``GUI_*`` constants in module).
-
- app : optional, existing application object.
- For toolkits that have the concept of a global app, you can supply an
- existing one. If not given, the toolkit will be probed for one, and if
- none is found, a new one will be created. Note that GTK does not have
- this concept, and passing an app if ``gui=="GTK"`` will raise an error.
-
- Returns
- -------
- The output of the underlying gui switch routine, typically the actual
- PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
- one.
- """
+
+        This is a higher-level method than :meth:`set_inputhook`: it uses the
+ GUI name to look up a registered object which enables the input hook
+ for that GUI.
+
+ Parameters
+ ----------
+ gui : optional, string or None
+            If None (or 'none'), clears the input hook; otherwise it must be one
+ of the recognized GUI names (see ``GUI_*`` constants in module).
+
+ app : optional, existing application object.
+ For toolkits that have the concept of a global app, you can supply an
+ existing one. If not given, the toolkit will be probed for one, and if
+ none is found, a new one will be created. Note that GTK does not have
+ this concept, and passing an app if ``gui=="GTK"`` will raise an error.
+
+ Returns
+ -------
+ The output of the underlying gui switch routine, typically the actual
+ PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
+ one.
+ """
warn("`enable_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- if gui in (None, GUI_NONE):
- return self.disable_gui()
-
- if gui in self.aliases:
- return self.enable_gui(self.aliases[gui], app)
-
- try:
- gui_hook = self.guihooks[gui]
- except KeyError:
- e = "Invalid GUI request {!r}, valid ones are: {}"
- raise ValueError(e.format(gui, ', '.join(self.guihooks)))
- self._current_gui = gui
-
- app = gui_hook.enable(app)
- if app is not None:
- app._in_event_loop = True
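+        # Resolve the request: None clears the hook, aliases recurse with
+        # the canonical name, anything else must be a registered toolkit.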
+ if gui in (None, GUI_NONE):
+ return self.disable_gui()
+
+ if gui in self.aliases:
+ return self.enable_gui(self.aliases[gui], app)
+
+ try:
+ gui_hook = self.guihooks[gui]
+ except KeyError:
+ e = "Invalid GUI request {!r}, valid ones are: {}"
+ raise ValueError(e.format(gui, ', '.join(self.guihooks)))
+ self._current_gui = gui
+
+ app = gui_hook.enable(app)
+ if app is not None:
+ app._in_event_loop = True
self.apps[gui] = app
- return app
-
- def disable_gui(self):
+ return app
+
+ def disable_gui(self):
"""DEPRECATED since IPython 5.0
Disable GUI event loop integration.
-
- If an application was registered, this sets its ``_in_event_loop``
- attribute to False. It then calls :meth:`clear_inputhook`.
- """
+
+ If an application was registered, this sets its ``_in_event_loop``
+ attribute to False. It then calls :meth:`clear_inputhook`.
+ """
warn("`disable_gui` is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- gui = self._current_gui
- if gui in self.apps:
- self.apps[gui]._in_event_loop = False
- return self.clear_inputhook()
-
-class InputHookBase(object):
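+        # Mark the registered app (if any) as no longer inside an event
+        # loop, then drop the low-level hook via clear_inputhook().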
+ gui = self._current_gui
+ if gui in self.apps:
+ self.apps[gui]._in_event_loop = False
+ return self.clear_inputhook()
+
+class InputHookBase(object):
"""DEPRECATED since IPython 5.0
Base class for input hooks for specific toolkits.
-
- Subclasses should define an :meth:`enable` method with one argument, ``app``,
- which will either be an instance of the toolkit's application class, or None.
- They may also define a :meth:`disable` method with no arguments.
- """
- def __init__(self, manager):
- self.manager = manager
-
- def disable(self):
- pass
-
-inputhook_manager = InputHookManager()
-
-@inputhook_manager.register('osx')
-class NullInputHook(InputHookBase):
+
+ Subclasses should define an :meth:`enable` method with one argument, ``app``,
+ which will either be an instance of the toolkit's application class, or None.
+ They may also define a :meth:`disable` method with no arguments.
+ """
+ def __init__(self, manager):
+ self.manager = manager
+
+ def disable(self):
+ pass
+
+inputhook_manager = InputHookManager()
+
+@inputhook_manager.register('osx')
+class NullInputHook(InputHookBase):
"""DEPRECATED since IPython 5.0
A null inputhook that doesn't need to do anything"""
- def enable(self, app=None):
+ def enable(self, app=None):
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
-
-@inputhook_manager.register('wx')
-class WxInputHook(InputHookBase):
- def enable(self, app=None):
+
+@inputhook_manager.register('wx')
+class WxInputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with wxPython.
- Parameters
- ----------
- app : WX Application, optional.
- Running application to use. If not given, we probe WX for an
- existing application object, and create a new one if none is found.
-
- Notes
- -----
- This methods sets the ``PyOS_InputHook`` for wxPython, which allows
- the wxPython to integrate with terminal based applications like
- IPython.
-
- If ``app`` is not given we probe for an existing one, and return it if
- found. If no existing app is found, we create an :class:`wx.App` as
- follows::
-
- import wx
- app = wx.App(redirect=False, clearSigInt=False)
- """
+
+        Parameters
+ ----------
+ app : WX Application, optional.
+ Running application to use. If not given, we probe WX for an
+ existing application object, and create a new one if none is found.
+
+ Notes
+ -----
+        This method sets the ``PyOS_InputHook`` for wxPython, which allows
+        wxPython to integrate with terminal-based applications like
+        IPython.
+
+ If ``app`` is not given we probe for an existing one, and return it if
+        found. If no existing app is found, we create a :class:`wx.App` as
+ follows::
+
+ import wx
+ app = wx.App(redirect=False, clearSigInt=False)
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- import wx
-
- wx_version = V(wx.__version__).version
-
- if wx_version < [2, 8]:
- raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
-
- from IPython.lib.inputhookwx import inputhook_wx
- self.manager.set_inputhook(inputhook_wx)
- if _use_appnope():
- from appnope import nope
- nope()
-
- import wx
- if app is None:
- app = wx.GetApp()
- if app is None:
- app = wx.App(redirect=False, clearSigInt=False)
-
- return app
-
- def disable(self):
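+        # Reject wxPython releases older than 2.8 (the input hook below
+        # requires it), then install inputhook_wx and, on OS X, disable
+        # App Nap via appnope so the process is not throttled while idle.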
+ import wx
+
+ wx_version = V(wx.__version__).version
+
+ if wx_version < [2, 8]:
+ raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
+
+ from IPython.lib.inputhookwx import inputhook_wx
+ self.manager.set_inputhook(inputhook_wx)
+ if _use_appnope():
+ from appnope import nope
+ nope()
+
+ import wx
+ if app is None:
+ app = wx.GetApp()
+ if app is None:
+ app = wx.App(redirect=False, clearSigInt=False)
+
+ return app
+
+ def disable(self):
"""DEPRECATED since IPython 5.0
-
+
Disable event loop integration with wxPython.
- This restores appnapp on OS X
- """
+        This restores App Nap on OS X.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- if _use_appnope():
- from appnope import nap
- nap()
-
-@inputhook_manager.register('qt', 'qt4')
-class Qt4InputHook(InputHookBase):
- def enable(self, app=None):
+ if _use_appnope():
+ from appnope import nap
+ nap()
+
+@inputhook_manager.register('qt', 'qt4')
+class Qt4InputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
Enable event loop integration with PyQt4.
-
- Parameters
- ----------
- app : Qt Application, optional.
- Running application to use. If not given, we probe Qt for an
- existing application object, and create a new one if none is found.
-
- Notes
- -----
- This methods sets the PyOS_InputHook for PyQt4, which allows
- the PyQt4 to integrate with terminal based applications like
- IPython.
-
- If ``app`` is not given we probe for an existing one, and return it if
- found. If no existing app is found, we create an :class:`QApplication`
- as follows::
-
- from PyQt4 import QtCore
- app = QtGui.QApplication(sys.argv)
- """
+
+ Parameters
+ ----------
+ app : Qt Application, optional.
+ Running application to use. If not given, we probe Qt for an
+ existing application object, and create a new one if none is found.
+
+ Notes
+ -----
+        This method sets the PyOS_InputHook for PyQt4, which allows
+        PyQt4 to integrate with terminal-based applications like
+        IPython.
+
+ If ``app`` is not given we probe for an existing one, and return it if
+        found. If no existing app is found, we create a :class:`QApplication`
+ as follows::
+
+            from PyQt4 import QtGui
+ app = QtGui.QApplication(sys.argv)
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- from IPython.lib.inputhookqt4 import create_inputhook_qt4
- app, inputhook_qt4 = create_inputhook_qt4(self.manager, app)
- self.manager.set_inputhook(inputhook_qt4)
- if _use_appnope():
- from appnope import nope
- nope()
-
- return app
-
- def disable_qt4(self):
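+        # create_inputhook_qt4() hands back both the QApplication (given,
+        # found, or newly created) and the hook function to install.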
+ from IPython.lib.inputhookqt4 import create_inputhook_qt4
+ app, inputhook_qt4 = create_inputhook_qt4(self.manager, app)
+ self.manager.set_inputhook(inputhook_qt4)
+ if _use_appnope():
+ from appnope import nope
+ nope()
+
+ return app
+
+ def disable_qt4(self):
"""DEPRECATED since IPython 5.0
-
+
Disable event loop integration with PyQt4.
- This restores appnapp on OS X
- """
+        This restores App Nap on OS X.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- if _use_appnope():
- from appnope import nap
- nap()
-
-
-@inputhook_manager.register('qt5')
-class Qt5InputHook(Qt4InputHook):
- def enable(self, app=None):
+ if _use_appnope():
+ from appnope import nap
+ nap()
+
+
+@inputhook_manager.register('qt5')
+class Qt5InputHook(Qt4InputHook):
+ def enable(self, app=None):
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- os.environ['QT_API'] = 'pyqt5'
- return Qt4InputHook.enable(self, app)
-
-
-@inputhook_manager.register('gtk')
-class GtkInputHook(InputHookBase):
- def enable(self, app=None):
+ os.environ['QT_API'] = 'pyqt5'
+ return Qt4InputHook.enable(self, app)
+
+
+@inputhook_manager.register('gtk')
+class GtkInputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with PyGTK.
- Parameters
- ----------
- app : ignored
- Ignored, it's only a placeholder to keep the call signature of all
- gui activation methods consistent, which simplifies the logic of
- supporting magics.
-
- Notes
- -----
- This methods sets the PyOS_InputHook for PyGTK, which allows
- the PyGTK to integrate with terminal based applications like
- IPython.
- """
+
+        Parameters
+ ----------
+ app : ignored
+ Ignored, it's only a placeholder to keep the call signature of all
+ gui activation methods consistent, which simplifies the logic of
+ supporting magics.
+
+ Notes
+ -----
+        This method sets the PyOS_InputHook for PyGTK, which allows
+        PyGTK to integrate with terminal-based applications like
+        IPython.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- import gtk
- try:
- gtk.set_interactive(True)
- except AttributeError:
- # For older versions of gtk, use our own ctypes version
- from IPython.lib.inputhookgtk import inputhook_gtk
- self.manager.set_inputhook(inputhook_gtk)
-
-
-@inputhook_manager.register('tk')
-class TkInputHook(InputHookBase):
- def enable(self, app=None):
+ import gtk
+ try:
+ gtk.set_interactive(True)
+ except AttributeError:
+ # For older versions of gtk, use our own ctypes version
+ from IPython.lib.inputhookgtk import inputhook_gtk
+ self.manager.set_inputhook(inputhook_gtk)
+
+
+@inputhook_manager.register('tk')
+class TkInputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with Tk.
- Parameters
- ----------
- app : toplevel :class:`Tkinter.Tk` widget, optional.
- Running toplevel widget to use. If not given, we probe Tk for an
- existing one, and create a new one if none is found.
-
- Notes
- -----
- If you have already created a :class:`Tkinter.Tk` object, the only
- thing done by this method is to register with the
- :class:`InputHookManager`, since creating that object automatically
- sets ``PyOS_InputHook``.
- """
+
+        Parameters
+ ----------
+ app : toplevel :class:`Tkinter.Tk` widget, optional.
+ Running toplevel widget to use. If not given, we probe Tk for an
+ existing one, and create a new one if none is found.
+
+ Notes
+ -----
+ If you have already created a :class:`Tkinter.Tk` object, the only
+ thing done by this method is to register with the
+ :class:`InputHookManager`, since creating that object automatically
+ sets ``PyOS_InputHook``.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- if app is None:
- try:
- from tkinter import Tk # Py 3
- except ImportError:
- from Tkinter import Tk # Py 2
- app = Tk()
- app.withdraw()
- self.manager.apps[GUI_TK] = app
- return app
-
-
-@inputhook_manager.register('glut')
-class GlutInputHook(InputHookBase):
- def enable(self, app=None):
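+        # Creating a Tkinter root is what installs PyOS_InputHook, so we
+        # only build one (and hide it) when the caller did not pass one.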
+ if app is None:
+ try:
+ from tkinter import Tk # Py 3
+ except ImportError:
+ from Tkinter import Tk # Py 2
+ app = Tk()
+ app.withdraw()
+ self.manager.apps[GUI_TK] = app
+ return app
+
+
+@inputhook_manager.register('glut')
+class GlutInputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with GLUT.
- Parameters
- ----------
-
- app : ignored
- Ignored, it's only a placeholder to keep the call signature of all
- gui activation methods consistent, which simplifies the logic of
- supporting magics.
-
- Notes
- -----
-
- This methods sets the PyOS_InputHook for GLUT, which allows the GLUT to
- integrate with terminal based applications like IPython. Due to GLUT
- limitations, it is currently not possible to start the event loop
- without first creating a window. You should thus not create another
- window but use instead the created one. See 'gui-glut.py' in the
- docs/examples/lib directory.
-
- The default screen mode is set to:
- glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
- """
+
+        Parameters
+ ----------
+
+ app : ignored
+ Ignored, it's only a placeholder to keep the call signature of all
+ gui activation methods consistent, which simplifies the logic of
+ supporting magics.
+
+ Notes
+ -----
+
+        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
+        integrate with terminal-based applications like IPython. Due to GLUT
+        limitations, it is currently not possible to start the event loop
+        without first creating a window. You should thus not create another
+        window, but instead use the one that is created. See 'gui-glut.py'
+        in the docs/examples/lib directory.
+
+ The default screen mode is set to:
+ glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
-
- import OpenGL.GLUT as glut
- from IPython.lib.inputhookglut import glut_display_mode, \
- glut_close, glut_display, \
- glut_idle, inputhook_glut
-
- if GUI_GLUT not in self.manager.apps:
- glut.glutInit( sys.argv )
- glut.glutInitDisplayMode( glut_display_mode )
- # This is specific to freeglut
- if bool(glut.glutSetOption):
- glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
- glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
- glut.glutCreateWindow( sys.argv[0] )
- glut.glutReshapeWindow( 1, 1 )
- glut.glutHideWindow( )
- glut.glutWMCloseFunc( glut_close )
- glut.glutDisplayFunc( glut_display )
- glut.glutIdleFunc( glut_idle )
- else:
- glut.glutWMCloseFunc( glut_close )
- glut.glutDisplayFunc( glut_display )
- glut.glutIdleFunc( glut_idle)
- self.manager.set_inputhook( inputhook_glut )
-
-
- def disable(self):
+
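+        # GLUT cannot register handlers without a window, so on first use
+        # create a tiny hidden window and point its close/display/idle
+        # callbacks at the dummy handlers from inputhookglut.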
+ import OpenGL.GLUT as glut
+ from IPython.lib.inputhookglut import glut_display_mode, \
+ glut_close, glut_display, \
+ glut_idle, inputhook_glut
+
+ if GUI_GLUT not in self.manager.apps:
+ glut.glutInit( sys.argv )
+ glut.glutInitDisplayMode( glut_display_mode )
+ # This is specific to freeglut
+ if bool(glut.glutSetOption):
+ glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
+ glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
+ glut.glutCreateWindow( sys.argv[0] )
+ glut.glutReshapeWindow( 1, 1 )
+ glut.glutHideWindow( )
+ glut.glutWMCloseFunc( glut_close )
+ glut.glutDisplayFunc( glut_display )
+ glut.glutIdleFunc( glut_idle )
+ else:
+ glut.glutWMCloseFunc( glut_close )
+ glut.glutDisplayFunc( glut_display )
+ glut.glutIdleFunc( glut_idle)
+ self.manager.set_inputhook( inputhook_glut )
+
+
+ def disable(self):
"""DEPRECATED since IPython 5.0
Disable event loop integration with glut.
-
- This sets PyOS_InputHook to NULL and set the display function to a
- dummy one and set the timer to a dummy timer that will be triggered
- very far in the future.
- """
+
+        This sets PyOS_InputHook to NULL, sets the display function to a
+        dummy one, and sets the timer to a dummy timer that will be
+        triggered very far in the future.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- import OpenGL.GLUT as glut
- from glut_support import glutMainLoopEvent
-
- glut.glutHideWindow() # This is an event to be processed below
- glutMainLoopEvent()
- super(GlutInputHook, self).disable()
-
-@inputhook_manager.register('pyglet')
-class PygletInputHook(InputHookBase):
- def enable(self, app=None):
+ import OpenGL.GLUT as glut
+ from glut_support import glutMainLoopEvent
+
+ glut.glutHideWindow() # This is an event to be processed below
+ glutMainLoopEvent()
+ super(GlutInputHook, self).disable()
+
+@inputhook_manager.register('pyglet')
+class PygletInputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with pyglet.
- Parameters
- ----------
- app : ignored
- Ignored, it's only a placeholder to keep the call signature of all
- gui activation methods consistent, which simplifies the logic of
- supporting magics.
-
- Notes
- -----
- This methods sets the ``PyOS_InputHook`` for pyglet, which allows
- pyglet to integrate with terminal based applications like
- IPython.
-
- """
+
+        Parameters
+ ----------
+ app : ignored
+ Ignored, it's only a placeholder to keep the call signature of all
+ gui activation methods consistent, which simplifies the logic of
+ supporting magics.
+
+ Notes
+ -----
+        This method sets the ``PyOS_InputHook`` for pyglet, which allows
+        pyglet to integrate with terminal-based applications like
+        IPython.
+
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- from IPython.lib.inputhookpyglet import inputhook_pyglet
- self.manager.set_inputhook(inputhook_pyglet)
- return app
-
-
-@inputhook_manager.register('gtk3')
-class Gtk3InputHook(InputHookBase):
- def enable(self, app=None):
+ from IPython.lib.inputhookpyglet import inputhook_pyglet
+ self.manager.set_inputhook(inputhook_pyglet)
+ return app
+
+
+@inputhook_manager.register('gtk3')
+class Gtk3InputHook(InputHookBase):
+ def enable(self, app=None):
"""DEPRECATED since IPython 5.0
-
+
Enable event loop integration with Gtk3 (gir bindings).
- Parameters
- ----------
- app : ignored
- Ignored, it's only a placeholder to keep the call signature of all
- gui activation methods consistent, which simplifies the logic of
- supporting magics.
-
- Notes
- -----
- This methods sets the PyOS_InputHook for Gtk3, which allows
- the Gtk3 to integrate with terminal based applications like
- IPython.
- """
+
+        Parameters
+ ----------
+ app : ignored
+ Ignored, it's only a placeholder to keep the call signature of all
+ gui activation methods consistent, which simplifies the logic of
+ supporting magics.
+
+ Notes
+ -----
+        This method sets the PyOS_InputHook for Gtk3, which allows
+        Gtk3 to integrate with terminal-based applications like
+        IPython.
+ """
warn("This function is deprecated since IPython 5.0 and will be removed in future versions.",
DeprecationWarning, stacklevel=2)
- from IPython.lib.inputhookgtk3 import inputhook_gtk3
- self.manager.set_inputhook(inputhook_gtk3)
-
-
-clear_inputhook = inputhook_manager.clear_inputhook
-set_inputhook = inputhook_manager.set_inputhook
-current_gui = inputhook_manager.current_gui
-clear_app_refs = inputhook_manager.clear_app_refs
-enable_gui = inputhook_manager.enable_gui
-disable_gui = inputhook_manager.disable_gui
-register = inputhook_manager.register
-guis = inputhook_manager.guihooks
-
-
-def _deprecated_disable():
+ from IPython.lib.inputhookgtk3 import inputhook_gtk3
+ self.manager.set_inputhook(inputhook_gtk3)
+
+
+clear_inputhook = inputhook_manager.clear_inputhook
+set_inputhook = inputhook_manager.set_inputhook
+current_gui = inputhook_manager.current_gui
+clear_app_refs = inputhook_manager.clear_app_refs
+enable_gui = inputhook_manager.enable_gui
+disable_gui = inputhook_manager.disable_gui
+register = inputhook_manager.register
+guis = inputhook_manager.guihooks
+
+
+def _deprecated_disable():
warn("This function is deprecated since IPython 4.0 use disable_gui() instead",
DeprecationWarning, stacklevel=2)
- inputhook_manager.disable_gui()
+ inputhook_manager.disable_gui()
-disable_wx = disable_qt4 = disable_gtk = disable_gtk3 = disable_glut = \
- disable_pyglet = disable_osx = _deprecated_disable
+disable_wx = disable_qt4 = disable_gtk = disable_gtk3 = disable_glut = \
+ disable_pyglet = disable_osx = _deprecated_disable
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookglut.py b/contrib/python/ipython/py2/IPython/lib/inputhookglut.py
index 5dd908f814..14bafe1632 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookglut.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookglut.py
@@ -1,173 +1,173 @@
-# coding: utf-8
-"""
-GLUT Inputhook support functions
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-# GLUT is quite an old library and it is difficult to ensure proper
-# integration within IPython since original GLUT does not allow to handle
-# events one by one. Instead, it requires for the mainloop to be entered
-# and never returned (there is not even a function to exit he
-# mainloop). Fortunately, there are alternatives such as freeglut
-# (available for linux and windows) and the OSX implementation gives
-# access to a glutCheckLoop() function that blocks itself until a new
-# event is received. This means we have to setup the idle callback to
-# ensure we got at least one event that will unblock the function.
-#
-# Furthermore, it is not possible to install these handlers without a window
-# being first created. We choose to make this window invisible. This means that
-# display mode options are set at this level and user won't be able to change
-# them later without modifying the code. This should probably be made available
-# via IPython options system.
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-import os
-import sys
-import time
-import signal
-import OpenGL.GLUT as glut
-import OpenGL.platform as platform
-from timeit import default_timer as clock
-
-#-----------------------------------------------------------------------------
-# Constants
-#-----------------------------------------------------------------------------
-
-# Frame per second : 60
-# Should probably be an IPython option
-glut_fps = 60
-
-
-# Display mode : double buffeed + rgba + depth
-# Should probably be an IPython option
-glut_display_mode = (glut.GLUT_DOUBLE |
- glut.GLUT_RGBA |
- glut.GLUT_DEPTH)
-
-glutMainLoopEvent = None
-if sys.platform == 'darwin':
- try:
- glutCheckLoop = platform.createBaseFunction(
- 'glutCheckLoop', dll=platform.GLUT, resultType=None,
- argTypes=[],
- doc='glutCheckLoop( ) -> None',
- argNames=(),
- )
- except AttributeError:
- raise RuntimeError(
- '''Your glut implementation does not allow interactive sessions'''
- '''Consider installing freeglut.''')
- glutMainLoopEvent = glutCheckLoop
-elif glut.HAVE_FREEGLUT:
- glutMainLoopEvent = glut.glutMainLoopEvent
-else:
- raise RuntimeError(
- '''Your glut implementation does not allow interactive sessions. '''
- '''Consider installing freeglut.''')
-
-
-#-----------------------------------------------------------------------------
-# Platform-dependent imports and functions
-#-----------------------------------------------------------------------------
-
-if os.name == 'posix':
- import select
-
- def stdin_ready():
- infds, outfds, erfds = select.select([sys.stdin],[],[],0)
- if infds:
- return True
- else:
- return False
-
-elif sys.platform == 'win32':
- import msvcrt
-
- def stdin_ready():
- return msvcrt.kbhit()
-
-#-----------------------------------------------------------------------------
-# Callback functions
-#-----------------------------------------------------------------------------
-
-def glut_display():
- # Dummy display function
- pass
-
-def glut_idle():
- # Dummy idle function
- pass
-
-def glut_close():
- # Close function only hides the current window
- glut.glutHideWindow()
- glutMainLoopEvent()
-
-def glut_int_handler(signum, frame):
- # Catch sigint and print the defautl message
- signal.signal(signal.SIGINT, signal.default_int_handler)
- print('\nKeyboardInterrupt')
- # Need to reprint the prompt at this stage
-
-
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-def inputhook_glut():
- """Run the pyglet event loop by processing pending events only.
-
- This keeps processing pending events until stdin is ready. After
- processing all pending events, a call to time.sleep is inserted. This is
- needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
- though for best performance.
- """
- # We need to protect against a user pressing Control-C when IPython is
- # idle and this is running. We trap KeyboardInterrupt and pass.
-
- signal.signal(signal.SIGINT, glut_int_handler)
-
- try:
- t = clock()
-
- # Make sure the default window is set after a window has been closed
- if glut.glutGetWindow() == 0:
- glut.glutSetWindow( 1 )
- glutMainLoopEvent()
- return 0
-
- while not stdin_ready():
- glutMainLoopEvent()
- # We need to sleep at this point to keep the idle CPU load
- # low. However, if sleep to long, GUI response is poor. As
- # a compromise, we watch how often GUI events are being processed
- # and switch between a short and long sleep time. Here are some
- # stats useful in helping to tune this.
- # time CPU load
- # 0.001 13%
- # 0.005 3%
- # 0.01 1.5%
- # 0.05 0.5%
- used_time = clock() - t
- if used_time > 10.0:
- # print 'Sleep for 1 s' # dbg
- time.sleep(1.0)
- elif used_time > 0.1:
- # Few GUI events coming in, so we can sleep longer
- # print 'Sleep for 0.05 s' # dbg
- time.sleep(0.05)
- else:
- # Many GUI events coming in, so sleep only very little
- time.sleep(0.001)
- except KeyboardInterrupt:
- pass
- return 0
+# coding: utf-8
+"""
+GLUT Inputhook support functions
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+# GLUT is quite an old library and it is difficult to ensure proper
+# integration within IPython since the original GLUT does not allow
+# handling events one by one. Instead, it requires the mainloop to be
+# entered and never returned (there is not even a function to exit the
+# mainloop). Fortunately, there are alternatives such as freeglut
+# (available for Linux and Windows), and the OSX implementation gives
+# access to a glutCheckLoop() function that blocks until a new
+# event is received. This means we have to set up the idle callback to
+# ensure we get at least one event that will unblock the function.
+#
+# Furthermore, it is not possible to install these handlers without first
+# creating a window. We choose to make this window invisible. This means
+# that display mode options are set at this level and the user won't be
+# able to change them later without modifying the code. This should
+# probably be made available via the IPython options system.
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+import os
+import sys
+import time
+import signal
+import OpenGL.GLUT as glut
+import OpenGL.platform as platform
+from timeit import default_timer as clock
+
+#-----------------------------------------------------------------------------
+# Constants
+#-----------------------------------------------------------------------------
+
+# Frames per second: 60
+# Should probably be an IPython option
+glut_fps = 60
+
+
+# Display mode: double buffered + RGBA + depth
+# Should probably be an IPython option
+glut_display_mode = (glut.GLUT_DOUBLE |
+ glut.GLUT_RGBA |
+ glut.GLUT_DEPTH)
+
+glutMainLoopEvent = None
+if sys.platform == 'darwin':
+ try:
+ glutCheckLoop = platform.createBaseFunction(
+ 'glutCheckLoop', dll=platform.GLUT, resultType=None,
+ argTypes=[],
+ doc='glutCheckLoop( ) -> None',
+ argNames=(),
+ )
+ except AttributeError:
+ raise RuntimeError(
+            '''Your glut implementation does not allow interactive sessions. '''
+ '''Consider installing freeglut.''')
+ glutMainLoopEvent = glutCheckLoop
+elif glut.HAVE_FREEGLUT:
+ glutMainLoopEvent = glut.glutMainLoopEvent
+else:
+ raise RuntimeError(
+ '''Your glut implementation does not allow interactive sessions. '''
+ '''Consider installing freeglut.''')
+
+
+#-----------------------------------------------------------------------------
+# Platform-dependent imports and functions
+#-----------------------------------------------------------------------------
+
+if os.name == 'posix':
+ import select
+
+ def stdin_ready():
+ infds, outfds, erfds = select.select([sys.stdin],[],[],0)
+ if infds:
+ return True
+ else:
+ return False
+
+elif sys.platform == 'win32':
+ import msvcrt
+
+ def stdin_ready():
+ return msvcrt.kbhit()
+
+#-----------------------------------------------------------------------------
+# Callback functions
+#-----------------------------------------------------------------------------
+
+def glut_display():
+ # Dummy display function
+ pass
+
+def glut_idle():
+ # Dummy idle function
+ pass
+
+def glut_close():
+ # Close function only hides the current window
+ glut.glutHideWindow()
+ glutMainLoopEvent()
+
+def glut_int_handler(signum, frame):
+    # Catch SIGINT and print the default message
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+ print('\nKeyboardInterrupt')
+ # Need to reprint the prompt at this stage
+
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+def inputhook_glut():
+ """Run the pyglet event loop by processing pending events only.
+
+ This keeps processing pending events until stdin is ready. After
+ processing all pending events, a call to time.sleep is inserted. This is
+ needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
+ though for best performance.
+ """
+ # We need to protect against a user pressing Control-C when IPython is
+ # idle and this is running. We trap KeyboardInterrupt and pass.
+
+ signal.signal(signal.SIGINT, glut_int_handler)
+
+ try:
+ t = clock()
+
+ # Make sure the default window is set after a window has been closed
+ if glut.glutGetWindow() == 0:
+ glut.glutSetWindow( 1 )
+ glutMainLoopEvent()
+ return 0
+
+ while not stdin_ready():
+ glutMainLoopEvent()
+ # We need to sleep at this point to keep the idle CPU load
+            # low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ except KeyboardInterrupt:
+ pass
+ return 0
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookgtk.py b/contrib/python/ipython/py2/IPython/lib/inputhookgtk.py
index 52dfac5f52..2b4b656f91 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookgtk.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookgtk.py
@@ -1,35 +1,35 @@
-# encoding: utf-8
-"""
-Enable pygtk to be used interacive by setting PyOS_InputHook.
-
-Authors: Brian Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-import gtk, gobject
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-
-def _main_quit(*args, **kwargs):
- gtk.main_quit()
- return False
-
-def inputhook_gtk():
- gobject.io_add_watch(sys.stdin, gobject.IO_IN, _main_quit)
- gtk.main()
- return 0
-
+# encoding: utf-8
+"""
+Enable pygtk to be used interactively by setting PyOS_InputHook.
+
+Authors: Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+import gtk, gobject
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+def _main_quit(*args, **kwargs):
+ gtk.main_quit()
+ return False
+
+def inputhook_gtk():
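+    # Block in gtk.main() until stdin becomes readable; _main_quit then
+    # stops the loop, and by returning False removes the io watch so it
+    # only fires once.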
+ gobject.io_add_watch(sys.stdin, gobject.IO_IN, _main_quit)
+ gtk.main()
+ return 0
+
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookgtk3.py b/contrib/python/ipython/py2/IPython/lib/inputhookgtk3.py
index 6998805145..531f5cae14 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookgtk3.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookgtk3.py
@@ -1,34 +1,34 @@
-# encoding: utf-8
-"""
-Enable Gtk3 to be used interacive by IPython.
-
-Authors: Thomi Richards
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2012, the IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-from gi.repository import Gtk, GLib
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def _main_quit(*args, **kwargs):
- Gtk.main_quit()
- return False
-
-
-def inputhook_gtk3():
- GLib.io_add_watch(sys.stdin, GLib.IO_IN, _main_quit)
- Gtk.main()
- return 0
+# encoding: utf-8
+"""
+Enable Gtk3 to be used interactively by IPython.
+
+Authors: Thomi Richards
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2012, the IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+from gi.repository import Gtk, GLib
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def _main_quit(*args, **kwargs):
+ Gtk.main_quit()
+ return False
+
+
+def inputhook_gtk3():
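+    # Same scheme with the GObject-introspection bindings: run Gtk.main()
+    # until stdin has data, then quit so the prompt can process the input.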
+ GLib.io_add_watch(sys.stdin, GLib.IO_IN, _main_quit)
+ Gtk.main()
+ return 0
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookpyglet.py b/contrib/python/ipython/py2/IPython/lib/inputhookpyglet.py
index 3cd209946e..b82fcf5ea7 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookpyglet.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookpyglet.py
@@ -1,111 +1,111 @@
-# encoding: utf-8
-"""
-Enable pyglet to be used interacive by setting PyOS_InputHook.
-
-Authors
--------
-
-* Nicolas P. Rougier
-* Fernando Perez
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-import sys
-import time
-from timeit import default_timer as clock
-import pyglet
-
-#-----------------------------------------------------------------------------
-# Platform-dependent imports and functions
-#-----------------------------------------------------------------------------
-
-if os.name == 'posix':
- import select
-
- def stdin_ready():
- infds, outfds, erfds = select.select([sys.stdin],[],[],0)
- if infds:
- return True
- else:
- return False
-
-elif sys.platform == 'win32':
- import msvcrt
-
- def stdin_ready():
- return msvcrt.kbhit()
-
-
-# On linux only, window.flip() has a bug that causes an AttributeError on
-# window close. For details, see:
-# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
-
-if sys.platform.startswith('linux'):
- def flip(window):
- try:
- window.flip()
- except AttributeError:
- pass
-else:
- def flip(window):
- window.flip()
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def inputhook_pyglet():
- """Run the pyglet event loop by processing pending events only.
-
- This keeps processing pending events until stdin is ready. After
- processing all pending events, a call to time.sleep is inserted. This is
- needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
- though for best performance.
- """
- # We need to protect against a user pressing Control-C when IPython is
- # idle and this is running. We trap KeyboardInterrupt and pass.
- try:
- t = clock()
- while not stdin_ready():
- pyglet.clock.tick()
- for window in pyglet.app.windows:
- window.switch_to()
- window.dispatch_events()
- window.dispatch_event('on_draw')
- flip(window)
-
- # We need to sleep at this point to keep the idle CPU load
- # low. However, if sleep to long, GUI response is poor. As
- # a compromise, we watch how often GUI events are being processed
- # and switch between a short and long sleep time. Here are some
- # stats useful in helping to tune this.
- # time CPU load
- # 0.001 13%
- # 0.005 3%
- # 0.01 1.5%
- # 0.05 0.5%
- used_time = clock() - t
- if used_time > 10.0:
- # print 'Sleep for 1 s' # dbg
- time.sleep(1.0)
- elif used_time > 0.1:
- # Few GUI events coming in, so we can sleep longer
- # print 'Sleep for 0.05 s' # dbg
- time.sleep(0.05)
- else:
- # Many GUI events coming in, so sleep only very little
- time.sleep(0.001)
- except KeyboardInterrupt:
- pass
- return 0
+# encoding: utf-8
+"""
+Enable pyglet to be used interactively by setting PyOS_InputHook.
+
+Authors
+-------
+
+* Nicolas P. Rougier
+* Fernando Perez
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import time
+from timeit import default_timer as clock
+import pyglet
+
+#-----------------------------------------------------------------------------
+# Platform-dependent imports and functions
+#-----------------------------------------------------------------------------
+
+if os.name == 'posix':
+ import select
+
+ def stdin_ready():
+ infds, outfds, erfds = select.select([sys.stdin],[],[],0)
+ if infds:
+ return True
+ else:
+ return False
+
+elif sys.platform == 'win32':
+ import msvcrt
+
+ def stdin_ready():
+ return msvcrt.kbhit()
+
+
+# On linux only, window.flip() has a bug that causes an AttributeError on
+# window close. For details, see:
+# http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
+
+if sys.platform.startswith('linux'):
+ def flip(window):
+ try:
+ window.flip()
+ except AttributeError:
+ pass
+else:
+ def flip(window):
+ window.flip()
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def inputhook_pyglet():
+ """Run the pyglet event loop by processing pending events only.
+
+ This keeps processing pending events until stdin is ready. After
+ processing all pending events, a call to time.sleep is inserted. This is
+ needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
+ though for best performance.
+ """
+ # We need to protect against a user pressing Control-C when IPython is
+ # idle and this is running. We trap KeyboardInterrupt and pass.
+ try:
+ t = clock()
+ while not stdin_ready():
+ pyglet.clock.tick()
+ for window in pyglet.app.windows:
+ window.switch_to()
+ window.dispatch_events()
+ window.dispatch_event('on_draw')
+ flip(window)
+
+ # We need to sleep at this point to keep the idle CPU load
+            # low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ except KeyboardInterrupt:
+ pass
+ return 0
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookqt4.py b/contrib/python/ipython/py2/IPython/lib/inputhookqt4.py
index a0d0f9e348..8a83902fc0 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookqt4.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookqt4.py
@@ -1,180 +1,180 @@
-# -*- coding: utf-8 -*-
-"""
-Qt4's inputhook support function
-
-Author: Christian Boos
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-import signal
-import threading
-
-from IPython.core.interactiveshell import InteractiveShell
-from IPython.external.qt_for_kernel import QtCore, QtGui
-from IPython.lib.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
-
-#-----------------------------------------------------------------------------
-# Module Globals
-#-----------------------------------------------------------------------------
-
-got_kbdint = False
-sigint_timer = None
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def create_inputhook_qt4(mgr, app=None):
- """Create an input hook for running the Qt4 application event loop.
-
- Parameters
- ----------
- mgr : an InputHookManager
-
- app : Qt Application, optional.
- Running application to use. If not given, we probe Qt for an
- existing application object, and create a new one if none is found.
-
- Returns
- -------
- A pair consisting of a Qt Application (either the one given or the
- one found or created) and a inputhook.
-
- Notes
- -----
- We use a custom input hook instead of PyQt4's default one, as it
- interacts better with the readline packages (issue #481).
-
- The inputhook function works in tandem with a 'pre_prompt_hook'
- which automatically restores the hook as an inputhook in case the
- latter has been temporarily disabled after having intercepted a
- KeyboardInterrupt.
- """
-
- if app is None:
- app = QtCore.QCoreApplication.instance()
- if app is None:
- app = QtGui.QApplication([" "])
-
- # Re-use previously created inputhook if any
- ip = InteractiveShell.instance()
- if hasattr(ip, '_inputhook_qt4'):
- return app, ip._inputhook_qt4
-
- # Otherwise create the inputhook_qt4/preprompthook_qt4 pair of
- # hooks (they both share the got_kbdint flag)
-
- def inputhook_qt4():
- """PyOS_InputHook python hook for Qt4.
-
- Process pending Qt events and if there's no pending keyboard
- input, spend a short slice of time (50ms) running the Qt event
- loop.
-
- As a Python ctypes callback can't raise an exception, we catch
- the KeyboardInterrupt and temporarily deactivate the hook,
- which will let a *second* CTRL+C be processed normally and go
- back to a clean prompt line.
- """
- try:
- allow_CTRL_C()
- app = QtCore.QCoreApplication.instance()
- if not app: # shouldn't happen, but safer if it happens anyway...
- return 0
- app.processEvents(QtCore.QEventLoop.AllEvents, 300)
- if not stdin_ready():
- # Generally a program would run QCoreApplication::exec()
- # from main() to enter and process the Qt event loop until
- # quit() or exit() is called and the program terminates.
- #
- # For our input hook integration, we need to repeatedly
- # enter and process the Qt event loop for only a short
- # amount of time (say 50ms) to ensure that Python stays
- # responsive to other user inputs.
- #
- # A naive approach would be to repeatedly call
- # QCoreApplication::exec(), using a timer to quit after a
- # short amount of time. Unfortunately, QCoreApplication
- # emits an aboutToQuit signal before stopping, which has
- # the undesirable effect of closing all modal windows.
- #
- # To work around this problem, we instead create a
- # QEventLoop and call QEventLoop::exec(). Other than
- # setting some state variables which do not seem to be
- # used anywhere, the only thing QCoreApplication adds is
- # the aboutToQuit signal which is precisely what we are
- # trying to avoid.
- timer = QtCore.QTimer()
- event_loop = QtCore.QEventLoop()
- timer.timeout.connect(event_loop.quit)
- while not stdin_ready():
- timer.start(50)
- event_loop.exec_()
- timer.stop()
- except KeyboardInterrupt:
- global got_kbdint, sigint_timer
-
- ignore_CTRL_C()
- got_kbdint = True
- mgr.clear_inputhook()
-
- # This generates a second SIGINT so the user doesn't have to
- # press CTRL+C twice to get a clean prompt.
- #
- # Since we can't catch the resulting KeyboardInterrupt here
- # (because this is a ctypes callback), we use a timer to
- # generate the SIGINT after we leave this callback.
- #
- # Unfortunately this doesn't work on Windows (SIGINT kills
- # Python and CTRL_C_EVENT doesn't work).
- if(os.name == 'posix'):
- pid = os.getpid()
- if(not sigint_timer):
- sigint_timer = threading.Timer(.01, os.kill,
- args=[pid, signal.SIGINT] )
- sigint_timer.start()
- else:
- print("\nKeyboardInterrupt - Ctrl-C again for new prompt")
-
-
- except: # NO exceptions are allowed to escape from a ctypes callback
- ignore_CTRL_C()
- from traceback import print_exc
- print_exc()
- print("Got exception from inputhook_qt4, unregistering.")
- mgr.clear_inputhook()
- finally:
- allow_CTRL_C()
- return 0
-
- def preprompthook_qt4(ishell):
- """'pre_prompt_hook' used to restore the Qt4 input hook
-
- (in case the latter was temporarily deactivated after a
- CTRL+C)
- """
- global got_kbdint, sigint_timer
-
- if(sigint_timer):
- sigint_timer.cancel()
- sigint_timer = None
-
- if got_kbdint:
- mgr.set_inputhook(inputhook_qt4)
- got_kbdint = False
-
- ip._inputhook_qt4 = inputhook_qt4
- ip.set_hook('pre_prompt_hook', preprompthook_qt4)
-
- return app, inputhook_qt4
+# -*- coding: utf-8 -*-
+"""
+Qt4's inputhook support function
+
+Author: Christian Boos
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import signal
+import threading
+
+from IPython.core.interactiveshell import InteractiveShell
+from IPython.external.qt_for_kernel import QtCore, QtGui
+from IPython.lib.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
+
+#-----------------------------------------------------------------------------
+# Module Globals
+#-----------------------------------------------------------------------------
+
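+# Shared state between inputhook_qt4 and preprompthook_qt4: whether a
+# Ctrl-C was intercepted, and the timer used to re-deliver SIGINT.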
+got_kbdint = False
+sigint_timer = None
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def create_inputhook_qt4(mgr, app=None):
+ """Create an input hook for running the Qt4 application event loop.
+
+ Parameters
+ ----------
+ mgr : an InputHookManager
+
+ app : Qt Application, optional.
+ Running application to use. If not given, we probe Qt for an
+ existing application object, and create a new one if none is found.
+
+ Returns
+ -------
+ A pair consisting of a Qt Application (either the one given or the
+    one found or created) and an inputhook.
+
+ Notes
+ -----
+ We use a custom input hook instead of PyQt4's default one, as it
+ interacts better with the readline packages (issue #481).
+
+ The inputhook function works in tandem with a 'pre_prompt_hook'
+ which automatically restores the hook as an inputhook in case the
+ latter has been temporarily disabled after having intercepted a
+ KeyboardInterrupt.
+ """
+
+ if app is None:
+ app = QtCore.QCoreApplication.instance()
+ if app is None:
+ app = QtGui.QApplication([" "])
+
+ # Re-use previously created inputhook if any
+ ip = InteractiveShell.instance()
+ if hasattr(ip, '_inputhook_qt4'):
+ return app, ip._inputhook_qt4
+
+ # Otherwise create the inputhook_qt4/preprompthook_qt4 pair of
+ # hooks (they both share the got_kbdint flag)
+
+ def inputhook_qt4():
+ """PyOS_InputHook python hook for Qt4.
+
+ Process pending Qt events and if there's no pending keyboard
+ input, spend a short slice of time (50ms) running the Qt event
+ loop.
+
+ As a Python ctypes callback can't raise an exception, we catch
+ the KeyboardInterrupt and temporarily deactivate the hook,
+ which will let a *second* CTRL+C be processed normally and go
+ back to a clean prompt line.
+ """
+ try:
+ allow_CTRL_C()
+ app = QtCore.QCoreApplication.instance()
+ if not app: # shouldn't happen, but safer if it happens anyway...
+ return 0
+ app.processEvents(QtCore.QEventLoop.AllEvents, 300)
+ if not stdin_ready():
+ # Generally a program would run QCoreApplication::exec()
+ # from main() to enter and process the Qt event loop until
+ # quit() or exit() is called and the program terminates.
+ #
+ # For our input hook integration, we need to repeatedly
+ # enter and process the Qt event loop for only a short
+ # amount of time (say 50ms) to ensure that Python stays
+ # responsive to other user inputs.
+ #
+ # A naive approach would be to repeatedly call
+ # QCoreApplication::exec(), using a timer to quit after a
+ # short amount of time. Unfortunately, QCoreApplication
+ # emits an aboutToQuit signal before stopping, which has
+ # the undesirable effect of closing all modal windows.
+ #
+ # To work around this problem, we instead create a
+ # QEventLoop and call QEventLoop::exec(). Other than
+ # setting some state variables which do not seem to be
+ # used anywhere, the only thing QCoreApplication adds is
+ # the aboutToQuit signal which is precisely what we are
+ # trying to avoid.
+ timer = QtCore.QTimer()
+ event_loop = QtCore.QEventLoop()
+ timer.timeout.connect(event_loop.quit)
+ while not stdin_ready():
+ timer.start(50)
+ event_loop.exec_()
+ timer.stop()
+ except KeyboardInterrupt:
+ global got_kbdint, sigint_timer
+
+ ignore_CTRL_C()
+ got_kbdint = True
+ mgr.clear_inputhook()
+
+ # This generates a second SIGINT so the user doesn't have to
+ # press CTRL+C twice to get a clean prompt.
+ #
+ # Since we can't catch the resulting KeyboardInterrupt here
+ # (because this is a ctypes callback), we use a timer to
+ # generate the SIGINT after we leave this callback.
+ #
+ # Unfortunately this doesn't work on Windows (SIGINT kills
+ # Python and CTRL_C_EVENT doesn't work).
+ if os.name == 'posix':
+ pid = os.getpid()
+ if not sigint_timer:
+ sigint_timer = threading.Timer(0.01, os.kill,
+ args=[pid, signal.SIGINT])
+ sigint_timer.start()
+ else:
+ print("\nKeyboardInterrupt - Ctrl-C again for new prompt")
+
+
+ except: # NO exceptions are allowed to escape from a ctypes callback
+ ignore_CTRL_C()
+ from traceback import print_exc
+ print_exc()
+ print("Got exception from inputhook_qt4, unregistering.")
+ mgr.clear_inputhook()
+ finally:
+ allow_CTRL_C()
+ return 0
+
+ def preprompthook_qt4(ishell):
+ """'pre_prompt_hook' used to restore the Qt4 input hook
+
+ (in case the latter was temporarily deactivated after a
+ CTRL+C)
+ """
+ global got_kbdint, sigint_timer
+
+ if sigint_timer:
+ sigint_timer.cancel()
+ sigint_timer = None
+
+ if got_kbdint:
+ mgr.set_inputhook(inputhook_qt4)
+ got_kbdint = False
+
+ ip._inputhook_qt4 = inputhook_qt4
+ ip.set_hook('pre_prompt_hook', preprompthook_qt4)
+
+ return app, inputhook_qt4
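
The comment block in inputhook_qt4 above explains why the hook pumps a private QEventLoop with a QTimer instead of calling QCoreApplication::exec(). A minimal standalone sketch of that pattern, assuming PyQt4-style bindings and a caller-supplied stdin_ready() predicate (both assumptions, not part of this diff):

    # Sketch: run the Qt event loop in short slices until input is available.
    # A private QEventLoop avoids QCoreApplication's aboutToQuit signal,
    # which would otherwise close all modal windows.
    from PyQt4 import QtCore

    def pump_qt_events(stdin_ready):
        timer = QtCore.QTimer()
        event_loop = QtCore.QEventLoop()
        timer.timeout.connect(event_loop.quit)  # each tick ends one slice
        while not stdin_ready():
            timer.start(50)      # wake up after at most 50 ms
            event_loop.exec_()   # process events for one slice
            timer.stop()
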
diff --git a/contrib/python/ipython/py2/IPython/lib/inputhookwx.py b/contrib/python/ipython/py2/IPython/lib/inputhookwx.py
index e054c6d915..3aac526131 100644
--- a/contrib/python/ipython/py2/IPython/lib/inputhookwx.py
+++ b/contrib/python/ipython/py2/IPython/lib/inputhookwx.py
@@ -1,167 +1,167 @@
-# encoding: utf-8
-
-"""
-Enable wxPython to be used interactively by setting PyOS_InputHook.
-
-Authors: Robin Dunn, Brian Granger, Ondrej Certik
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-import signal
-import time
-from timeit import default_timer as clock
-import wx
-
-from IPython.lib.inputhook import stdin_ready
-
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def inputhook_wx1():
- """Run the wx event loop by processing pending events only.
-
- This approach seems to work, but its performance is not great as it
- relies on having PyOS_InputHook called regularly.
- """
- try:
- app = wx.GetApp()
- if app is not None:
- assert wx.Thread_IsMain()
-
- # Make a temporary event loop and process system events until
- # there are no more waiting, then allow idle events (which
- # will also deal with pending or posted wx events.)
- evtloop = wx.EventLoop()
- ea = wx.EventLoopActivator(evtloop)
- while evtloop.Pending():
- evtloop.Dispatch()
- app.ProcessIdle()
- del ea
- except KeyboardInterrupt:
- pass
- return 0
-
-class EventLoopTimer(wx.Timer):
-
- def __init__(self, func):
- self.func = func
- wx.Timer.__init__(self)
-
- def Notify(self):
- self.func()
-
-class EventLoopRunner(object):
-
- def Run(self, time):
- self.evtloop = wx.EventLoop()
- self.timer = EventLoopTimer(self.check_stdin)
- self.timer.Start(time)
- self.evtloop.Run()
-
- def check_stdin(self):
- if stdin_ready():
- self.timer.Stop()
- self.evtloop.Exit()
-
-def inputhook_wx2():
- """Run the wx event loop, polling for stdin.
-
- This version runs the wx event loop for an undetermined amount of time,
- during which it periodically checks to see if anything is ready on
- stdin. If anything is ready on stdin, the event loop exits.
-
- The argument to elr.Run controls how often the event loop looks at stdin.
- This determines the responsiveness at the keyboard. A setting of 1000
- enables a user to type at most 1 char per second. I have found that a
- setting of 10 gives good keyboard response. We can shorten it further,
- but eventually performance would suffer from calling select/kbhit too
- often.
- """
- try:
- app = wx.GetApp()
- if app is not None:
- assert wx.Thread_IsMain()
- elr = EventLoopRunner()
- # As this time is made shorter, keyboard response improves, but idle
- # CPU load goes up. 10 ms seems like a good compromise.
- elr.Run(time=10) # CHANGE time here to control polling interval
- except KeyboardInterrupt:
- pass
- return 0
-
-def inputhook_wx3():
- """Run the wx event loop by processing pending events only.
-
- This is like inputhook_wx1, but it keeps processing pending events
- until stdin is ready. After processing all pending events, a call to
- time.sleep is inserted. This is needed; otherwise, CPU usage is at 100%.
- This sleep time should be tuned, though, for best performance.
- """
- # We need to protect against a user pressing Control-C when IPython is
- # idle and this is running. We trap KeyboardInterrupt and pass.
- try:
- app = wx.GetApp()
- if app is not None:
- assert wx.Thread_IsMain()
-
- # The import of wx on Linux sets the handler for signal.SIGINT
- # to 0. This is a bug in wx or gtk. We fix by just setting it
- # back to the Python default.
- if not callable(signal.getsignal(signal.SIGINT)):
- signal.signal(signal.SIGINT, signal.default_int_handler)
-
- evtloop = wx.EventLoop()
- ea = wx.EventLoopActivator(evtloop)
- t = clock()
- while not stdin_ready():
- while evtloop.Pending():
- t = clock()
- evtloop.Dispatch()
- app.ProcessIdle()
- # We need to sleep at this point to keep the idle CPU load
- low. However, if we sleep too long, GUI response is poor. As
- # a compromise, we watch how often GUI events are being processed
- # and switch between a short and long sleep time. Here are some
- # stats useful in helping to tune this.
- # time CPU load
- # 0.001 13%
- # 0.005 3%
- # 0.01 1.5%
- # 0.05 0.5%
- used_time = clock() - t
- if used_time > 10.0:
- # print 'Sleep for 1 s' # dbg
- time.sleep(1.0)
- elif used_time > 0.1:
- # Few GUI events coming in, so we can sleep longer
- # print 'Sleep for 0.05 s' # dbg
- time.sleep(0.05)
- else:
- # Many GUI events coming in, so sleep only very little
- time.sleep(0.001)
- del ea
- except KeyboardInterrupt:
- pass
- return 0
-
-if sys.platform == 'darwin':
- # On OSX, evtloop.Pending() always returns True, regardless of there being
- # any events pending. As such we can't use implementations 1 or 3 of the
- # inputhook as those depend on a pending/dispatch loop.
- inputhook_wx = inputhook_wx2
-else:
- # This is our default implementation
- inputhook_wx = inputhook_wx3
+# encoding: utf-8
+
+"""
+Enable wxPython to be used interactively by setting PyOS_InputHook.
+
+Authors: Robin Dunn, Brian Granger, Ondrej Certik
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+import signal
+import time
+from timeit import default_timer as clock
+import wx
+
+from IPython.lib.inputhook import stdin_ready
+
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def inputhook_wx1():
+ """Run the wx event loop by processing pending events only.
+
+ This approach seems to work, but its performance is not great as it
+ relies on having PyOS_InputHook called regularly.
+ """
+ try:
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+
+ # Make a temporary event loop and process system events until
+ # there are no more waiting, then allow idle events (which
+ # will also deal with pending or posted wx events.)
+ evtloop = wx.EventLoop()
+ ea = wx.EventLoopActivator(evtloop)
+ while evtloop.Pending():
+ evtloop.Dispatch()
+ app.ProcessIdle()
+ del ea
+ except KeyboardInterrupt:
+ pass
+ return 0
+
+class EventLoopTimer(wx.Timer):
+
+ def __init__(self, func):
+ self.func = func
+ wx.Timer.__init__(self)
+
+ def Notify(self):
+ self.func()
+
+class EventLoopRunner(object):
+
+ def Run(self, time):
+ self.evtloop = wx.EventLoop()
+ self.timer = EventLoopTimer(self.check_stdin)
+ self.timer.Start(time)
+ self.evtloop.Run()
+
+ def check_stdin(self):
+ if stdin_ready():
+ self.timer.Stop()
+ self.evtloop.Exit()
+
+def inputhook_wx2():
+ """Run the wx event loop, polling for stdin.
+
+ This version runs the wx event loop for an undetermined amount of time,
+ during which it periodically checks to see if anything is ready on
+ stdin. If anything is ready on stdin, the event loop exits.
+
+ The argument to elr.Run controls how often the event loop looks at stdin.
+ This determines the responsiveness at the keyboard. A setting of 1000
+ enables a user to type at most 1 char per second. I have found that a
+ setting of 10 gives good keyboard response. We can shorten it further,
+ but eventually performance would suffer from calling select/kbhit too
+ often.
+ """
+ try:
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+ elr = EventLoopRunner()
+ # As this time is made shorter, keyboard response improves, but idle
+ # CPU load goes up. 10 ms seems like a good compromise.
+ elr.Run(time=10) # CHANGE time here to control polling interval
+ except KeyboardInterrupt:
+ pass
+ return 0
+
+def inputhook_wx3():
+ """Run the wx event loop by processing pending events only.
+
+ This is like inputhook_wx1, but it keeps processing pending events
+ until stdin is ready. After processing all pending events, a call to
+ time.sleep is inserted. This is needed; otherwise, CPU usage is at 100%.
+ This sleep time should be tuned, though, for best performance.
+ """
+ # We need to protect against a user pressing Control-C when IPython is
+ # idle and this is running. We trap KeyboardInterrupt and pass.
+ try:
+ app = wx.GetApp()
+ if app is not None:
+ assert wx.Thread_IsMain()
+
+ # The import of wx on Linux sets the handler for signal.SIGINT
+ # to 0. This is a bug in wx or gtk. We fix by just setting it
+ # back to the Python default.
+ if not callable(signal.getsignal(signal.SIGINT)):
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+ evtloop = wx.EventLoop()
+ ea = wx.EventLoopActivator(evtloop)
+ t = clock()
+ while not stdin_ready():
+ while evtloop.Pending():
+ t = clock()
+ evtloop.Dispatch()
+ app.ProcessIdle()
+ # We need to sleep at this point to keep the idle CPU load
+ low. However, if we sleep too long, GUI response is poor. As
+ # a compromise, we watch how often GUI events are being processed
+ # and switch between a short and long sleep time. Here are some
+ # stats useful in helping to tune this.
+ # time CPU load
+ # 0.001 13%
+ # 0.005 3%
+ # 0.01 1.5%
+ # 0.05 0.5%
+ used_time = clock() - t
+ if used_time > 10.0:
+ # print 'Sleep for 1 s' # dbg
+ time.sleep(1.0)
+ elif used_time > 0.1:
+ # Few GUI events coming in, so we can sleep longer
+ # print 'Sleep for 0.05 s' # dbg
+ time.sleep(0.05)
+ else:
+ # Many GUI events coming in, so sleep only very little
+ time.sleep(0.001)
+ del ea
+ except KeyboardInterrupt:
+ pass
+ return 0
+
+if sys.platform == 'darwin':
+ # On OSX, evtloop.Pending() always returns True, regardless of there being
+ # any events pending. As such we can't use implementations 1 or 3 of the
+ # inputhook as those depend on a pending/dispatch loop.
+ inputhook_wx = inputhook_wx2
+else:
+ # This is our default implementation
+ inputhook_wx = inputhook_wx3
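
inputhook_wx3 above throttles its polling with an adaptive sleep: the longer the GUI has gone without dispatching events, the longer it sleeps. A minimal sketch of just that back-off logic, using the thresholds from the comments (the helper name is ours, not IPython's):

    # Sketch: pick a sleep interval based on how long the GUI has been idle.
    import time
    from timeit import default_timer as clock

    def idle_sleep(last_dispatch):
        used_time = clock() - last_dispatch
        if used_time > 10.0:       # GUI idle for a long time: sleep longest
            time.sleep(1.0)
        elif used_time > 0.1:      # few events arriving: sleep a while
            time.sleep(0.05)
        else:                      # events streaming in: yield only briefly
            time.sleep(0.001)
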
diff --git a/contrib/python/ipython/py2/IPython/lib/kernel.py b/contrib/python/ipython/py2/IPython/lib/kernel.py
index 7de2ea4b12..af9827667f 100644
--- a/contrib/python/ipython/py2/IPython/lib/kernel.py
+++ b/contrib/python/ipython/py2/IPython/lib/kernel.py
@@ -1,13 +1,13 @@
-"""[DEPRECATED] Utilities for connecting to kernels
-
-Moved to IPython.kernel.connect
-"""
-
-import warnings
-warnings.warn("IPython.lib.kernel moved to IPython.kernel.connect in IPython 1.0,"
- " and will be removed in IPython 6.0.",
- DeprecationWarning
-)
-
-from ipykernel.connect import *
-
+"""[DEPRECATED] Utilities for connecting to kernels
+
+Moved to IPython.kernel.connect
+"""
+
+import warnings
+warnings.warn("IPython.lib.kernel moved to IPython.kernel.connect in IPython 1.0,"
+ " and will be removed in IPython 6.0.",
+ DeprecationWarning
+)
+
+from ipykernel.connect import *
+
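IPython.lib.kernel above is a pure deprecation shim: warn once at import time, then re-export everything from the new location. The same pattern, sketched with hypothetical module names:

    # Sketch of a deprecation-shim module (old_pkg/old_module.py).
    # Importing it warns, then exposes the new module's public names.
    import warnings
    warnings.warn("old_pkg.old_module moved to new_pkg.new_module "
                  "and will be removed in a future release.",
                  DeprecationWarning)
    from new_pkg.new_module import *  # noqa: F401,F403
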
diff --git a/contrib/python/ipython/py2/IPython/lib/latextools.py b/contrib/python/ipython/py2/IPython/lib/latextools.py
index 4df8e562ed..c3230dd489 100644
--- a/contrib/python/ipython/py2/IPython/lib/latextools.py
+++ b/contrib/python/ipython/py2/IPython/lib/latextools.py
@@ -1,111 +1,111 @@
-# -*- coding: utf-8 -*-
-"""Tools for handling LaTeX."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from io import BytesIO, open
-import os
-import tempfile
-import shutil
-import subprocess
-
-from IPython.utils.process import find_cmd, FindCmdError
-from traitlets.config import get_config
-from traitlets.config.configurable import SingletonConfigurable
-from traitlets import List, Bool, Unicode
-from IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u, PY3
-
-try: # Py3
- from base64 import encodebytes
-except ImportError: # Py2
- from base64 import encodestring as encodebytes
-
-
-class LaTeXTool(SingletonConfigurable):
- """An object to store configuration of the LaTeX tool."""
- def _config_default(self):
- return get_config()
-
- backends = List(
- Unicode(), ["matplotlib", "dvipng"],
- help="Preferred backend to draw LaTeX math equations. "
- "Backends in the list are checked one by one and the first "
- "usable one is used. Note that `matplotlib` backend "
- "is usable only for inline style equations. To draw "
- "display style equations, `dvipng` backend must be specified. ",
- # It is a List instead of Enum, to make configuration more
- # flexible. For example, to use matplotlib mainly but dvipng
- # for display style, the default ["matplotlib", "dvipng"] can
- # be used. To NOT use dvipng so that other repr such as
- # unicode pretty printing is used, you can use ["matplotlib"].
+# -*- coding: utf-8 -*-
+"""Tools for handling LaTeX."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from io import BytesIO, open
+import os
+import tempfile
+import shutil
+import subprocess
+
+from IPython.utils.process import find_cmd, FindCmdError
+from traitlets.config import get_config
+from traitlets.config.configurable import SingletonConfigurable
+from traitlets import List, Bool, Unicode
+from IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u, PY3
+
+try: # Py3
+ from base64 import encodebytes
+except ImportError: # Py2
+ from base64 import encodestring as encodebytes
+
+
+class LaTeXTool(SingletonConfigurable):
+ """An object to store configuration of the LaTeX tool."""
+ def _config_default(self):
+ return get_config()
+
+ backends = List(
+ Unicode(), ["matplotlib", "dvipng"],
+ help="Preferred backend to draw LaTeX math equations. "
+ "Backends in the list are checked one by one and the first "
+ "usable one is used. Note that `matplotlib` backend "
+ "is usable only for inline style equations. To draw "
+ "display style equations, `dvipng` backend must be specified. ",
+ # It is a List instead of Enum, to make configuration more
+ # flexible. For example, to use matplotlib mainly but dvipng
+ # for display style, the default ["matplotlib", "dvipng"] can
+ # be used. To NOT use dvipng so that other repr such as
+ # unicode pretty printing is used, you can use ["matplotlib"].
).tag(config=True)
-
- use_breqn = Bool(
- True,
- help="Use breqn.sty to automatically break long equations. "
- "This configuration takes effect only for dvipng backend.",
+
+ use_breqn = Bool(
+ True,
+ help="Use breqn.sty to automatically break long equations. "
+ "This configuration takes effect only for dvipng backend.",
).tag(config=True)
-
- packages = List(
- ['amsmath', 'amsthm', 'amssymb', 'bm'],
- help="A list of packages to use for dvipng backend. "
- "'breqn' will be automatically appended when use_breqn=True.",
+
+ packages = List(
+ ['amsmath', 'amsthm', 'amssymb', 'bm'],
+ help="A list of packages to use for dvipng backend. "
+ "'breqn' will be automatically appended when use_breqn=True.",
).tag(config=True)
-
- preamble = Unicode(
- help="Additional preamble to use when generating LaTeX source "
- "for dvipng backend.",
+
+ preamble = Unicode(
+ help="Additional preamble to use when generating LaTeX source "
+ "for dvipng backend.",
).tag(config=True)
-
-
-def latex_to_png(s, encode=False, backend=None, wrap=False):
- """Render a LaTeX string to PNG.
-
- Parameters
- ----------
+
+
+def latex_to_png(s, encode=False, backend=None, wrap=False):
+ """Render a LaTeX string to PNG.
+
+ Parameters
+ ----------
s : str
- The raw string containing valid inline LaTeX.
- encode : bool, optional
- Should the PNG data be base64-encoded to make it JSON'able.
- backend : {matplotlib, dvipng}
- Backend for producing PNG data.
- wrap : bool
- If true, automatically wrap `s` as a LaTeX equation.
-
- None is returned when the backend cannot be used.
-
- """
- s = cast_unicode(s)
- allowed_backends = LaTeXTool.instance().backends
- if backend is None:
- backend = allowed_backends[0]
- if backend not in allowed_backends:
- return None
- if backend == 'matplotlib':
- f = latex_to_png_mpl
- elif backend == 'dvipng':
- f = latex_to_png_dvipng
- else:
- raise ValueError('No such backend {0}'.format(backend))
- bin_data = f(s, wrap)
- if encode and bin_data:
- bin_data = encodebytes(bin_data)
- return bin_data
-
-
-def latex_to_png_mpl(s, wrap):
- try:
- from matplotlib import mathtext
+ The raw string containing valid inline LaTeX.
+ encode : bool, optional
+ Should the PNG data be base64-encoded to make it JSON'able.
+ backend : {matplotlib, dvipng}
+ Backend for producing PNG data.
+ wrap : bool
+ If true, automatically wrap `s` as a LaTeX equation.
+
+ None is returned when the backend cannot be used.
+
+ """
+ s = cast_unicode(s)
+ allowed_backends = LaTeXTool.instance().backends
+ if backend is None:
+ backend = allowed_backends[0]
+ if backend not in allowed_backends:
+ return None
+ if backend == 'matplotlib':
+ f = latex_to_png_mpl
+ elif backend == 'dvipng':
+ f = latex_to_png_dvipng
+ else:
+ raise ValueError('No such backend {0}'.format(backend))
+ bin_data = f(s, wrap)
+ if encode and bin_data:
+ bin_data = encodebytes(bin_data)
+ return bin_data
+
+
+def latex_to_png_mpl(s, wrap):
+ try:
+ from matplotlib import mathtext
from pyparsing import ParseFatalException
- except ImportError:
- return None
-
- # mpl mathtext doesn't support display math, force inline
- s = s.replace('$$', '$')
- if wrap:
- s = u'${0}$'.format(s)
-
+ except ImportError:
+ return None
+
+ # mpl mathtext doesn't support display math, force inline
+ s = s.replace('$$', '$')
+ if wrap:
+ s = u'${0}$'.format(s)
+
try:
mt = mathtext.MathTextParser('bitmap')
f = BytesIO()
@@ -113,93 +113,93 @@ def latex_to_png_mpl(s, wrap):
return f.getvalue()
except (ValueError, RuntimeError, ParseFatalException):
return None
-
-
-def latex_to_png_dvipng(s, wrap):
- try:
- find_cmd('latex')
- find_cmd('dvipng')
- except FindCmdError:
- return None
- try:
- workdir = tempfile.mkdtemp()
- tmpfile = os.path.join(workdir, "tmp.tex")
- dvifile = os.path.join(workdir, "tmp.dvi")
- outfile = os.path.join(workdir, "tmp.png")
-
- with open(tmpfile, "w", encoding='utf8') as f:
- f.writelines(genelatex(s, wrap))
-
- with open(os.devnull, 'wb') as devnull:
- subprocess.check_call(
- ["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
- cwd=workdir, stdout=devnull, stderr=devnull)
-
- subprocess.check_call(
- ["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
- "-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
- stdout=devnull, stderr=devnull)
-
- with open(outfile, "rb") as f:
- return f.read()
+
+
+def latex_to_png_dvipng(s, wrap):
+ try:
+ find_cmd('latex')
+ find_cmd('dvipng')
+ except FindCmdError:
+ return None
+ try:
+ workdir = tempfile.mkdtemp()
+ tmpfile = os.path.join(workdir, "tmp.tex")
+ dvifile = os.path.join(workdir, "tmp.dvi")
+ outfile = os.path.join(workdir, "tmp.png")
+
+ with open(tmpfile, "w", encoding='utf8') as f:
+ f.writelines(genelatex(s, wrap))
+
+ with open(os.devnull, 'wb') as devnull:
+ subprocess.check_call(
+ ["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
+ cwd=workdir, stdout=devnull, stderr=devnull)
+
+ subprocess.check_call(
+ ["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
+ "-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
+ stdout=devnull, stderr=devnull)
+
+ with open(outfile, "rb") as f:
+ return f.read()
except subprocess.CalledProcessError:
return None
- finally:
- shutil.rmtree(workdir)
-
-
-def kpsewhich(filename):
- """Invoke kpsewhich command with an argument `filename`."""
- try:
- find_cmd("kpsewhich")
- proc = subprocess.Popen(
- ["kpsewhich", filename],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (stdout, stderr) = proc.communicate()
- return stdout.strip().decode('utf8', 'replace')
- except FindCmdError:
- pass
-
-
-def genelatex(body, wrap):
- """Generate LaTeX document for dvipng backend."""
- lt = LaTeXTool.instance()
- breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
- yield u(r'\documentclass{article}')
- packages = lt.packages
- if breqn:
- packages = packages + ['breqn']
- for pack in packages:
- yield u(r'\usepackage{{{0}}}'.format(pack))
- yield u(r'\pagestyle{empty}')
- if lt.preamble:
- yield lt.preamble
- yield u(r'\begin{document}')
- if breqn:
- yield u(r'\begin{dmath*}')
- yield body
- yield u(r'\end{dmath*}')
- elif wrap:
- yield u'$${0}$$'.format(body)
- else:
- yield body
- yield u(r'\end{document}')
-
-
-_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
-
-def latex_to_html(s, alt='image'):
- """Render LaTeX to HTML with embedded PNG data using data URIs.
-
- Parameters
- ----------
- s : str
- The raw string containing valid inline LaTeX.
- alt : str
- The alt text to use for the HTML.
- """
- base64_data = latex_to_png(s, encode=True).decode('ascii')
- if base64_data:
- return _data_uri_template_png % (base64_data, alt)
-
-
+ finally:
+ shutil.rmtree(workdir)
+
+
+def kpsewhich(filename):
+ """Invoke kpsewhich command with an argument `filename`."""
+ try:
+ find_cmd("kpsewhich")
+ proc = subprocess.Popen(
+ ["kpsewhich", filename],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ return stdout.strip().decode('utf8', 'replace')
+ except FindCmdError:
+ pass
+
+
+def genelatex(body, wrap):
+ """Generate LaTeX document for dvipng backend."""
+ lt = LaTeXTool.instance()
+ breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
+ yield u(r'\documentclass{article}')
+ packages = lt.packages
+ if breqn:
+ packages = packages + ['breqn']
+ for pack in packages:
+ yield u(r'\usepackage{{{0}}}'.format(pack))
+ yield u(r'\pagestyle{empty}')
+ if lt.preamble:
+ yield lt.preamble
+ yield u(r'\begin{document}')
+ if breqn:
+ yield u(r'\begin{dmath*}')
+ yield body
+ yield u(r'\end{dmath*}')
+ elif wrap:
+ yield u'$${0}$$'.format(body)
+ else:
+ yield body
+ yield u(r'\end{document}')
+
+
+_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
+
+def latex_to_html(s, alt='image'):
+ """Render LaTeX to HTML with embedded PNG data using data URIs.
+
+ Parameters
+ ----------
+ s : str
+ The raw string containing valid inline LaTeX.
+ alt : str
+ The alt text to use for the HTML.
+ """
+ base64_data = latex_to_png(s, encode=True).decode('ascii')
+ if base64_data:
+ return _data_uri_template_png % (base64_data, alt)
+
+
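A usage sketch for the helpers in latextools.py: latex_to_png picks the first usable backend from LaTeXTool.backends and returns PNG bytes, or None when no backend can render the input (the output path and the equation are our own examples):

    # Sketch: render an equation to PNG bytes, if a backend is available.
    from IPython.lib.latextools import latex_to_png

    png = latex_to_png(r'\int_0^1 x^2\,dx', wrap=True)  # bytes or None
    if png is not None:
        with open('equation.png', 'wb') as f:
            f.write(png)
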
diff --git a/contrib/python/ipython/py2/IPython/lib/lexers.py b/contrib/python/ipython/py2/IPython/lib/lexers.py
index ec43856115..9160ae1245 100644
--- a/contrib/python/ipython/py2/IPython/lib/lexers.py
+++ b/contrib/python/ipython/py2/IPython/lib/lexers.py
@@ -1,517 +1,517 @@
-# -*- coding: utf-8 -*-
-"""
-Defines a variety of Pygments lexers for highlighting IPython code.
-
-This includes:
-
- IPythonLexer, IPython3Lexer
- Lexers for pure IPython (python + magic/shell commands)
-
- IPythonPartialTracebackLexer, IPythonTracebackLexer
- Supports 2.x and 3.x via keyword `python3`. The partial traceback
- lexer reads everything but the Python code appearing in a traceback.
- The full lexer combines the partial lexer with an IPython lexer.
-
- IPythonConsoleLexer
- A lexer for IPython console sessions, with support for tracebacks.
-
- IPyLexer
- A friendly lexer which examines the first line of text and from it,
- decides whether to use an IPython lexer or an IPython console lexer.
- This is probably the only lexer that needs to be explicitly added
- to Pygments.
-
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, the IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-# Standard library
-import re
-
-# Third party
+# -*- coding: utf-8 -*-
+"""
+Defines a variety of Pygments lexers for highlighting IPython code.
+
+This includes:
+
+ IPythonLexer, IPython3Lexer
+ Lexers for pure IPython (python + magic/shell commands)
+
+ IPythonPartialTracebackLexer, IPythonTracebackLexer
+ Supports 2.x and 3.x via keyword `python3`. The partial traceback
+ lexer reads everything but the Python code appearing in a traceback.
+ The full lexer combines the partial lexer with an IPython lexer.
+
+ IPythonConsoleLexer
+ A lexer for IPython console sessions, with support for tracebacks.
+
+ IPyLexer
+ A friendly lexer which examines the first line of text and from it,
+ decides whether to use an IPython lexer or an IPython console lexer.
+ This is probably the only lexer that needs to be explicitly added
+ to Pygments.
+
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, the IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# Standard library
+import re
+
+# Third party
from pygments.lexers import BashLexer, Python3Lexer
try:
# PythonLexer was renamed to Python2Lexer in pygments 2.5
from pygments.lexers import Python2Lexer
except ImportError:
from pygments.lexers import PythonLexer as Python2Lexer
-from pygments.lexer import (
- Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
-)
-from pygments.token import (
+from pygments.lexer import (
+ Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
+)
+from pygments.token import (
Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
-)
-from pygments.util import get_bool_opt
-
-# Local
-
-line_re = re.compile('.*?\n')
-
-__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
- 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
- 'IPythonConsoleLexer', 'IPyLexer']
-
-ipython_tokens = [
- (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
- (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
- (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
- (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
- (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
- using(BashLexer), Text)),
- (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
- (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
- (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
- (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
- (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
-]
-
-def build_ipy_lexer(python3):
- """Builds IPython lexers depending on the value of `python3`.
-
- The lexer inherits from an appropriate Python lexer and then adds
- information about IPython specific keywords (i.e. magic commands,
- shell commands, etc.)
-
- Parameters
- ----------
- python3 : bool
- If `True`, then build an IPython lexer from a Python 3 lexer.
-
- """
- # It would be nice to have a single IPython lexer class which takes
- # a boolean `python3`. But since there are two Python lexer classes,
- # we will also have two IPython lexer classes.
- if python3:
- PyLexer = Python3Lexer
- name = 'IPython3'
- aliases = ['ipython3']
- doc = """IPython3 Lexer"""
- else:
+)
+from pygments.util import get_bool_opt
+
+# Local
+
+line_re = re.compile('.*?\n')
+
+__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
+ 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
+ 'IPythonConsoleLexer', 'IPyLexer']
+
+ipython_tokens = [
+ (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
+ (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
+ (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
+ (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
+ (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
+ using(BashLexer), Text)),
+ (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
+ (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+ (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+ (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
+ (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
+]
+
+def build_ipy_lexer(python3):
+ """Builds IPython lexers depending on the value of `python3`.
+
+ The lexer inherits from an appropriate Python lexer and then adds
+ information about IPython specific keywords (i.e. magic commands,
+ shell commands, etc.)
+
+ Parameters
+ ----------
+ python3 : bool
+ If `True`, then build an IPython lexer from a Python 3 lexer.
+
+ """
+ # It would be nice to have a single IPython lexer class which takes
+ # a boolean `python3`. But since there are two Python lexer classes,
+ # we will also have two IPython lexer classes.
+ if python3:
+ PyLexer = Python3Lexer
+ name = 'IPython3'
+ aliases = ['ipython3']
+ doc = """IPython3 Lexer"""
+ else:
PyLexer = Python2Lexer
- name = 'IPython'
- aliases = ['ipython2', 'ipython']
- doc = """IPython Lexer"""
-
- tokens = PyLexer.tokens.copy()
- tokens['root'] = ipython_tokens + tokens['root']
-
- attrs = {'name': name, 'aliases': aliases, 'filenames': [],
- '__doc__': doc, 'tokens': tokens}
-
- return type(name, (PyLexer,), attrs)
-
-
-IPython3Lexer = build_ipy_lexer(python3=True)
-IPythonLexer = build_ipy_lexer(python3=False)
-
-
-class IPythonPartialTracebackLexer(RegexLexer):
- """
- Partial lexer for IPython tracebacks.
-
- Handles all the non-python output. This works for both Python 2.x and 3.x.
-
- """
- name = 'IPython Partial Traceback'
-
- tokens = {
- 'root': [
- # Tracebacks for syntax errors have a different style.
- # For both types of tracebacks, we mark the first line with
- # Generic.Traceback. For syntax errors, we mark the filename
- # as we mark the filenames for non-syntax tracebacks.
- #
- # These two regexps define how IPythonConsoleLexer finds a
- # traceback.
- #
- ## Non-syntax traceback
- (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
- ## Syntax traceback
- (r'^( File)(.*)(, line )(\d+\n)',
- bygroups(Generic.Traceback, Name.Namespace,
- Generic.Traceback, Literal.Number.Integer)),
-
- # (Exception Identifier)(Whitespace)(Traceback Message)
- (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
- bygroups(Name.Exception, Generic.Whitespace, Text)),
- # (Module/Filename)(Text)(Callee)(Function Signature)
- # Better options for callee and function signature?
- (r'(.*)( in )(.*)(\(.*\)\n)',
- bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
- # Regular line: (Whitespace)(Line Number)(Python Code)
- (r'(\s*?)(\d+)(.*?\n)',
- bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
- # Emphasized line: (Arrow)(Line Number)(Python Code)
- # Using Exception token so arrow color matches the Exception.
- (r'(-*>?\s?)(\d+)(.*?\n)',
- bygroups(Name.Exception, Literal.Number.Integer, Other)),
- # (Exception Identifier)(Message)
- (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
- bygroups(Name.Exception, Text)),
- # Tag everything else as Other, will be handled later.
- (r'.*\n', Other),
- ],
- }
-
-
-class IPythonTracebackLexer(DelegatingLexer):
- """
- IPython traceback lexer.
-
- For doctests, the tracebacks can be snipped as much as desired with the
- exception of the lines that designate a traceback. For non-syntax error
- tracebacks, this is the line of hyphens. For syntax error tracebacks,
- this is the line which lists the File and line number.
-
- """
- # The lexer inherits from DelegatingLexer. The "root" lexer is an
- # appropriate IPython lexer, which depends on the value of the boolean
- # `python3`. First, we parse with the partial IPython traceback lexer.
- # Then, any code marked with the "Other" token is delegated to the root
- # lexer.
- #
- name = 'IPython Traceback'
- aliases = ['ipythontb']
-
- def __init__(self, **options):
- self.python3 = get_bool_opt(options, 'python3', False)
- if self.python3:
- self.aliases = ['ipython3tb']
- else:
- self.aliases = ['ipython2tb', 'ipythontb']
-
- if self.python3:
- IPyLexer = IPython3Lexer
- else:
- IPyLexer = IPythonLexer
-
- DelegatingLexer.__init__(self, IPyLexer,
- IPythonPartialTracebackLexer, **options)
-
-class IPythonConsoleLexer(Lexer):
- """
- An IPython console lexer for IPython code-blocks and doctests, such as:
-
- .. code-block:: rst
-
- .. code-block:: ipythonconsole
-
- In [1]: a = 'foo'
-
- In [2]: a
- Out[2]: 'foo'
-
- In [3]: print a
- foo
-
- In [4]: 1 / 0
-
-
- Support is also provided for IPython exceptions:
-
- .. code-block:: rst
-
- .. code-block:: ipythonconsole
-
- In [1]: raise Exception
-
- ---------------------------------------------------------------------------
- Exception Traceback (most recent call last)
- <ipython-input-1-fca2ab0ca76b> in <module>()
- ----> 1 raise Exception
-
- Exception:
-
- """
- name = 'IPython console session'
- aliases = ['ipythonconsole']
- mimetypes = ['text/x-ipython-console']
-
- # The regexps used to determine what is input and what is output.
- # The default prompts for IPython are:
- #
+ name = 'IPython'
+ aliases = ['ipython2', 'ipython']
+ doc = """IPython Lexer"""
+
+ tokens = PyLexer.tokens.copy()
+ tokens['root'] = ipython_tokens + tokens['root']
+
+ attrs = {'name': name, 'aliases': aliases, 'filenames': [],
+ '__doc__': doc, 'tokens': tokens}
+
+ return type(name, (PyLexer,), attrs)
+
+
+IPython3Lexer = build_ipy_lexer(python3=True)
+IPythonLexer = build_ipy_lexer(python3=False)
+
+
+class IPythonPartialTracebackLexer(RegexLexer):
+ """
+ Partial lexer for IPython tracebacks.
+
+ Handles all the non-python output. This works for both Python 2.x and 3.x.
+
+ """
+ name = 'IPython Partial Traceback'
+
+ tokens = {
+ 'root': [
+ # Tracebacks for syntax errors have a different style.
+ # For both types of tracebacks, we mark the first line with
+ # Generic.Traceback. For syntax errors, we mark the filename
+ # as we mark the filenames for non-syntax tracebacks.
+ #
+ # These two regexps define how IPythonConsoleLexer finds a
+ # traceback.
+ #
+ ## Non-syntax traceback
+ (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
+ ## Syntax traceback
+ (r'^( File)(.*)(, line )(\d+\n)',
+ bygroups(Generic.Traceback, Name.Namespace,
+ Generic.Traceback, Literal.Number.Integer)),
+
+ # (Exception Identifier)(Whitespace)(Traceback Message)
+ (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
+ bygroups(Name.Exception, Generic.Whitespace, Text)),
+ # (Module/Filename)(Text)(Callee)(Function Signature)
+ # Better options for callee and function signature?
+ (r'(.*)( in )(.*)(\(.*\)\n)',
+ bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
+ # Regular line: (Whitespace)(Line Number)(Python Code)
+ (r'(\s*?)(\d+)(.*?\n)',
+ bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
+ # Emphasized line: (Arrow)(Line Number)(Python Code)
+ # Using Exception token so arrow color matches the Exception.
+ (r'(-*>?\s?)(\d+)(.*?\n)',
+ bygroups(Name.Exception, Literal.Number.Integer, Other)),
+ # (Exception Identifier)(Message)
+ (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
+ bygroups(Name.Exception, Text)),
+ # Tag everything else as Other, will be handled later.
+ (r'.*\n', Other),
+ ],
+ }
+
+
+class IPythonTracebackLexer(DelegatingLexer):
+ """
+ IPython traceback lexer.
+
+ For doctests, the tracebacks can be snipped as much as desired with the
+ exception of the lines that designate a traceback. For non-syntax error
+ tracebacks, this is the line of hyphens. For syntax error tracebacks,
+ this is the line which lists the File and line number.
+
+ """
+ # The lexer inherits from DelegatingLexer. The "root" lexer is an
+ # appropriate IPython lexer, which depends on the value of the boolean
+ # `python3`. First, we parse with the partial IPython traceback lexer.
+ # Then, any code marked with the "Other" token is delegated to the root
+ # lexer.
+ #
+ name = 'IPython Traceback'
+ aliases = ['ipythontb']
+
+ def __init__(self, **options):
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipython3tb']
+ else:
+ self.aliases = ['ipython2tb', 'ipythontb']
+
+ if self.python3:
+ IPyLexer = IPython3Lexer
+ else:
+ IPyLexer = IPythonLexer
+
+ DelegatingLexer.__init__(self, IPyLexer,
+ IPythonPartialTracebackLexer, **options)
+
+class IPythonConsoleLexer(Lexer):
+ """
+ An IPython console lexer for IPython code-blocks and doctests, such as:
+
+ .. code-block:: rst
+
+ .. code-block:: ipythonconsole
+
+ In [1]: a = 'foo'
+
+ In [2]: a
+ Out[2]: 'foo'
+
+ In [3]: print a
+ foo
+
+ In [4]: 1 / 0
+
+
+ Support is also provided for IPython exceptions:
+
+ .. code-block:: rst
+
+ .. code-block:: ipythonconsole
+
+ In [1]: raise Exception
+
+ ---------------------------------------------------------------------------
+ Exception Traceback (most recent call last)
+ <ipython-input-1-fca2ab0ca76b> in <module>()
+ ----> 1 raise Exception
+
+ Exception:
+
+ """
+ name = 'IPython console session'
+ aliases = ['ipythonconsole']
+ mimetypes = ['text/x-ipython-console']
+
+ # The regexps used to determine what is input and what is output.
+ # The default prompts for IPython are:
+ #
# in = 'In [#]: '
# continuation = ' .D.: '
# template = 'Out[#]: '
- #
+ #
# Where '#' is the 'prompt number' or 'execution count' and 'D'
# is a number of dots matching the width of the execution count
#
- in1_regex = r'In \[[0-9]+\]: '
- in2_regex = r' \.\.+\.: '
- out_regex = r'Out\[[0-9]+\]: '
-
- #: The regex to determine when a traceback starts.
- ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
-
- def __init__(self, **options):
- """Initialize the IPython console lexer.
-
- Parameters
- ----------
- python3 : bool
- If `True`, then the console inputs are parsed using a Python 3
- lexer. Otherwise, they are parsed using a Python 2 lexer.
- in1_regex : RegexObject
- The compiled regular expression used to detect the start
- of inputs. Although the IPython configuration setting may have a
- trailing whitespace, do not include it in the regex. If `None`,
- then the default input prompt is assumed.
- in2_regex : RegexObject
- The compiled regular expression used to detect the continuation
- of inputs. Although the IPython configuration setting may have a
- trailing whitespace, do not include it in the regex. If `None`,
- then the default input prompt is assumed.
- out_regex : RegexObject
- The compiled regular expression used to detect outputs. If `None`,
- then the default output prompt is assumed.
-
- """
- self.python3 = get_bool_opt(options, 'python3', False)
- if self.python3:
- self.aliases = ['ipython3console']
- else:
- self.aliases = ['ipython2console', 'ipythonconsole']
-
- in1_regex = options.get('in1_regex', self.in1_regex)
- in2_regex = options.get('in2_regex', self.in2_regex)
- out_regex = options.get('out_regex', self.out_regex)
-
- # So that we can work with input and output prompts which have been
- # rstrip'd (possibly by editors) we also need rstrip'd variants. If
- # we do not do this, then such prompts will be tagged as 'output'.
- The reason we can't just use the rstrip'd variants instead is that
- # we want any whitespace associated with the prompt to be inserted
- with the token. This allows formatted code to be modified so as to hide
- # the appearance of prompts, with the whitespace included. One example
- # use of this is in copybutton.js from the standard lib Python docs.
- in1_regex_rstrip = in1_regex.rstrip() + '\n'
- in2_regex_rstrip = in2_regex.rstrip() + '\n'
- out_regex_rstrip = out_regex.rstrip() + '\n'
-
- # Compile and save them all.
- attrs = ['in1_regex', 'in2_regex', 'out_regex',
- 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
- for attr in attrs:
- self.__setattr__(attr, re.compile(locals()[attr]))
-
- Lexer.__init__(self, **options)
-
- if self.python3:
- pylexer = IPython3Lexer
- tblexer = IPythonTracebackLexer
- else:
- pylexer = IPythonLexer
- tblexer = IPythonTracebackLexer
-
- self.pylexer = pylexer(**options)
- self.tblexer = tblexer(**options)
-
- self.reset()
-
- def reset(self):
- self.mode = 'output'
- self.index = 0
- self.buffer = u''
- self.insertions = []
-
- def buffered_tokens(self):
- """
- Generator of unprocessed tokens after doing insertions and before
- changing to a new state.
-
- """
- if self.mode == 'output':
- tokens = [(0, Generic.Output, self.buffer)]
- elif self.mode == 'input':
- tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
- else: # traceback
- tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
-
- for i, t, v in do_insertions(self.insertions, tokens):
- # All token indexes are relative to the buffer.
- yield self.index + i, t, v
-
- # Clear it all
- self.index += len(self.buffer)
- self.buffer = u''
- self.insertions = []
-
- def get_mci(self, line):
- """
- Parses the line and returns a 3-tuple: (mode, code, insertion).
-
- `mode` is the next mode (or state) of the lexer, and is always equal
- to 'input', 'output', or 'tb'.
-
- `code` is a portion of the line that should be added to the buffer
- corresponding to the next mode and eventually lexed by another lexer.
- For example, `code` could be Python code if `mode` were 'input'.
-
- `insertion` is a 3-tuple (index, token, text) representing an
- unprocessed "token" that will be inserted into the stream of tokens
- that are created from the buffer once we change modes. This is usually
- the input or output prompt.
-
- In general, the next mode depends on current mode and on the contents
- of `line`.
-
- """
- # To reduce the number of regex match checks, we have multiple
- # 'if' blocks instead of 'if-elif' blocks.
-
- # Check for possible end of input
- in2_match = self.in2_regex.match(line)
- in2_match_rstrip = self.in2_regex_rstrip.match(line)
- if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
- in2_match_rstrip:
- end_input = True
- else:
- end_input = False
- if end_input and self.mode != 'tb':
- # Only look for an end of input when not in tb mode.
- # An ellipsis could appear within the traceback.
- mode = 'output'
- code = u''
- insertion = (0, Generic.Prompt, line)
- return mode, code, insertion
-
- # Check for output prompt
- out_match = self.out_regex.match(line)
- out_match_rstrip = self.out_regex_rstrip.match(line)
- if out_match or out_match_rstrip:
- mode = 'output'
- if out_match:
- idx = out_match.end()
- else:
- idx = out_match_rstrip.end()
- code = line[idx:]
- # Use the 'heading' token for output. We cannot use Generic.Error
- # since it would conflict with exceptions.
- insertion = (0, Generic.Heading, line[:idx])
- return mode, code, insertion
-
-
- # Check for input or continuation prompt (non stripped version)
- in1_match = self.in1_regex.match(line)
- if in1_match or (in2_match and self.mode != 'tb'):
- # New input or when not in tb, continued input.
- # We do not check for continued input when in tb since it is
- # allowable to replace a long stack with an ellipsis.
- mode = 'input'
- if in1_match:
- idx = in1_match.end()
- else: # in2_match
- idx = in2_match.end()
- code = line[idx:]
- insertion = (0, Generic.Prompt, line[:idx])
- return mode, code, insertion
-
- # Check for input or continuation prompt (stripped version)
- in1_match_rstrip = self.in1_regex_rstrip.match(line)
- if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
- # New input or when not in tb, continued input.
- # We do not check for continued input when in tb since it is
- # allowable to replace a long stack with an ellipsis.
- mode = 'input'
- if in1_match_rstrip:
- idx = in1_match_rstrip.end()
- else: # in2_match
- idx = in2_match_rstrip.end()
- code = line[idx:]
- insertion = (0, Generic.Prompt, line[:idx])
- return mode, code, insertion
-
- # Check for traceback
- if self.ipytb_start.match(line):
- mode = 'tb'
- code = line
- insertion = None
- return mode, code, insertion
-
- # All other stuff...
- if self.mode in ('input', 'output'):
- # We assume all other text is output. Multiline input that
- # does not use the continuation marker cannot be detected.
- # For example, the 3 in the following is clearly output:
- #
- # In [1]: print 3
- # 3
- #
- # But the following second line is part of the input:
- #
- # In [2]: while True:
- # print True
- #
- # In both cases, the 2nd line will be 'output'.
- #
- mode = 'output'
- else:
- mode = 'tb'
-
- code = line
- insertion = None
-
- return mode, code, insertion
-
- def get_tokens_unprocessed(self, text):
- self.reset()
- for match in line_re.finditer(text):
- line = match.group()
- mode, code, insertion = self.get_mci(line)
-
- if mode != self.mode:
- # Yield buffered tokens before transitioning to new mode.
- for token in self.buffered_tokens():
- yield token
- self.mode = mode
-
- if insertion:
- self.insertions.append((len(self.buffer), [insertion]))
- self.buffer += code
-
+ in1_regex = r'In \[[0-9]+\]: '
+ in2_regex = r' \.\.+\.: '
+ out_regex = r'Out\[[0-9]+\]: '
+
+ #: The regex to determine when a traceback starts.
+ ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
+
+ def __init__(self, **options):
+ """Initialize the IPython console lexer.
+
+ Parameters
+ ----------
+ python3 : bool
+ If `True`, then the console inputs are parsed using a Python 3
+ lexer. Otherwise, they are parsed using a Python 2 lexer.
+ in1_regex : RegexObject
+ The compiled regular expression used to detect the start
+ of inputs. Although the IPython configuration setting may have a
+ trailing whitespace, do not include it in the regex. If `None`,
+ then the default input prompt is assumed.
+ in2_regex : RegexObject
+ The compiled regular expression used to detect the continuation
+ of inputs. Although the IPython configuration setting may have a
+ trailing whitespace, do not include it in the regex. If `None`,
+ then the default input prompt is assumed.
+ out_regex : RegexObject
+ The compiled regular expression used to detect outputs. If `None`,
+ then the default output prompt is assumed.
+
+ """
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipython3console']
+ else:
+ self.aliases = ['ipython2console', 'ipythonconsole']
+
+ in1_regex = options.get('in1_regex', self.in1_regex)
+ in2_regex = options.get('in2_regex', self.in2_regex)
+ out_regex = options.get('out_regex', self.out_regex)
+
+ # So that we can work with input and output prompts which have been
+ # rstrip'd (possibly by editors) we also need rstrip'd variants. If
+ # we do not do this, then such prompts will be tagged as 'output'.
+ The reason we can't just use the rstrip'd variants instead is that
+ # we want any whitespace associated with the prompt to be inserted
+ with the token. This allows formatted code to be modified so as to hide
+ # the appearance of prompts, with the whitespace included. One example
+ # use of this is in copybutton.js from the standard lib Python docs.
+ in1_regex_rstrip = in1_regex.rstrip() + '\n'
+ in2_regex_rstrip = in2_regex.rstrip() + '\n'
+ out_regex_rstrip = out_regex.rstrip() + '\n'
+
+ # Compile and save them all.
+ attrs = ['in1_regex', 'in2_regex', 'out_regex',
+ 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
+ for attr in attrs:
+ self.__setattr__(attr, re.compile(locals()[attr]))
+
+ Lexer.__init__(self, **options)
+
+ if self.python3:
+ pylexer = IPython3Lexer
+ tblexer = IPythonTracebackLexer
+ else:
+ pylexer = IPythonLexer
+ tblexer = IPythonTracebackLexer
+
+ self.pylexer = pylexer(**options)
+ self.tblexer = tblexer(**options)
+
+ self.reset()
+
+ def reset(self):
+ self.mode = 'output'
+ self.index = 0
+ self.buffer = u''
+ self.insertions = []
+
+ def buffered_tokens(self):
+ """
+ Generator of unprocessed tokens after doing insertions and before
+ changing to a new state.
+
+ """
+ if self.mode == 'output':
+ tokens = [(0, Generic.Output, self.buffer)]
+ elif self.mode == 'input':
+ tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
+ else: # traceback
+ tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
+
+ for i, t, v in do_insertions(self.insertions, tokens):
+ # All token indexes are relative to the buffer.
+ yield self.index + i, t, v
+
+ # Clear it all
+ self.index += len(self.buffer)
+ self.buffer = u''
+ self.insertions = []
+
+ def get_mci(self, line):
+ """
+ Parses the line and returns a 3-tuple: (mode, code, insertion).
+
+ `mode` is the next mode (or state) of the lexer, and is always equal
+ to 'input', 'output', or 'tb'.
+
+ `code` is a portion of the line that should be added to the buffer
+ corresponding to the next mode and eventually lexed by another lexer.
+ For example, `code` could be Python code if `mode` were 'input'.
+
+ `insertion` is a 3-tuple (index, token, text) representing an
+ unprocessed "token" that will be inserted into the stream of tokens
+ that are created from the buffer once we change modes. This is usually
+ the input or output prompt.
+
+ In general, the next mode depends on current mode and on the contents
+ of `line`.
+
+ """
+ # To reduce the number of regex match checks, we have multiple
+ # 'if' blocks instead of 'if-elif' blocks.
+
+ # Check for possible end of input
+ in2_match = self.in2_regex.match(line)
+ in2_match_rstrip = self.in2_regex_rstrip.match(line)
+ if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
+ in2_match_rstrip:
+ end_input = True
+ else:
+ end_input = False
+ if end_input and self.mode != 'tb':
+ # Only look for an end of input when not in tb mode.
+ # An ellipsis could appear within the traceback.
+ mode = 'output'
+ code = u''
+ insertion = (0, Generic.Prompt, line)
+ return mode, code, insertion
+
+ # Check for output prompt
+ out_match = self.out_regex.match(line)
+ out_match_rstrip = self.out_regex_rstrip.match(line)
+ if out_match or out_match_rstrip:
+ mode = 'output'
+ if out_match:
+ idx = out_match.end()
+ else:
+ idx = out_match_rstrip.end()
+ code = line[idx:]
+ # Use the 'heading' token for output. We cannot use Generic.Error
+ # since it would conflict with exceptions.
+ insertion = (0, Generic.Heading, line[:idx])
+ return mode, code, insertion
+
+
+ # Check for input or continuation prompt (non stripped version)
+ in1_match = self.in1_regex.match(line)
+ if in1_match or (in2_match and self.mode != 'tb'):
+ # New input or when not in tb, continued input.
+ # We do not check for continued input when in tb since it is
+ # allowable to replace a long stack with an ellipsis.
+ mode = 'input'
+ if in1_match:
+ idx = in1_match.end()
+ else: # in2_match
+ idx = in2_match.end()
+ code = line[idx:]
+ insertion = (0, Generic.Prompt, line[:idx])
+ return mode, code, insertion
+
+ # Check for input or continuation prompt (stripped version)
+ in1_match_rstrip = self.in1_regex_rstrip.match(line)
+ if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
+ # New input or when not in tb, continued input.
+ # We do not check for continued input when in tb since it is
+ # allowable to replace a long stack with an ellipsis.
+ mode = 'input'
+ if in1_match_rstrip:
+ idx = in1_match_rstrip.end()
+ else: # in2_match
+ idx = in2_match_rstrip.end()
+ code = line[idx:]
+ insertion = (0, Generic.Prompt, line[:idx])
+ return mode, code, insertion
+
+ # Check for traceback
+ if self.ipytb_start.match(line):
+ mode = 'tb'
+ code = line
+ insertion = None
+ return mode, code, insertion
+
+ # All other stuff...
+ if self.mode in ('input', 'output'):
+ # We assume all other text is output. Multiline input that
+ # does not use the continuation marker cannot be detected.
+ # For example, the 3 in the following is clearly output:
+ #
+ # In [1]: print 3
+ # 3
+ #
+ # But the following second line is part of the input:
+ #
+ # In [2]: while True:
+ # print True
+ #
+ # In both cases, the 2nd line will be 'output'.
+ #
+ mode = 'output'
+ else:
+ mode = 'tb'
+
+ code = line
+ insertion = None
+
+ return mode, code, insertion
+
+ def get_tokens_unprocessed(self, text):
+ self.reset()
+ for match in line_re.finditer(text):
+ line = match.group()
+ mode, code, insertion = self.get_mci(line)
+
+ if mode != self.mode:
+ # Yield buffered tokens before transitioning to new mode.
+ for token in self.buffered_tokens():
+ yield token
+ self.mode = mode
+
+ if insertion:
+ self.insertions.append((len(self.buffer), [insertion]))
+ self.buffer += code
+
for token in self.buffered_tokens():
yield token
-class IPyLexer(Lexer):
- """
- Primary lexer for all IPython-like code.
-
- This is a simple helper lexer. If the first line of the text begins with
- "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
- lexer. If not, then the entire text is parsed with an IPython lexer.
-
- The goal is to reduce the number of lexers that are registered
- with Pygments.
-
- """
- name = 'IPy session'
- aliases = ['ipy']
-
- def __init__(self, **options):
- self.python3 = get_bool_opt(options, 'python3', False)
- if self.python3:
- self.aliases = ['ipy3']
- else:
- self.aliases = ['ipy2', 'ipy']
-
- Lexer.__init__(self, **options)
-
- self.IPythonLexer = IPythonLexer(**options)
- self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
-
- def get_tokens_unprocessed(self, text):
- # Search for the input prompt anywhere...this allows code blocks to
- # begin with comments as well.
- if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
- lex = self.IPythonConsoleLexer
- else:
- lex = self.IPythonLexer
- for token in lex.get_tokens_unprocessed(text):
- yield token
-
+class IPyLexer(Lexer):
+ """
+ Primary lexer for all IPython-like code.
+
+ This is a simple helper lexer. If the first line of the text begins with
+ "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
+ lexer. If not, then the entire text is parsed with an IPython lexer.
+
+ The goal is to reduce the number of lexers that are registered
+ with Pygments.
+
+ """
+ name = 'IPy session'
+ aliases = ['ipy']
+
+ def __init__(self, **options):
+ self.python3 = get_bool_opt(options, 'python3', False)
+ if self.python3:
+ self.aliases = ['ipy3']
+ else:
+ self.aliases = ['ipy2', 'ipy']
+
+ Lexer.__init__(self, **options)
+
+ self.IPythonLexer = IPythonLexer(**options)
+ self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
+
+ def get_tokens_unprocessed(self, text):
+ # Search for the input prompt anywhere...this allows code blocks to
+ # begin with comments as well.
+ if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
+ lex = self.IPythonConsoleLexer
+ else:
+ lex = self.IPythonLexer
+ for token in lex.get_tokens_unprocessed(text):
+ yield token
+
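The lexers in lexers.py plug into the ordinary Pygments API. A short sketch highlighting a console transcript with IPyLexer, which dispatches to the console lexer because the text contains an input prompt (the sample session is ours):

    # Sketch: highlight an IPython session via the standard Pygments entry point.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from IPython.lib.lexers import IPyLexer

    session = "In [1]: a = 'foo'\nIn [2]: a\nOut[2]: 'foo'\n"
    print(highlight(session, IPyLexer(python3=True), TerminalFormatter()))
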
diff --git a/contrib/python/ipython/py2/IPython/lib/pretty.py b/contrib/python/ipython/py2/IPython/lib/pretty.py
index 385365fc3c..28eee523c5 100644
--- a/contrib/python/ipython/py2/IPython/lib/pretty.py
+++ b/contrib/python/ipython/py2/IPython/lib/pretty.py
@@ -1,127 +1,127 @@
-# -*- coding: utf-8 -*-
-"""
-Python advanced pretty printer. This pretty printer is intended to
-replace the old `pprint` python module which does not allow developers
-to provide their own pretty print callbacks.
-
-This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
-
-
-Example Usage
--------------
-
-To directly print the representation of an object use `pprint`::
-
- from pretty import pprint
- pprint(complex_object)
-
-To get a string of the output use `pretty`::
-
- from pretty import pretty
- string = pretty(complex_object)
-
-
-Extending
----------
-
-The pretty library allows developers to add pretty printing rules for their
-own objects. This process is straightforward. All you have to do is to
-add a `_repr_pretty_` method to your object and call the methods on the
-pretty printer passed::
-
- class MyObject(object):
-
- def _repr_pretty_(self, p, cycle):
- ...
-
-Here is an example implementation of a `_repr_pretty_` method for a list
-subclass::
-
- class MyList(list):
-
- def _repr_pretty_(self, p, cycle):
- if cycle:
- p.text('MyList(...)')
- else:
- with p.group(8, 'MyList([', '])'):
- for idx, item in enumerate(self):
- if idx:
- p.text(',')
- p.breakable()
- p.pretty(item)
-
-The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
-react to that or the result is an infinite loop. `p.text()` just adds
-non-breaking text to the output; `p.breakable()` either adds whitespace
-or breaks here. If you pass it an argument, it's used instead of the
-default space. `p.pretty` prettyprints another object using the pretty print
-method.
-
-The first parameter to the `group` function specifies the extra indentation
-of the next line. In this example the next item will either be on the same
-line (if the items are short enough) or aligned with the right edge of the
-opening bracket of `MyList`.
-
-If you just want to indent something you can use the group function
-without open / close parameters. You can also use this code::
-
- with p.indent(2):
- ...
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.lib.pretty
- :parts: 3
-
-:copyright: 2007 by Armin Ronacher.
- Portions (c) 2009 by Robert Kern.
-:license: BSD License.
-"""
-from __future__ import print_function
-from contextlib import contextmanager
-import sys
-import types
-import re
-import datetime
-from collections import deque
-
+# -*- coding: utf-8 -*-
+"""
+Python advanced pretty printer. This pretty printer is intended to
+replace the old `pprint` python module which does not allow developers
+to provide their own pretty print callbacks.
+
+This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
+
+
+Example Usage
+-------------
+
+To directly print the representation of an object use `pprint`::
+
+ from pretty import pprint
+ pprint(complex_object)
+
+To get a string of the output use `pretty`::
+
+ from pretty import pretty
+ string = pretty(complex_object)
+
+
+Extending
+---------
+
+The pretty library allows developers to add pretty printing rules for their
+own objects. This process is straightforward. All you have to do is to
+add a `_repr_pretty_` method to your object and call the methods on the
+pretty printer passed::
+
+ class MyObject(object):
+
+ def _repr_pretty_(self, p, cycle):
+ ...
+
+Here is an example implementation of a `_repr_pretty_` method for a list
+subclass::
+
+ class MyList(list):
+
+ def _repr_pretty_(self, p, cycle):
+ if cycle:
+ p.text('MyList(...)')
+ else:
+ with p.group(8, 'MyList([', '])'):
+ for idx, item in enumerate(self):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(item)
+
+The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
+react to that or the result is an infinite loop. `p.text()` just adds
+non-breaking text to the output; `p.breakable()` either adds whitespace
+or breaks here. If you pass it an argument, it's used instead of the
+default space. `p.pretty` prettyprints another object using the pretty print
+method.
+
+The first parameter to the `group` function specifies the extra indentation
+of the next line. In this example the next item will either be on the same
+line (if the items are short enough) or aligned with the right edge of the
+opening bracket of `MyList`.
+
+If you just want to indent something you can use the group function
+without open / close parameters. You can also use this code::
+
+ with p.indent(2):
+ ...
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.lib.pretty
+ :parts: 3
+
+:copyright: 2007 by Armin Ronacher.
+ Portions (c) 2009 by Robert Kern.
+:license: BSD License.
+"""
+from __future__ import print_function
+from contextlib import contextmanager
+import sys
+import types
+import re
+import datetime
+from collections import deque
+
from IPython.utils.py3compat import PY3, PYPY, cast_unicode, string_types
-from IPython.utils.encoding import get_stream_enc
-
-from io import StringIO
-
-
-__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
- 'for_type', 'for_type_by_name']
-
-
-MAX_SEQ_LENGTH = 1000
+from IPython.utils.encoding import get_stream_enc
+
+from io import StringIO
+
+
+__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
+ 'for_type', 'for_type_by_name']
+
+
+MAX_SEQ_LENGTH = 1000
# The language spec says that dicts preserve order from 3.7, but CPython
# does so from 3.6, so it seems likely that people will expect that.
DICT_IS_ORDERED = sys.version_info >= (3, 6)
-_re_pattern_type = type(re.compile(''))
-
-def _safe_getattr(obj, attr, default=None):
- """Safe version of getattr.
-
- Same as getattr, but will return ``default`` on any Exception,
- rather than raising.
- """
- try:
- return getattr(obj, attr, default)
- except Exception:
- return default
-
-if PY3:
- CUnicodeIO = StringIO
-else:
- class CUnicodeIO(StringIO):
- """StringIO that casts str to unicode on Python 2"""
- def write(self, text):
- return super(CUnicodeIO, self).write(
- cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
-
-
+_re_pattern_type = type(re.compile(''))
+
+def _safe_getattr(obj, attr, default=None):
+ """Safe version of getattr.
+
+ Same as getattr, but will return ``default`` on any Exception,
+ rather than raising.
+ """
+ try:
+ return getattr(obj, attr, default)
+ except Exception:
+ return default
+
+if PY3:
+ CUnicodeIO = StringIO
+else:
+ class CUnicodeIO(StringIO):
+ """StringIO that casts str to unicode on Python 2"""
+ def write(self, text):
+ return super(CUnicodeIO, self).write(
+ cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
+
+
def _sorted_for_pprint(items):
"""
Sort the given items for pretty printing. Since some predictable
@@ -137,734 +137,734 @@ def _sorted_for_pprint(items):
except Exception:
return items
-def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
- """
- Pretty print the object's representation.
- """
- stream = CUnicodeIO()
- printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length)
- printer.pretty(obj)
- printer.flush()
- return stream.getvalue()
-
-
-def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
- """
- Like `pretty` but print to stdout.
- """
- printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length)
- printer.pretty(obj)
- printer.flush()
- sys.stdout.write(newline)
- sys.stdout.flush()
-
-class _PrettyPrinterBase(object):
-
- @contextmanager
- def indent(self, indent):
- """with statement support for indenting/dedenting."""
- self.indentation += indent
- try:
- yield
- finally:
- self.indentation -= indent
-
- @contextmanager
- def group(self, indent=0, open='', close=''):
- """like begin_group / end_group but for the with statement."""
- self.begin_group(indent, open)
- try:
- yield
- finally:
- self.end_group(indent, close)
-
-class PrettyPrinter(_PrettyPrinterBase):
- """
- Baseclass for the `RepresentationPrinter` prettyprinter that is used to
- generate pretty reprs of objects. Contrary to the `RepresentationPrinter`
- this printer knows nothing about the default pprinters or the `_repr_pretty_`
- callback method.
- """
-
- def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
- self.output = output
- self.max_width = max_width
- self.newline = newline
- self.max_seq_length = max_seq_length
- self.output_width = 0
- self.buffer_width = 0
- self.buffer = deque()
-
- root_group = Group(0)
- self.group_stack = [root_group]
- self.group_queue = GroupQueue(root_group)
- self.indentation = 0
-
- def _break_outer_groups(self):
- while self.max_width < self.output_width + self.buffer_width:
- group = self.group_queue.deq()
- if not group:
- return
- while group.breakables:
- x = self.buffer.popleft()
- self.output_width = x.output(self.output, self.output_width)
- self.buffer_width -= x.width
- while self.buffer and isinstance(self.buffer[0], Text):
- x = self.buffer.popleft()
- self.output_width = x.output(self.output, self.output_width)
- self.buffer_width -= x.width
-
- def text(self, obj):
- """Add literal text to the output."""
- width = len(obj)
- if self.buffer:
- text = self.buffer[-1]
- if not isinstance(text, Text):
- text = Text()
- self.buffer.append(text)
- text.add(obj, width)
- self.buffer_width += width
- self._break_outer_groups()
- else:
- self.output.write(obj)
- self.output_width += width
-
- def breakable(self, sep=' '):
- """
- Add a breakable separator to the output. This does not mean that it
- will automatically break here. If no break takes place at this
- position, the `sep` is inserted, which defaults to one space.
- """
- width = len(sep)
- group = self.group_stack[-1]
- if group.want_break:
- self.flush()
- self.output.write(self.newline)
- self.output.write(' ' * self.indentation)
- self.output_width = self.indentation
- self.buffer_width = 0
- else:
- self.buffer.append(Breakable(sep, width, self))
- self.buffer_width += width
- self._break_outer_groups()
-
- def break_(self):
- """
- Explicitly insert a newline into the output, maintaining correct indentation.
- """
- self.flush()
- self.output.write(self.newline)
- self.output.write(' ' * self.indentation)
- self.output_width = self.indentation
- self.buffer_width = 0
-
-
- def begin_group(self, indent=0, open=''):
- """
- Begin a group. If you want support for Python < 2.5, which doesn't have
- the with statement, this is the preferred way:
-
- p.begin_group(1, '{')
- ...
- p.end_group(1, '}')
-
- The python 2.5 expression would be this:
-
- with p.group(1, '{', '}'):
- ...
-
- The first parameter specifies the indentation for the next line (usually
- the width of the opening text), the second the opening text. All
- parameters are optional.
- """
- if open:
- self.text(open)
- group = Group(self.group_stack[-1].depth + 1)
- self.group_stack.append(group)
- self.group_queue.enq(group)
- self.indentation += indent
-
- def _enumerate(self, seq):
- """like enumerate, but with an upper limit on the number of items"""
- for idx, x in enumerate(seq):
- if self.max_seq_length and idx >= self.max_seq_length:
- self.text(',')
- self.breakable()
- self.text('...')
- return
- yield idx, x
-
- def end_group(self, dedent=0, close=''):
- """End a group. See `begin_group` for more details."""
- self.indentation -= dedent
- group = self.group_stack.pop()
- if not group.breakables:
- self.group_queue.remove(group)
- if close:
- self.text(close)
-
- def flush(self):
- """Flush data that is left in the buffer."""
- for data in self.buffer:
- self.output_width += data.output(self.output, self.output_width)
- self.buffer.clear()
- self.buffer_width = 0
-
-
-def _get_mro(obj_class):
- """ Get a reasonable method resolution order of a class and its superclasses
- for both old-style and new-style classes.
- """
- if not hasattr(obj_class, '__mro__'):
- # Old-style class. Mix in object to make a fake new-style class.
- try:
- obj_class = type(obj_class.__name__, (obj_class, object), {})
- except TypeError:
- # Old-style extension type that does not descend from object.
- # FIXME: try to construct a more thorough MRO.
- mro = [obj_class]
- else:
- mro = obj_class.__mro__[1:-1]
- else:
- mro = obj_class.__mro__
- return mro
-
-
-class RepresentationPrinter(PrettyPrinter):
- """
- Special pretty printer that has a `pretty` method that calls the pretty
- printer for a python object.
-
- This class stores processing data on `self` so you must *never* use
- this class in a threaded environment. Always lock it or reinstantiate
- it.
-
- Instances also have a verbose flag callbacks can access to control their
- output. For example the default instance repr prints all attributes and
- methods that are not prefixed by an underscore if the printer is in
- verbose mode.
- """
-
- def __init__(self, output, verbose=False, max_width=79, newline='\n',
- singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
- max_seq_length=MAX_SEQ_LENGTH):
-
- PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
- self.verbose = verbose
- self.stack = []
- if singleton_pprinters is None:
- singleton_pprinters = _singleton_pprinters.copy()
- self.singleton_pprinters = singleton_pprinters
- if type_pprinters is None:
- type_pprinters = _type_pprinters.copy()
- self.type_pprinters = type_pprinters
- if deferred_pprinters is None:
- deferred_pprinters = _deferred_type_pprinters.copy()
- self.deferred_pprinters = deferred_pprinters
-
- def pretty(self, obj):
- """Pretty print the given object."""
- obj_id = id(obj)
- cycle = obj_id in self.stack
- self.stack.append(obj_id)
- self.begin_group()
- try:
- obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
- # First try to find registered singleton printers for the type.
- try:
- printer = self.singleton_pprinters[obj_id]
- except (TypeError, KeyError):
- pass
- else:
- return printer(obj, self, cycle)
- # Next walk the mro and check for either:
- # 1) a registered printer
- # 2) a _repr_pretty_ method
- for cls in _get_mro(obj_class):
- if cls in self.type_pprinters:
- # printer registered in self.type_pprinters
- return self.type_pprinters[cls](obj, self, cycle)
- else:
- # deferred printer
- printer = self._in_deferred_types(cls)
- if printer is not None:
- return printer(obj, self, cycle)
- else:
- # Finally look for special method names.
- # Some objects automatically create any requested
- # attribute. Try to ignore most of them by checking for
- # callability.
- if '_repr_pretty_' in cls.__dict__:
- meth = cls._repr_pretty_
- if callable(meth):
- return meth(obj, self, cycle)
+def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ """
+ Pretty print the object's representation.
+ """
+ stream = CUnicodeIO()
+ printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length)
+ printer.pretty(obj)
+ printer.flush()
+ return stream.getvalue()
+
+
+def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ """
+ Like `pretty` but print to stdout.
+ """
+ printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length)
+ printer.pretty(obj)
+ printer.flush()
+ sys.stdout.write(newline)
+ sys.stdout.flush()
+
+class _PrettyPrinterBase(object):
+
+ @contextmanager
+ def indent(self, indent):
+ """with statement support for indenting/dedenting."""
+ self.indentation += indent
+ try:
+ yield
+ finally:
+ self.indentation -= indent
+
+ @contextmanager
+ def group(self, indent=0, open='', close=''):
+ """like begin_group / end_group but for the with statement."""
+ self.begin_group(indent, open)
+ try:
+ yield
+ finally:
+ self.end_group(indent, close)
+
+class PrettyPrinter(_PrettyPrinterBase):
+ """
+ Baseclass for the `RepresentationPrinter` prettyprinter that is used to
+ generate pretty reprs of objects. Contrary to the `RepresentationPrinter`
+ this printer knows nothing about the default pprinters or the `_repr_pretty_`
+ callback method.
+ """
+
+ def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
+ self.output = output
+ self.max_width = max_width
+ self.newline = newline
+ self.max_seq_length = max_seq_length
+ self.output_width = 0
+ self.buffer_width = 0
+ self.buffer = deque()
+
+ root_group = Group(0)
+ self.group_stack = [root_group]
+ self.group_queue = GroupQueue(root_group)
+ self.indentation = 0
+
+ def _break_outer_groups(self):
+ while self.max_width < self.output_width + self.buffer_width:
+ group = self.group_queue.deq()
+ if not group:
+ return
+ while group.breakables:
+ x = self.buffer.popleft()
+ self.output_width = x.output(self.output, self.output_width)
+ self.buffer_width -= x.width
+ while self.buffer and isinstance(self.buffer[0], Text):
+ x = self.buffer.popleft()
+ self.output_width = x.output(self.output, self.output_width)
+ self.buffer_width -= x.width
+
+ def text(self, obj):
+ """Add literal text to the output."""
+ width = len(obj)
+ if self.buffer:
+ text = self.buffer[-1]
+ if not isinstance(text, Text):
+ text = Text()
+ self.buffer.append(text)
+ text.add(obj, width)
+ self.buffer_width += width
+ self._break_outer_groups()
+ else:
+ self.output.write(obj)
+ self.output_width += width
+
+ def breakable(self, sep=' '):
+ """
+ Add a breakable separator to the output. This does not mean that it
+ will automatically break here. If no break takes place at this
+ position, the `sep` is inserted, which defaults to one space.
+ """
+ width = len(sep)
+ group = self.group_stack[-1]
+ if group.want_break:
+ self.flush()
+ self.output.write(self.newline)
+ self.output.write(' ' * self.indentation)
+ self.output_width = self.indentation
+ self.buffer_width = 0
+ else:
+ self.buffer.append(Breakable(sep, width, self))
+ self.buffer_width += width
+ self._break_outer_groups()
+
+ def break_(self):
+ """
+ Explicitly insert a newline into the output, maintaining correct indentation.
+ """
+ self.flush()
+ self.output.write(self.newline)
+ self.output.write(' ' * self.indentation)
+ self.output_width = self.indentation
+ self.buffer_width = 0
+
+
+ def begin_group(self, indent=0, open=''):
+ """
+ Begin a group. If you want support for Python < 2.5, which doesn't have
+ the with statement, this is the preferred way:
+
+ p.begin_group(1, '{')
+ ...
+ p.end_group(1, '}')
+
+ The python 2.5 expression would be this:
+
+ with p.group(1, '{', '}'):
+ ...
+
+ The first parameter specifies the indentation for the next line (usually
+ the width of the opening text), the second the opening text. All
+ parameters are optional.
+ """
+ if open:
+ self.text(open)
+ group = Group(self.group_stack[-1].depth + 1)
+ self.group_stack.append(group)
+ self.group_queue.enq(group)
+ self.indentation += indent
+
+ def _enumerate(self, seq):
+ """like enumerate, but with an upper limit on the number of items"""
+ for idx, x in enumerate(seq):
+ if self.max_seq_length and idx >= self.max_seq_length:
+ self.text(',')
+ self.breakable()
+ self.text('...')
+ return
+ yield idx, x
+
+ def end_group(self, dedent=0, close=''):
+ """End a group. See `begin_group` for more details."""
+ self.indentation -= dedent
+ group = self.group_stack.pop()
+ if not group.breakables:
+ self.group_queue.remove(group)
+ if close:
+ self.text(close)
+
+ def flush(self):
+ """Flush data that is left in the buffer."""
+ for data in self.buffer:
+ self.output_width += data.output(self.output, self.output_width)
+ self.buffer.clear()
+ self.buffer_width = 0
+
+
+def _get_mro(obj_class):
+ """ Get a reasonable method resolution order of a class and its superclasses
+ for both old-style and new-style classes.
+ """
+ if not hasattr(obj_class, '__mro__'):
+ # Old-style class. Mix in object to make a fake new-style class.
+ try:
+ obj_class = type(obj_class.__name__, (obj_class, object), {})
+ except TypeError:
+ # Old-style extension type that does not descend from object.
+ # FIXME: try to construct a more thorough MRO.
+ mro = [obj_class]
+ else:
+ mro = obj_class.__mro__[1:-1]
+ else:
+ mro = obj_class.__mro__
+ return mro
+
+
+class RepresentationPrinter(PrettyPrinter):
+ """
+ Special pretty printer that has a `pretty` method that calls the pretty
+ printer for a python object.
+
+ This class stores processing data on `self` so you must *never* use
+ this class in a threaded environment. Always lock it or reinstantiate
+ it.
+
+ Instances also have a verbose flag callbacks can access to control their
+ output. For example the default instance repr prints all attributes and
+ methods that are not prefixed by an underscore if the printer is in
+ verbose mode.
+ """
+
+ def __init__(self, output, verbose=False, max_width=79, newline='\n',
+ singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
+ max_seq_length=MAX_SEQ_LENGTH):
+
+ PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
+ self.verbose = verbose
+ self.stack = []
+ if singleton_pprinters is None:
+ singleton_pprinters = _singleton_pprinters.copy()
+ self.singleton_pprinters = singleton_pprinters
+ if type_pprinters is None:
+ type_pprinters = _type_pprinters.copy()
+ self.type_pprinters = type_pprinters
+ if deferred_pprinters is None:
+ deferred_pprinters = _deferred_type_pprinters.copy()
+ self.deferred_pprinters = deferred_pprinters
+
+ def pretty(self, obj):
+ """Pretty print the given object."""
+ obj_id = id(obj)
+ cycle = obj_id in self.stack
+ self.stack.append(obj_id)
+ self.begin_group()
+ try:
+ obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
+ # First try to find registered singleton printers for the type.
+ try:
+ printer = self.singleton_pprinters[obj_id]
+ except (TypeError, KeyError):
+ pass
+ else:
+ return printer(obj, self, cycle)
+ # Next walk the mro and check for either:
+ # 1) a registered printer
+ # 2) a _repr_pretty_ method
+ for cls in _get_mro(obj_class):
+ if cls in self.type_pprinters:
+ # printer registered in self.type_pprinters
+ return self.type_pprinters[cls](obj, self, cycle)
+ else:
+ # deferred printer
+ printer = self._in_deferred_types(cls)
+ if printer is not None:
+ return printer(obj, self, cycle)
+ else:
+ # Finally look for special method names.
+ # Some objects automatically create any requested
+ # attribute. Try to ignore most of them by checking for
+ # callability.
+ if '_repr_pretty_' in cls.__dict__:
+ meth = cls._repr_pretty_
+ if callable(meth):
+ return meth(obj, self, cycle)
if cls is not object \
and callable(cls.__dict__.get('__repr__')):
return _repr_pprint(obj, self, cycle)
- return _default_pprint(obj, self, cycle)
- finally:
- self.end_group()
- self.stack.pop()
-
- def _in_deferred_types(self, cls):
- """
- Check if the given class is specified in the deferred type registry.
-
- Returns the printer from the registry if it exists, and None if the
- class is not in the registry. Successful matches will be moved to the
- regular type registry for future use.
- """
- mod = _safe_getattr(cls, '__module__', None)
- name = _safe_getattr(cls, '__name__', None)
- key = (mod, name)
- printer = None
- if key in self.deferred_pprinters:
- # Move the printer over to the regular registry.
- printer = self.deferred_pprinters.pop(key)
- self.type_pprinters[cls] = printer
- return printer
-
-
-class Printable(object):
-
- def output(self, stream, output_width):
- return output_width
-
-
-class Text(Printable):
-
- def __init__(self):
- self.objs = []
- self.width = 0
-
- def output(self, stream, output_width):
- for obj in self.objs:
- stream.write(obj)
- return output_width + self.width
-
- def add(self, obj, width):
- self.objs.append(obj)
- self.width += width
-
-
-class Breakable(Printable):
-
- def __init__(self, seq, width, pretty):
- self.obj = seq
- self.width = width
- self.pretty = pretty
- self.indentation = pretty.indentation
- self.group = pretty.group_stack[-1]
- self.group.breakables.append(self)
-
- def output(self, stream, output_width):
- self.group.breakables.popleft()
- if self.group.want_break:
- stream.write(self.pretty.newline)
- stream.write(' ' * self.indentation)
- return self.indentation
- if not self.group.breakables:
- self.pretty.group_queue.remove(self.group)
- stream.write(self.obj)
- return output_width + self.width
-
-
-class Group(Printable):
-
- def __init__(self, depth):
- self.depth = depth
- self.breakables = deque()
- self.want_break = False
-
-
-class GroupQueue(object):
-
- def __init__(self, *groups):
- self.queue = []
- for group in groups:
- self.enq(group)
-
- def enq(self, group):
- depth = group.depth
- while depth > len(self.queue) - 1:
- self.queue.append([])
- self.queue[depth].append(group)
-
- def deq(self):
- for stack in self.queue:
- for idx, group in enumerate(reversed(stack)):
- if group.breakables:
- del stack[idx]
- group.want_break = True
- return group
- for group in stack:
- group.want_break = True
- del stack[:]
-
- def remove(self, group):
- try:
- self.queue[group.depth].remove(group)
- except ValueError:
- pass
-
-try:
- _baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
-except AttributeError: # Python 3
- _baseclass_reprs = (object.__repr__,)
-
-
-def _default_pprint(obj, p, cycle):
- """
- The default print function. Used if an object does not provide one and
- it is not one of the builtin objects.
- """
- klass = _safe_getattr(obj, '__class__', None) or type(obj)
- if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
- # A user-provided repr. Find newlines and replace them with p.break_()
- _repr_pprint(obj, p, cycle)
- return
- p.begin_group(1, '<')
- p.pretty(klass)
- p.text(' at 0x%x' % id(obj))
- if cycle:
- p.text(' ...')
- elif p.verbose:
- first = True
- for key in dir(obj):
- if not key.startswith('_'):
- try:
- value = getattr(obj, key)
- except AttributeError:
- continue
- if isinstance(value, types.MethodType):
- continue
- if not first:
- p.text(',')
- p.breakable()
- p.text(key)
- p.text('=')
- step = len(key) + 1
- p.indentation += step
- p.pretty(value)
- p.indentation -= step
- first = False
- p.end_group(1, '>')
-
-
+ return _default_pprint(obj, self, cycle)
+ finally:
+ self.end_group()
+ self.stack.pop()
+
+ def _in_deferred_types(self, cls):
+ """
+ Check if the given class is specified in the deferred type registry.
+
+ Returns the printer from the registry if it exists, and None if the
+ class is not in the registry. Successful matches will be moved to the
+ regular type registry for future use.
+ """
+ mod = _safe_getattr(cls, '__module__', None)
+ name = _safe_getattr(cls, '__name__', None)
+ key = (mod, name)
+ printer = None
+ if key in self.deferred_pprinters:
+ # Move the printer over to the regular registry.
+ printer = self.deferred_pprinters.pop(key)
+ self.type_pprinters[cls] = printer
+ return printer
+
+
+class Printable(object):
+
+ def output(self, stream, output_width):
+ return output_width
+
+
+class Text(Printable):
+
+ def __init__(self):
+ self.objs = []
+ self.width = 0
+
+ def output(self, stream, output_width):
+ for obj in self.objs:
+ stream.write(obj)
+ return output_width + self.width
+
+ def add(self, obj, width):
+ self.objs.append(obj)
+ self.width += width
+
+
+class Breakable(Printable):
+
+ def __init__(self, seq, width, pretty):
+ self.obj = seq
+ self.width = width
+ self.pretty = pretty
+ self.indentation = pretty.indentation
+ self.group = pretty.group_stack[-1]
+ self.group.breakables.append(self)
+
+ def output(self, stream, output_width):
+ self.group.breakables.popleft()
+ if self.group.want_break:
+ stream.write(self.pretty.newline)
+ stream.write(' ' * self.indentation)
+ return self.indentation
+ if not self.group.breakables:
+ self.pretty.group_queue.remove(self.group)
+ stream.write(self.obj)
+ return output_width + self.width
+
+
+class Group(Printable):
+
+ def __init__(self, depth):
+ self.depth = depth
+ self.breakables = deque()
+ self.want_break = False
+
+
+class GroupQueue(object):
+
+ def __init__(self, *groups):
+ self.queue = []
+ for group in groups:
+ self.enq(group)
+
+ def enq(self, group):
+ depth = group.depth
+ while depth > len(self.queue) - 1:
+ self.queue.append([])
+ self.queue[depth].append(group)
+
+ def deq(self):
+ for stack in self.queue:
+ for idx, group in enumerate(reversed(stack)):
+ if group.breakables:
+ del stack[idx]
+ group.want_break = True
+ return group
+ for group in stack:
+ group.want_break = True
+ del stack[:]
+
+ def remove(self, group):
+ try:
+ self.queue[group.depth].remove(group)
+ except ValueError:
+ pass
+
+try:
+ _baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
+except AttributeError: # Python 3
+ _baseclass_reprs = (object.__repr__,)
+
+
+def _default_pprint(obj, p, cycle):
+ """
+ The default print function. Used if an object does not provide one and
+ it is not one of the builtin objects.
+ """
+ klass = _safe_getattr(obj, '__class__', None) or type(obj)
+ if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
+ # A user-provided repr. Find newlines and replace them with p.break_()
+ _repr_pprint(obj, p, cycle)
+ return
+ p.begin_group(1, '<')
+ p.pretty(klass)
+ p.text(' at 0x%x' % id(obj))
+ if cycle:
+ p.text(' ...')
+ elif p.verbose:
+ first = True
+ for key in dir(obj):
+ if not key.startswith('_'):
+ try:
+ value = getattr(obj, key)
+ except AttributeError:
+ continue
+ if isinstance(value, types.MethodType):
+ continue
+ if not first:
+ p.text(',')
+ p.breakable()
+ p.text(key)
+ p.text('=')
+ step = len(key) + 1
+ p.indentation += step
+ p.pretty(value)
+ p.indentation -= step
+ first = False
+ p.end_group(1, '>')
+
+
def _seq_pprinter_factory(start, end):
- """
- Factory that returns a pprint function useful for sequences. Used by
- the default pprint for tuples, dicts, and lists.
- """
- def inner(obj, p, cycle):
- if cycle:
- return p.text(start + '...' + end)
- step = len(start)
- p.begin_group(step, start)
- for idx, x in p._enumerate(obj):
- if idx:
- p.text(',')
- p.breakable()
- p.pretty(x)
- if len(obj) == 1 and type(obj) is tuple:
- # Special case for 1-item tuples.
- p.text(',')
- p.end_group(step, end)
- return inner
-
-
+ """
+ Factory that returns a pprint function useful for sequences. Used by
+ the default pprint for tuples, dicts, and lists.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text(start + '...' + end)
+ step = len(start)
+ p.begin_group(step, start)
+ for idx, x in p._enumerate(obj):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(x)
+ if len(obj) == 1 and type(obj) is tuple:
+ # Special case for 1-item tuples.
+ p.text(',')
+ p.end_group(step, end)
+ return inner
+
+
def _set_pprinter_factory(start, end):
- """
- Factory that returns a pprint function useful for sets and frozensets.
- """
- def inner(obj, p, cycle):
- if cycle:
- return p.text(start + '...' + end)
- if len(obj) == 0:
- # Special case.
+ """
+ Factory that returns a pprint function useful for sets and frozensets.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text(start + '...' + end)
+ if len(obj) == 0:
+ # Special case.
p.text(type(obj).__name__ + '()')
- else:
- step = len(start)
- p.begin_group(step, start)
- # Like dictionary keys, we will try to sort the items if there aren't too many
- if not (p.max_seq_length and len(obj) >= p.max_seq_length):
+ else:
+ step = len(start)
+ p.begin_group(step, start)
+ # Like dictionary keys, we will try to sort the items if there aren't too many
+ if not (p.max_seq_length and len(obj) >= p.max_seq_length):
items = _sorted_for_pprint(obj)
else:
items = obj
- for idx, x in p._enumerate(items):
- if idx:
- p.text(',')
- p.breakable()
- p.pretty(x)
- p.end_group(step, end)
- return inner
-
-
+ for idx, x in p._enumerate(items):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(x)
+ p.end_group(step, end)
+ return inner
+
+
def _dict_pprinter_factory(start, end):
- """
- Factory that returns a pprint function used by the default pprint of
- dicts and dict proxies.
- """
- def inner(obj, p, cycle):
- if cycle:
- return p.text('{...}')
+ """
+ Factory that returns a pprint function used by the default pprint of
+ dicts and dict proxies.
+ """
+ def inner(obj, p, cycle):
+ if cycle:
+ return p.text('{...}')
step = len(start)
p.begin_group(step, start)
- keys = obj.keys()
- # if dict isn't large enough to be truncated, sort keys before displaying
+ keys = obj.keys()
+ # if dict isn't large enough to be truncated, sort keys before displaying
# From Python 3.7, dicts preserve order by definition, so we don't sort.
if not DICT_IS_ORDERED \
and not (p.max_seq_length and len(obj) >= p.max_seq_length):
keys = _sorted_for_pprint(keys)
- for idx, key in p._enumerate(keys):
- if idx:
- p.text(',')
- p.breakable()
- p.pretty(key)
- p.text(': ')
- p.pretty(obj[key])
+ for idx, key in p._enumerate(keys):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(key)
+ p.text(': ')
+ p.pretty(obj[key])
p.end_group(step, end)
- return inner
-
-
-def _super_pprint(obj, p, cycle):
- """The pprint for the super type."""
- p.begin_group(8, '<super: ')
- p.pretty(obj.__thisclass__)
- p.text(',')
- p.breakable()
+ return inner
+
+
+def _super_pprint(obj, p, cycle):
+ """The pprint for the super type."""
+ p.begin_group(8, '<super: ')
+ p.pretty(obj.__thisclass__)
+ p.text(',')
+ p.breakable()
if PYPY: # In PyPy, super() objects don't have __self__ attributes
dself = obj.__repr__.__self__
p.pretty(None if dself is obj else dself)
else:
p.pretty(obj.__self__)
- p.end_group(8, '>')
-
-
-def _re_pattern_pprint(obj, p, cycle):
- """The pprint function for regular expression patterns."""
- p.text('re.compile(')
- pattern = repr(obj.pattern)
- if pattern[:1] in 'uU':
- pattern = pattern[1:]
- prefix = 'ur'
- else:
- prefix = 'r'
- pattern = prefix + pattern.replace('\\\\', '\\')
- p.text(pattern)
- if obj.flags:
- p.text(',')
- p.breakable()
- done_one = False
- for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
- 'UNICODE', 'VERBOSE', 'DEBUG'):
- if obj.flags & getattr(re, flag):
- if done_one:
- p.text('|')
- p.text('re.' + flag)
- done_one = True
- p.text(')')
-
-
-def _type_pprint(obj, p, cycle):
- """The pprint for classes and types."""
- # Heap allocated types might not have the module attribute,
- # and others may set it to None.
-
+ p.end_group(8, '>')
+
+
+def _re_pattern_pprint(obj, p, cycle):
+ """The pprint function for regular expression patterns."""
+ p.text('re.compile(')
+ pattern = repr(obj.pattern)
+ if pattern[:1] in 'uU':
+ pattern = pattern[1:]
+ prefix = 'ur'
+ else:
+ prefix = 'r'
+ pattern = prefix + pattern.replace('\\\\', '\\')
+ p.text(pattern)
+ if obj.flags:
+ p.text(',')
+ p.breakable()
+ done_one = False
+ for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
+ 'UNICODE', 'VERBOSE', 'DEBUG'):
+ if obj.flags & getattr(re, flag):
+ if done_one:
+ p.text('|')
+ p.text('re.' + flag)
+ done_one = True
+ p.text(')')
+
+
+def _type_pprint(obj, p, cycle):
+ """The pprint for classes and types."""
+ # Heap allocated types might not have the module attribute,
+ # and others may set it to None.
+
# Checks for a __repr__ override in the metaclass. Can't compare the
# type(obj).__repr__ directly because in PyPy the representation function
# inherited from type isn't the same type.__repr__
if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]:
- _repr_pprint(obj, p, cycle)
- return
-
- mod = _safe_getattr(obj, '__module__', None)
- try:
- name = obj.__qualname__
- if not isinstance(name, string_types):
- # This can happen if the type implements __qualname__ as a property
- # or other descriptor in Python 2.
- raise Exception("Try __name__")
- except Exception:
- name = obj.__name__
- if not isinstance(name, string_types):
- name = '<unknown type>'
-
- if mod in (None, '__builtin__', 'builtins', 'exceptions'):
- p.text(name)
- else:
- p.text(mod + '.' + name)
-
-
-def _repr_pprint(obj, p, cycle):
- """A pprint that just redirects to the normal repr function."""
- # Find newlines and replace them with p.break_()
- output = repr(obj)
- for idx,output_line in enumerate(output.splitlines()):
- if idx:
- p.break_()
- p.text(output_line)
-
-
-def _function_pprint(obj, p, cycle):
- """Base pprint for all functions and builtin functions."""
- name = _safe_getattr(obj, '__qualname__', obj.__name__)
- mod = obj.__module__
- if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
- name = mod + '.' + name
- p.text('<function %s>' % name)
-
-
-def _exception_pprint(obj, p, cycle):
- """Base pprint for all exceptions."""
- name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
- if obj.__class__.__module__ not in ('exceptions', 'builtins'):
- name = '%s.%s' % (obj.__class__.__module__, name)
- step = len(name) + 1
- p.begin_group(step, name + '(')
- for idx, arg in enumerate(getattr(obj, 'args', ())):
- if idx:
- p.text(',')
- p.breakable()
- p.pretty(arg)
- p.end_group(step, ')')
-
-
-#: the exception base
-try:
- _exception_base = BaseException
-except NameError:
- _exception_base = Exception
-
-
-#: printers for builtin types
-_type_pprinters = {
- int: _repr_pprint,
- float: _repr_pprint,
- str: _repr_pprint,
+ _repr_pprint(obj, p, cycle)
+ return
+
+ mod = _safe_getattr(obj, '__module__', None)
+ try:
+ name = obj.__qualname__
+ if not isinstance(name, string_types):
+ # This can happen if the type implements __qualname__ as a property
+ # or other descriptor in Python 2.
+ raise Exception("Try __name__")
+ except Exception:
+ name = obj.__name__
+ if not isinstance(name, string_types):
+ name = '<unknown type>'
+
+ if mod in (None, '__builtin__', 'builtins', 'exceptions'):
+ p.text(name)
+ else:
+ p.text(mod + '.' + name)
+
+
+def _repr_pprint(obj, p, cycle):
+ """A pprint that just redirects to the normal repr function."""
+ # Find newlines and replace them with p.break_()
+ output = repr(obj)
+ for idx,output_line in enumerate(output.splitlines()):
+ if idx:
+ p.break_()
+ p.text(output_line)
+
+
+def _function_pprint(obj, p, cycle):
+ """Base pprint for all functions and builtin functions."""
+ name = _safe_getattr(obj, '__qualname__', obj.__name__)
+ mod = obj.__module__
+ if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
+ name = mod + '.' + name
+ p.text('<function %s>' % name)
+
+
+def _exception_pprint(obj, p, cycle):
+ """Base pprint for all exceptions."""
+ name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
+ if obj.__class__.__module__ not in ('exceptions', 'builtins'):
+ name = '%s.%s' % (obj.__class__.__module__, name)
+ step = len(name) + 1
+ p.begin_group(step, name + '(')
+ for idx, arg in enumerate(getattr(obj, 'args', ())):
+ if idx:
+ p.text(',')
+ p.breakable()
+ p.pretty(arg)
+ p.end_group(step, ')')
+
+
+#: the exception base
+try:
+ _exception_base = BaseException
+except NameError:
+ _exception_base = Exception
+
+
+#: printers for builtin types
+_type_pprinters = {
+ int: _repr_pprint,
+ float: _repr_pprint,
+ str: _repr_pprint,
tuple: _seq_pprinter_factory('(', ')'),
list: _seq_pprinter_factory('[', ']'),
dict: _dict_pprinter_factory('{', '}'),
-
+
set: _set_pprinter_factory('{', '}'),
frozenset: _set_pprinter_factory('frozenset({', '})'),
- super: _super_pprint,
- _re_pattern_type: _re_pattern_pprint,
- type: _type_pprint,
- types.FunctionType: _function_pprint,
- types.BuiltinFunctionType: _function_pprint,
- types.MethodType: _repr_pprint,
-
- datetime.datetime: _repr_pprint,
- datetime.timedelta: _repr_pprint,
- _exception_base: _exception_pprint
-}
-
-try:
+ super: _super_pprint,
+ _re_pattern_type: _re_pattern_pprint,
+ type: _type_pprint,
+ types.FunctionType: _function_pprint,
+ types.BuiltinFunctionType: _function_pprint,
+ types.MethodType: _repr_pprint,
+
+ datetime.datetime: _repr_pprint,
+ datetime.timedelta: _repr_pprint,
+ _exception_base: _exception_pprint
+}
+
+try:
# In PyPy, types.DictProxyType is dict, setting the dictproxy printer
 # using dict.setdefault avoids overwriting the dict printer
_type_pprinters.setdefault(types.DictProxyType,
_dict_pprinter_factory('dict_proxy({', '})'))
- _type_pprinters[types.ClassType] = _type_pprint
- _type_pprinters[types.SliceType] = _repr_pprint
-except AttributeError: # Python 3
+ _type_pprinters[types.ClassType] = _type_pprint
+ _type_pprinters[types.SliceType] = _repr_pprint
+except AttributeError: # Python 3
_type_pprinters[types.MappingProxyType] = \
_dict_pprinter_factory('mappingproxy({', '})')
- _type_pprinters[slice] = _repr_pprint
-
-try:
- _type_pprinters[xrange] = _repr_pprint
- _type_pprinters[long] = _repr_pprint
- _type_pprinters[unicode] = _repr_pprint
-except NameError:
- _type_pprinters[range] = _repr_pprint
- _type_pprinters[bytes] = _repr_pprint
-
-#: printers for types specified by name
-_deferred_type_pprinters = {
-}
-
-def for_type(typ, func):
- """
- Add a pretty printer for a given type.
- """
- oldfunc = _type_pprinters.get(typ, None)
- if func is not None:
- # To support easy restoration of old pprinters, we need to ignore Nones.
- _type_pprinters[typ] = func
- return oldfunc
-
-def for_type_by_name(type_module, type_name, func):
- """
- Add a pretty printer for a type specified by the module and name of a type
- rather than the type object itself.
- """
- key = (type_module, type_name)
- oldfunc = _deferred_type_pprinters.get(key, None)
- if func is not None:
- # To support easy restoration of old pprinters, we need to ignore Nones.
- _deferred_type_pprinters[key] = func
- return oldfunc
-
-
-#: printers for the default singletons
-_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
- NotImplemented]), _repr_pprint)
-
-
-def _defaultdict_pprint(obj, p, cycle):
- name = obj.__class__.__name__
- with p.group(len(name) + 1, name + '(', ')'):
- if cycle:
- p.text('...')
- else:
- p.pretty(obj.default_factory)
- p.text(',')
- p.breakable()
- p.pretty(dict(obj))
-
-def _ordereddict_pprint(obj, p, cycle):
- name = obj.__class__.__name__
- with p.group(len(name) + 1, name + '(', ')'):
- if cycle:
- p.text('...')
- elif len(obj):
- p.pretty(list(obj.items()))
-
-def _deque_pprint(obj, p, cycle):
- name = obj.__class__.__name__
- with p.group(len(name) + 1, name + '(', ')'):
- if cycle:
- p.text('...')
- else:
- p.pretty(list(obj))
-
-
-def _counter_pprint(obj, p, cycle):
- name = obj.__class__.__name__
- with p.group(len(name) + 1, name + '(', ')'):
- if cycle:
- p.text('...')
- elif len(obj):
- p.pretty(dict(obj))
-
-for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
-for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
-for_type_by_name('collections', 'deque', _deque_pprint)
-for_type_by_name('collections', 'Counter', _counter_pprint)
-
-if __name__ == '__main__':
- from random import randrange
- class Foo(object):
- def __init__(self):
- self.foo = 1
- self.bar = re.compile(r'\s+')
- self.blub = dict.fromkeys(range(30), randrange(1, 40))
- self.hehe = 23424.234234
- self.list = ["blub", "blah", self]
-
- def get_foo(self):
- print("foo")
-
- pprint(Foo(), verbose=True)
+ _type_pprinters[slice] = _repr_pprint
+
+try:
+ _type_pprinters[xrange] = _repr_pprint
+ _type_pprinters[long] = _repr_pprint
+ _type_pprinters[unicode] = _repr_pprint
+except NameError:
+ _type_pprinters[range] = _repr_pprint
+ _type_pprinters[bytes] = _repr_pprint
+
+#: printers for types specified by name
+_deferred_type_pprinters = {
+}
+
+def for_type(typ, func):
+ """
+ Add a pretty printer for a given type.
+ """
+ oldfunc = _type_pprinters.get(typ, None)
+ if func is not None:
+ # To support easy restoration of old pprinters, we need to ignore Nones.
+ _type_pprinters[typ] = func
+ return oldfunc
+
+def for_type_by_name(type_module, type_name, func):
+ """
+ Add a pretty printer for a type specified by the module and name of a type
+ rather than the type object itself.
+ """
+ key = (type_module, type_name)
+ oldfunc = _deferred_type_pprinters.get(key, None)
+ if func is not None:
+ # To support easy restoration of old pprinters, we need to ignore Nones.
+ _deferred_type_pprinters[key] = func
+ return oldfunc
+
+
+#: printers for the default singletons
+_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
+ NotImplemented]), _repr_pprint)
+
+
+def _defaultdict_pprint(obj, p, cycle):
+ name = obj.__class__.__name__
+ with p.group(len(name) + 1, name + '(', ')'):
+ if cycle:
+ p.text('...')
+ else:
+ p.pretty(obj.default_factory)
+ p.text(',')
+ p.breakable()
+ p.pretty(dict(obj))
+
+def _ordereddict_pprint(obj, p, cycle):
+ name = obj.__class__.__name__
+ with p.group(len(name) + 1, name + '(', ')'):
+ if cycle:
+ p.text('...')
+ elif len(obj):
+ p.pretty(list(obj.items()))
+
+def _deque_pprint(obj, p, cycle):
+ name = obj.__class__.__name__
+ with p.group(len(name) + 1, name + '(', ')'):
+ if cycle:
+ p.text('...')
+ else:
+ p.pretty(list(obj))
+
+
+def _counter_pprint(obj, p, cycle):
+ name = obj.__class__.__name__
+ with p.group(len(name) + 1, name + '(', ')'):
+ if cycle:
+ p.text('...')
+ elif len(obj):
+ p.pretty(dict(obj))
+
+for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
+for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
+for_type_by_name('collections', 'deque', _deque_pprint)
+for_type_by_name('collections', 'Counter', _counter_pprint)
+
+if __name__ == '__main__':
+ from random import randrange
+ class Foo(object):
+ def __init__(self):
+ self.foo = 1
+ self.bar = re.compile(r'\s+')
+ self.blub = dict.fromkeys(range(30), randrange(1, 40))
+ self.hehe = 23424.234234
+ self.list = ["blub", "blah", self]
+
+ def get_foo(self):
+ print("foo")
+
+ pprint(Foo(), verbose=True)
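As a usage sketch for the API above (assuming the module is importable as IPython.lib.pretty; Point and _complex_pprint are hypothetical names): `_repr_pretty_` hooks classes you own, while `for_type` registers a printer for a class you don't control.

    from IPython.lib.pretty import pretty, for_type

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def _repr_pretty_(self, p, cycle):
            # cycle is True when this object is reached through itself;
            # emit a placeholder instead of recursing forever.
            if cycle:
                p.text('Point(...)')
            else:
                with p.group(6, 'Point(', ')'):
                    p.pretty(self.x)
                    p.text(',')
                    p.breakable()
                    p.pretty(self.y)

    def _complex_pprint(obj, p, cycle):
        p.text('complex(%r, %r)' % (obj.real, obj.imag))

    # for_type returns the previously registered printer (None here);
    # per its docstring, passing None back later is deliberately a no-op.
    old = for_type(complex, _complex_pprint)

    print(pretty(Point(1, 2)))    # Point(1, 2)
    print(pretty(1 + 2j))         # complex(1.0, 2.0)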
diff --git a/contrib/python/ipython/py2/IPython/lib/security.py b/contrib/python/ipython/py2/IPython/lib/security.py
index a20fcc132e..8429c2a4be 100644
--- a/contrib/python/ipython/py2/IPython/lib/security.py
+++ b/contrib/python/ipython/py2/IPython/lib/security.py
@@ -1,114 +1,114 @@
-"""
-Password generation for the IPython notebook.
-"""
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-# Stdlib
-import getpass
-import hashlib
-import random
-
-# Our own
-from IPython.core.error import UsageError
-from IPython.utils.py3compat import cast_bytes, str_to_bytes
-
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-
- # Length of the salt in number of hex chars, which implies salt_len * 4
-# bits of randomness.
-salt_len = 12
-
-#-----------------------------------------------------------------------------
-# Functions
-#-----------------------------------------------------------------------------
-
-def passwd(passphrase=None, algorithm='sha1'):
- """Generate hashed password and salt for use in notebook configuration.
-
- In the notebook configuration, set `c.NotebookApp.password` to
- the generated string.
-
- Parameters
- ----------
- passphrase : str
- Password to hash. If unspecified, the user is asked to input
- and verify a password.
- algorithm : str
- Hashing algorithm to use (e.g., 'sha1' or any argument supported
- by :func:`hashlib.new`).
-
- Returns
- -------
- hashed_passphrase : str
- Hashed password, in the format 'hash_algorithm:salt:passphrase_hash'.
-
- Examples
- --------
- >>> passwd('mypassword')
- 'sha1:7cf3:b7d6da294ea9592a9480c8f52e63cd42cfb9dd12'
-
- """
- if passphrase is None:
- for i in range(3):
- p0 = getpass.getpass('Enter password: ')
- p1 = getpass.getpass('Verify password: ')
- if p0 == p1:
- passphrase = p0
- break
- else:
- print('Passwords do not match.')
- else:
- raise UsageError('No matching passwords found. Giving up.')
-
- h = hashlib.new(algorithm)
- salt = ('%0' + str(salt_len) + 'x') % random.getrandbits(4 * salt_len)
- h.update(cast_bytes(passphrase, 'utf-8') + str_to_bytes(salt, 'ascii'))
-
- return ':'.join((algorithm, salt, h.hexdigest()))
-
-
-def passwd_check(hashed_passphrase, passphrase):
- """Verify that a given passphrase matches its hashed version.
-
- Parameters
- ----------
- hashed_passphrase : str
- Hashed password, in the format returned by `passwd`.
- passphrase : str
- Passphrase to validate.
-
- Returns
- -------
- valid : bool
- True if the passphrase matches the hash.
-
- Examples
- --------
- >>> from IPython.lib.security import passwd_check
- >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
- ... 'mypassword')
- True
-
- >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
- ... 'anotherpassword')
- False
- """
- try:
- algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)
- except (ValueError, TypeError):
- return False
-
- try:
- h = hashlib.new(algorithm)
- except ValueError:
- return False
-
- if len(pw_digest) == 0:
- return False
-
- h.update(cast_bytes(passphrase, 'utf-8') + cast_bytes(salt, 'ascii'))
-
- return h.hexdigest() == pw_digest
+"""
+Password generation for the IPython notebook.
+"""
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+# Stdlib
+import getpass
+import hashlib
+import random
+
+# Our own
+from IPython.core.error import UsageError
+from IPython.utils.py3compat import cast_bytes, str_to_bytes
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+ # Length of the salt in number of hex chars, which implies salt_len * 4
+# bits of randomness.
+salt_len = 12
+
+#-----------------------------------------------------------------------------
+# Functions
+#-----------------------------------------------------------------------------
+
+def passwd(passphrase=None, algorithm='sha1'):
+ """Generate hashed password and salt for use in notebook configuration.
+
+ In the notebook configuration, set `c.NotebookApp.password` to
+ the generated string.
+
+ Parameters
+ ----------
+ passphrase : str
+ Password to hash. If unspecified, the user is asked to input
+ and verify a password.
+ algorithm : str
+ Hashing algorithm to use (e.g., 'sha1' or any argument supported
+ by :func:`hashlib.new`).
+
+ Returns
+ -------
+ hashed_passphrase : str
+ Hashed password, in the format 'hash_algorithm:salt:passphrase_hash'.
+
+ Examples
+ --------
+ >>> passwd('mypassword')
+ 'sha1:7cf3:b7d6da294ea9592a9480c8f52e63cd42cfb9dd12'
+
+ """
+ if passphrase is None:
+ for i in range(3):
+ p0 = getpass.getpass('Enter password: ')
+ p1 = getpass.getpass('Verify password: ')
+ if p0 == p1:
+ passphrase = p0
+ break
+ else:
+ print('Passwords do not match.')
+ else:
+ raise UsageError('No matching passwords found. Giving up.')
+
+ h = hashlib.new(algorithm)
+ salt = ('%0' + str(salt_len) + 'x') % random.getrandbits(4 * salt_len)
+ h.update(cast_bytes(passphrase, 'utf-8') + str_to_bytes(salt, 'ascii'))
+
+ return ':'.join((algorithm, salt, h.hexdigest()))
+
+
+def passwd_check(hashed_passphrase, passphrase):
+ """Verify that a given passphrase matches its hashed version.
+
+ Parameters
+ ----------
+ hashed_passphrase : str
+ Hashed password, in the format returned by `passwd`.
+ passphrase : str
+ Passphrase to validate.
+
+ Returns
+ -------
+ valid : bool
+ True if the passphrase matches the hash.
+
+ Examples
+ --------
+ >>> from IPython.lib.security import passwd_check
+ >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
+ ... 'mypassword')
+ True
+
+ >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
+ ... 'anotherpassword')
+ False
+ """
+ try:
+ algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)
+ except (ValueError, TypeError):
+ return False
+
+ try:
+ h = hashlib.new(algorithm)
+ except ValueError:
+ return False
+
+ if len(pw_digest) == 0:
+ return False
+
+ h.update(cast_bytes(passphrase, 'utf-8') + cast_bytes(salt, 'ascii'))
+
+ return h.hexdigest() == pw_digest
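A quick sanity check of the scheme above (a sketch, assuming IPython.lib.security is importable; 's3cret' is a placeholder passphrase): with salt_len = 12, the salt is 12 hex characters, i.e. 12 * 4 = 48 bits of randomness, and passwd / passwd_check round-trip:

    from IPython.lib.security import passwd, passwd_check

    hashed = passwd('s3cret')                 # 'sha1:<12 hex>:<40 hex>'
    algorithm, salt, digest = hashed.split(':', 2)
    assert algorithm == 'sha1'
    assert len(salt) == 12                    # salt_len hex chars == 48 bits
    assert passwd_check(hashed, 's3cret')
    assert not passwd_check(hashed, 'wrong')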
diff --git a/contrib/python/ipython/py2/IPython/nbconvert.py b/contrib/python/ipython/py2/IPython/nbconvert.py
index 3d0a6c5076..2de4ee50bc 100644
--- a/contrib/python/ipython/py2/IPython/nbconvert.py
+++ b/contrib/python/ipython/py2/IPython/nbconvert.py
@@ -1,19 +1,19 @@
-"""
-Shim to maintain backwards compatibility with old IPython.nbconvert imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.nbconvert imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.nbconvert` package has been deprecated since IPython 4.0. "
- "You should import from nbconvert instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.nbconvert'] = ShimModule(
- src='IPython.nbconvert', mirror='nbconvert')
+ "You should import from nbconvert instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.nbconvert'] = ShimModule(
+ src='IPython.nbconvert', mirror='nbconvert')
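To illustrate the shim mechanism above (a sketch; it assumes nbconvert is installed and 'notebook.ipynb' is a hypothetical file): old-style imports still resolve, with a one-time ShimWarning on first import.

    # sys.modules['IPython.nbconvert'] is a ShimModule, so attribute
    # access is forwarded to the real nbconvert package. The ShimWarning
    # is emitted once, when this shim module is first imported.
    from IPython.nbconvert import PythonExporter  # really nbconvert.PythonExporter

    body, resources = PythonExporter().from_filename('notebook.ipynb')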
diff --git a/contrib/python/ipython/py2/IPython/nbformat.py b/contrib/python/ipython/py2/IPython/nbformat.py
index 2626d50e0c..310277de00 100644
--- a/contrib/python/ipython/py2/IPython/nbformat.py
+++ b/contrib/python/ipython/py2/IPython/nbformat.py
@@ -1,19 +1,19 @@
-"""
-Shim to maintain backwards compatibility with old IPython.nbformat imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.nbformat imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.nbformat` package has been deprecated since IPython 4.0. "
- "You should import from nbformat instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.nbformat'] = ShimModule(
- src='IPython.nbformat', mirror='nbformat')
+ "You should import from nbformat instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.nbformat'] = ShimModule(
+ src='IPython.nbformat', mirror='nbformat')
diff --git a/contrib/python/ipython/py2/IPython/parallel.py b/contrib/python/ipython/py2/IPython/parallel.py
index ea25050476..0f10012783 100644
--- a/contrib/python/ipython/py2/IPython/parallel.py
+++ b/contrib/python/ipython/py2/IPython/parallel.py
@@ -1,20 +1,20 @@
-"""
-Shim to maintain backwards compatibility with old IPython.parallel imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.parallel imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.parallel` package has been deprecated since IPython 4.0. "
- "You should import from ipyparallel instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.parallel'] = ShimModule(
- src='IPython.parallel', mirror='ipyparallel')
-
+ "You should import from ipyparallel instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.parallel'] = ShimModule(
+ src='IPython.parallel', mirror='ipyparallel')
+
diff --git a/contrib/python/ipython/py2/IPython/paths.py b/contrib/python/ipython/py2/IPython/paths.py
index d14f26c1fb..59787722a5 100644
--- a/contrib/python/ipython/py2/IPython/paths.py
+++ b/contrib/python/ipython/py2/IPython/paths.py
@@ -1,120 +1,120 @@
-"""Find files and directories which IPython uses.
-"""
-import os.path
-import shutil
-import tempfile
-from warnings import warn
-
-import IPython
-from IPython.utils.importstring import import_item
-from IPython.utils.path import (
- get_home_dir, get_xdg_dir, get_xdg_cache_dir, compress_user, _writable_dir,
- ensure_dir_exists, fs_encoding, filefind
-)
-from IPython.utils import py3compat
-
-def get_ipython_dir():
- """Get the IPython directory for this platform and user.
-
- This uses the logic in `get_home_dir` to find the home directory
- and then adds .ipython to the end of the path.
- """
-
- env = os.environ
- pjoin = os.path.join
-
-
- ipdir_def = '.ipython'
-
- home_dir = get_home_dir()
- xdg_dir = get_xdg_dir()
-
- # import pdb; pdb.set_trace() # dbg
- if 'IPYTHON_DIR' in env:
- warn('The environment variable IPYTHON_DIR is deprecated. '
- 'Please use IPYTHONDIR instead.')
- ipdir = env.get('IPYTHONDIR', env.get('IPYTHON_DIR', None))
- if ipdir is None:
- # not set explicitly, use ~/.ipython
- ipdir = pjoin(home_dir, ipdir_def)
- if xdg_dir:
- # Several IPython versions (up to 1.x) defaulted to .config/ipython
- # on Linux. We have decided to go back to using .ipython everywhere
- xdg_ipdir = pjoin(xdg_dir, 'ipython')
-
- if _writable_dir(xdg_ipdir):
- cu = compress_user
- if os.path.exists(ipdir):
- warn(('Ignoring {0} in favour of {1}. Remove {0} to '
- 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
- elif os.path.islink(xdg_ipdir):
- warn(('{0} is deprecated. Move link to {1} to '
- 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
- else:
- warn('Moving {0} to {1}'.format(cu(xdg_ipdir), cu(ipdir)))
- shutil.move(xdg_ipdir, ipdir)
-
- ipdir = os.path.normpath(os.path.expanduser(ipdir))
-
- if os.path.exists(ipdir) and not _writable_dir(ipdir):
- # ipdir exists, but is not writable
- warn("IPython dir '{0}' is not a writable location,"
- " using a temp directory.".format(ipdir))
- ipdir = tempfile.mkdtemp()
- elif not os.path.exists(ipdir):
- parent = os.path.dirname(ipdir)
- if not _writable_dir(parent):
- # ipdir does not exist and parent isn't writable
- warn("IPython parent '{0}' is not a writable location,"
- " using a temp directory.".format(parent))
- ipdir = tempfile.mkdtemp()
-
- return py3compat.cast_unicode(ipdir, fs_encoding)
-
-
-def get_ipython_cache_dir():
- """Get the cache directory it is created if it does not exist."""
- xdgdir = get_xdg_cache_dir()
- if xdgdir is None:
- return get_ipython_dir()
- ipdir = os.path.join(xdgdir, "ipython")
- if not os.path.exists(ipdir) and _writable_dir(xdgdir):
- ensure_dir_exists(ipdir)
- elif not _writable_dir(xdgdir):
- return get_ipython_dir()
-
- return py3compat.cast_unicode(ipdir, fs_encoding)
-
-
-def get_ipython_package_dir():
- """Get the base directory where IPython itself is installed."""
- ipdir = os.path.dirname(IPython.__file__)
- return py3compat.cast_unicode(ipdir, fs_encoding)
-
-
-def get_ipython_module_path(module_str):
- """Find the path to an IPython module in this version of IPython.
-
- This will always find the version of the module that is in this importable
- IPython package. This will always return the path to the ``.py``
- version of the module.
- """
- if module_str == 'IPython':
- return os.path.join(get_ipython_package_dir(), '__init__.py')
- mod = import_item(module_str)
- the_path = mod.__file__.replace('.pyc', '.py')
- the_path = the_path.replace('.pyo', '.py')
- return py3compat.cast_unicode(the_path, fs_encoding)
-
-def locate_profile(profile='default'):
- """Find the path to the folder associated with a given profile.
-
- I.e. find $IPYTHONDIR/profile_whatever.
- """
- from IPython.core.profiledir import ProfileDir, ProfileDirError
- try:
- pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
- except ProfileDirError:
- # IOError makes more sense when people are expecting a path
- raise IOError("Couldn't find profile %r" % profile)
- return pd.location
+"""Find files and directories which IPython uses.
+"""
+import os.path
+import shutil
+import tempfile
+from warnings import warn
+
+import IPython
+from IPython.utils.importstring import import_item
+from IPython.utils.path import (
+ get_home_dir, get_xdg_dir, get_xdg_cache_dir, compress_user, _writable_dir,
+ ensure_dir_exists, fs_encoding, filefind
+)
+from IPython.utils import py3compat
+
+def get_ipython_dir():
+ """Get the IPython directory for this platform and user.
+
+ This uses the logic in `get_home_dir` to find the home directory
+ and then adds .ipython to the end of the path.
+ """
+
+ env = os.environ
+ pjoin = os.path.join
+
+
+ ipdir_def = '.ipython'
+
+ home_dir = get_home_dir()
+ xdg_dir = get_xdg_dir()
+
+ # import pdb; pdb.set_trace() # dbg
+ if 'IPYTHON_DIR' in env:
+ warn('The environment variable IPYTHON_DIR is deprecated. '
+ 'Please use IPYTHONDIR instead.')
+ ipdir = env.get('IPYTHONDIR', env.get('IPYTHON_DIR', None))
+ if ipdir is None:
+ # not set explicitly, use ~/.ipython
+ ipdir = pjoin(home_dir, ipdir_def)
+ if xdg_dir:
+ # Several IPython versions (up to 1.x) defaulted to .config/ipython
+ # on Linux. We have decided to go back to using .ipython everywhere
+ xdg_ipdir = pjoin(xdg_dir, 'ipython')
+
+ if _writable_dir(xdg_ipdir):
+ cu = compress_user
+ if os.path.exists(ipdir):
+ warn(('Ignoring {0} in favour of {1}. Remove {0} to '
+ 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
+ elif os.path.islink(xdg_ipdir):
+ warn(('{0} is deprecated. Move link to {1} to '
+ 'get rid of this message').format(cu(xdg_ipdir), cu(ipdir)))
+ else:
+ warn('Moving {0} to {1}'.format(cu(xdg_ipdir), cu(ipdir)))
+ shutil.move(xdg_ipdir, ipdir)
+
+ ipdir = os.path.normpath(os.path.expanduser(ipdir))
+
+ if os.path.exists(ipdir) and not _writable_dir(ipdir):
+ # ipdir exists, but is not writable
+ warn("IPython dir '{0}' is not a writable location,"
+ " using a temp directory.".format(ipdir))
+ ipdir = tempfile.mkdtemp()
+ elif not os.path.exists(ipdir):
+ parent = os.path.dirname(ipdir)
+ if not _writable_dir(parent):
+ # ipdir does not exist and parent isn't writable
+ warn("IPython parent '{0}' is not a writable location,"
+ " using a temp directory.".format(parent))
+ ipdir = tempfile.mkdtemp()
+
+ return py3compat.cast_unicode(ipdir, fs_encoding)
+
+
+def get_ipython_cache_dir():
+ """Get the cache directory it is created if it does not exist."""
+ xdgdir = get_xdg_cache_dir()
+ if xdgdir is None:
+ return get_ipython_dir()
+ ipdir = os.path.join(xdgdir, "ipython")
+ if not os.path.exists(ipdir) and _writable_dir(xdgdir):
+ ensure_dir_exists(ipdir)
+ elif not _writable_dir(xdgdir):
+ return get_ipython_dir()
+
+ return py3compat.cast_unicode(ipdir, fs_encoding)
+
+
+def get_ipython_package_dir():
+ """Get the base directory where IPython itself is installed."""
+ ipdir = os.path.dirname(IPython.__file__)
+ return py3compat.cast_unicode(ipdir, fs_encoding)
+
+
+def get_ipython_module_path(module_str):
+ """Find the path to an IPython module in this version of IPython.
+
+ This will always find the version of the module that is in this importable
+ IPython package. This will always return the path to the ``.py``
+ version of the module.
+ """
+ if module_str == 'IPython':
+ return os.path.join(get_ipython_package_dir(), '__init__.py')
+ mod = import_item(module_str)
+ the_path = mod.__file__.replace('.pyc', '.py')
+ the_path = the_path.replace('.pyo', '.py')
+ return py3compat.cast_unicode(the_path, fs_encoding)
+
+def locate_profile(profile='default'):
+ """Find the path to the folder associated with a given profile.
+
+ I.e. find $IPYTHONDIR/profile_whatever.
+ """
+ from IPython.core.profiledir import ProfileDir, ProfileDirError
+ try:
+ pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
+ except ProfileDirError:
+ # IOError makes more sense when people are expecting a path
+ raise IOError("Couldn't find profile %r" % profile)
+ return pd.location
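A quick usage sketch of the lookup order implemented above: the IPYTHONDIR environment variable (or the deprecated IPYTHON_DIR) wins, otherwise ~/.ipython is used, and an unwritable result falls back to a temp directory. The path below is made up for illustration:

    import os
    os.environ['IPYTHONDIR'] = '/tmp/demo_ipython'  # hypothetical override

    from IPython.paths import get_ipython_dir, locate_profile

    print(get_ipython_dir())  # -> /tmp/demo_ipython (assuming it is writable)
    try:
        print(locate_profile())  # $IPYTHONDIR/profile_default, if present
    except IOError:
        print('no profile_default directory yet')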
diff --git a/contrib/python/ipython/py2/IPython/qt.py b/contrib/python/ipython/py2/IPython/qt.py
index 9b032e271e..7557a3f329 100644
--- a/contrib/python/ipython/py2/IPython/qt.py
+++ b/contrib/python/ipython/py2/IPython/qt.py
@@ -1,24 +1,24 @@
-"""
-Shim to maintain backwards compatibility with old IPython.qt imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.qt imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.qt` package has been deprecated since IPython 4.0. "
- "You should import from qtconsole instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-_console = sys.modules['IPython.qt.console'] = ShimModule(
- src='IPython.qt.console', mirror='qtconsole')
-
-_qt = ShimModule(src='IPython.qt', mirror='qtconsole')
-
-_qt.console = _console
-sys.modules['IPython.qt'] = _qt
+ "You should import from qtconsole instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+_console = sys.modules['IPython.qt.console'] = ShimModule(
+ src='IPython.qt.console', mirror='qtconsole')
+
+_qt = ShimModule(src='IPython.qt', mirror='qtconsole')
+
+_qt.console = _console
+sys.modules['IPython.qt'] = _qt
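With the extra aliasing above, the dotted legacy path keeps working as well; a hypothetical interactive session once the shim is installed:

    >>> import IPython.qt.console      # emits the ShimWarning shown above
    >>> import sys
    >>> sys.modules['IPython.qt.console'] is IPython.qt.console
    True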
diff --git a/contrib/python/ipython/py2/IPython/sphinxext/custom_doctests.py b/contrib/python/ipython/py2/IPython/sphinxext/custom_doctests.py
index 65d7051f4e..7678fd6801 100644
--- a/contrib/python/ipython/py2/IPython/sphinxext/custom_doctests.py
+++ b/contrib/python/ipython/py2/IPython/sphinxext/custom_doctests.py
@@ -1,155 +1,155 @@
-"""
-Handlers for IPythonDirective's @doctest pseudo-decorator.
-
-The Sphinx extension that provides support for embedded IPython code provides
-a pseudo-decorator @doctest, which treats the input/output block as a
-doctest, raising a RuntimeError during doc generation if the actual output
-(after running the input) does not match the expected output.
-
-An example usage is:
-
-.. code-block:: rst
-
- .. ipython::
-
- In [1]: x = 1
-
- @doctest
- In [2]: x + 2
- Out[3]: 3
-
-One can also provide arguments to the decorator. The first argument should be
-the name of a custom handler. The specification of any other arguments is
-determined by the handler. For example,
-
-.. code-block:: rst
-
- .. ipython::
-
- @doctest float
- In [154]: 0.1 + 0.2
- Out[154]: 0.3
-
-allows the actual output ``0.30000000000000004`` to match the expected output
-due to a comparison with `np.allclose`.
-
-This module contains handlers for the @doctest pseudo-decorator. Handlers
-should have the following function signature::
-
- handler(sphinx_shell, args, input_lines, found, submitted)
-
-where `sphinx_shell` is the embedded Sphinx shell, `args` contains the list
-of arguments that follow: '@doctest handler_name', `input_lines` contains
-a list of the lines relevant to the current doctest, `found` is a string
-containing the output from the IPython shell, and `submitted` is a string
-containing the expected output from the IPython shell.
-
-Handlers must be registered in the `doctests` dict at the end of this module.
-
-"""
-
-def str_to_array(s):
- """
- Simplistic converter of strings from repr to float NumPy arrays.
-
- If the repr representation has ellipsis in it, then this will fail.
-
- Parameters
- ----------
- s : str
- The repr version of a NumPy array.
-
- Examples
- --------
- >>> s = "array([ 0.3, inf, nan])"
- >>> a = str_to_array(s)
-
- """
- import numpy as np
-
- # Need to make sure eval() knows about inf and nan.
- # This also assumes default printoptions for NumPy.
- from numpy import inf, nan
-
- if s.startswith(u'array'):
- # Remove array( and )
- s = s[6:-1]
-
- if s.startswith(u'['):
- a = np.array(eval(s), dtype=float)
- else:
- # Assume its a regular float. Force 1D so we can index into it.
- a = np.atleast_1d(float(s))
- return a
-
-def float_doctest(sphinx_shell, args, input_lines, found, submitted):
- """
- Doctest which allow the submitted output to vary slightly from the input.
-
- Here is how it might appear in an rst file:
-
- .. code-block:: rst
-
- .. ipython::
-
- @doctest float
- In [1]: 0.1 + 0.2
- Out[1]: 0.3
-
- """
- import numpy as np
-
- if len(args) == 2:
- rtol = 1e-05
- atol = 1e-08
- else:
- # Both must be specified if any are specified.
- try:
- rtol = float(args[2])
- atol = float(args[3])
- except IndexError:
- e = ("Both `rtol` and `atol` must be specified "
- "if either are specified: {0}".format(args))
- raise IndexError(e)
-
- try:
- submitted = str_to_array(submitted)
- found = str_to_array(found)
- except:
- # For example, if the array is huge and there are ellipsis in it.
- error = True
- else:
- found_isnan = np.isnan(found)
- submitted_isnan = np.isnan(submitted)
- error = not np.allclose(found_isnan, submitted_isnan)
- error |= not np.allclose(found[~found_isnan],
- submitted[~submitted_isnan],
- rtol=rtol, atol=atol)
-
- TAB = ' ' * 4
- directive = sphinx_shell.directive
- if directive is None:
- source = 'Unavailable'
- content = 'Unavailable'
- else:
- source = directive.state.document.current_source
- # Add tabs and make into a single string.
- content = '\n'.join([TAB + line for line in directive.content])
-
- if error:
-
- e = ('doctest float comparison failure\n\n'
- 'Document source: {0}\n\n'
- 'Raw content: \n{1}\n\n'
- 'On input line(s):\n{TAB}{2}\n\n'
- 'we found output:\n{TAB}{3}\n\n'
- 'instead of the expected:\n{TAB}{4}\n\n')
- e = e.format(source, content, '\n'.join(input_lines), repr(found),
- repr(submitted), TAB=TAB)
- raise RuntimeError(e)
-
-# dict of allowable doctest handlers. The key represents the first argument
-# that must be given to @doctest in order to activate the handler.
-doctests = {
- 'float': float_doctest,
-}
+"""
+Handlers for IPythonDirective's @doctest pseudo-decorator.
+
+The Sphinx extension that provides support for embedded IPython code provides
+a pseudo-decorator @doctest, which treats the input/output block as a
+doctest, raising a RuntimeError during doc generation if the actual output
+(after running the input) does not match the expected output.
+
+An example usage is:
+
+.. code-block:: rst
+
+ .. ipython::
+
+ In [1]: x = 1
+
+ @doctest
+ In [2]: x + 2
+ Out[3]: 3
+
+One can also provide arguments to the decorator. The first argument should be
+the name of a custom handler. The specification of any other arguments is
+determined by the handler. For example,
+
+.. code-block:: rst
+
+ .. ipython::
+
+ @doctest float
+ In [154]: 0.1 + 0.2
+ Out[154]: 0.3
+
+allows the actual output ``0.30000000000000004`` to match the expected output
+due to a comparison with `np.allclose`.
+
+This module contains handlers for the @doctest pseudo-decorator. Handlers
+should have the following function signature::
+
+ handler(sphinx_shell, args, input_lines, found, submitted)
+
+where `sphinx_shell` is the embedded Sphinx shell, `args` contains the list
+of arguments that follow: '@doctest handler_name', `input_lines` contains
+a list of the lines relevant to the current doctest, `found` is a string
+containing the output from the IPython shell, and `submitted` is a string
+containing the expected output from the IPython shell.
+
+Handlers must be registered in the `doctests` dict at the end of this module.
+
+"""
+
+def str_to_array(s):
+ """
+ Simplistic converter of strings from repr to float NumPy arrays.
+
+    If the repr string has an ellipsis in it, then this will fail.
+
+ Parameters
+ ----------
+ s : str
+ The repr version of a NumPy array.
+
+ Examples
+ --------
+ >>> s = "array([ 0.3, inf, nan])"
+ >>> a = str_to_array(s)
+
+ """
+ import numpy as np
+
+ # Need to make sure eval() knows about inf and nan.
+ # This also assumes default printoptions for NumPy.
+ from numpy import inf, nan
+
+ if s.startswith(u'array'):
+ # Remove array( and )
+ s = s[6:-1]
+
+ if s.startswith(u'['):
+ a = np.array(eval(s), dtype=float)
+ else:
+        # Assume it's a regular float. Force 1D so we can index into it.
+ a = np.atleast_1d(float(s))
+ return a
+
+def float_doctest(sphinx_shell, args, input_lines, found, submitted):
+ """
+    Doctest which allows the submitted output to vary slightly from the input.
+
+ Here is how it might appear in an rst file:
+
+ .. code-block:: rst
+
+ .. ipython::
+
+ @doctest float
+ In [1]: 0.1 + 0.2
+ Out[1]: 0.3
+
+ """
+ import numpy as np
+
+ if len(args) == 2:
+ rtol = 1e-05
+ atol = 1e-08
+ else:
+ # Both must be specified if any are specified.
+ try:
+ rtol = float(args[2])
+ atol = float(args[3])
+ except IndexError:
+ e = ("Both `rtol` and `atol` must be specified "
+ "if either are specified: {0}".format(args))
+ raise IndexError(e)
+
+ try:
+ submitted = str_to_array(submitted)
+ found = str_to_array(found)
+ except:
+ # For example, if the array is huge and there are ellipsis in it.
+ error = True
+ else:
+ found_isnan = np.isnan(found)
+ submitted_isnan = np.isnan(submitted)
+ error = not np.allclose(found_isnan, submitted_isnan)
+ error |= not np.allclose(found[~found_isnan],
+ submitted[~submitted_isnan],
+ rtol=rtol, atol=atol)
+
+ TAB = ' ' * 4
+ directive = sphinx_shell.directive
+ if directive is None:
+ source = 'Unavailable'
+ content = 'Unavailable'
+ else:
+ source = directive.state.document.current_source
+ # Add tabs and make into a single string.
+ content = '\n'.join([TAB + line for line in directive.content])
+
+ if error:
+
+ e = ('doctest float comparison failure\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'On input line(s):\n{TAB}{2}\n\n'
+ 'we found output:\n{TAB}{3}\n\n'
+ 'instead of the expected:\n{TAB}{4}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines), repr(found),
+ repr(submitted), TAB=TAB)
+ raise RuntimeError(e)
+
+# dict of allowable doctest handlers. The key represents the first argument
+# that must be given to @doctest in order to activate the handler.
+doctests = {
+ 'float': float_doctest,
+}
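For reference, once both strings parse, the comparison above reduces to np.allclose on the NaN mask and then on the non-NaN values; a standalone sketch with the handler's default tolerances:

    import numpy as np

    found = np.atleast_1d(0.30000000000000004)  # what the interpreter printed
    submitted = np.atleast_1d(0.3)              # what the document claims

    # NaN positions must agree, then the remaining values must be close.
    ok = np.allclose(np.isnan(found), np.isnan(submitted))
    ok = ok and np.allclose(found[~np.isnan(found)],
                            submitted[~np.isnan(submitted)],
                            rtol=1e-05, atol=1e-08)
    print(ok)  # True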
diff --git a/contrib/python/ipython/py2/IPython/sphinxext/ipython_console_highlighting.py b/contrib/python/ipython/py2/IPython/sphinxext/ipython_console_highlighting.py
index bc64087797..b93a151fb3 100644
--- a/contrib/python/ipython/py2/IPython/sphinxext/ipython_console_highlighting.py
+++ b/contrib/python/ipython/py2/IPython/sphinxext/ipython_console_highlighting.py
@@ -1,28 +1,28 @@
-"""
-reST directive for syntax-highlighting ipython interactive sessions.
-
-"""
-
-from sphinx import highlighting
-from IPython.lib.lexers import IPyLexer
-
-def setup(app):
- """Setup as a sphinx extension."""
-
- # This is only a lexer, so adding it below to pygments appears sufficient.
- # But if somebody knows what the right API usage should be to do that via
- # sphinx, by all means fix it here. At least having this setup.py
- # suppresses the sphinx warning we'd get without it.
- metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
- return metadata
-
-# Register the extension as a valid pygments lexer.
-# Alternatively, we could register the lexer with pygments instead. This would
-# require using setuptools entrypoints: http://pygments.org/docs/plugins
-
-ipy2 = IPyLexer(python3=False)
-ipy3 = IPyLexer(python3=True)
-
-highlighting.lexers['ipython'] = ipy2
-highlighting.lexers['ipython2'] = ipy2
-highlighting.lexers['ipython3'] = ipy3
+"""
+reST directive for syntax-highlighting ipython interactive sessions.
+
+"""
+
+from sphinx import highlighting
+from IPython.lib.lexers import IPyLexer
+
+def setup(app):
+ """Setup as a sphinx extension."""
+
+    # This is only a lexer, so adding it to pygments below appears sufficient.
+ # But if somebody knows what the right API usage should be to do that via
+ # sphinx, by all means fix it here. At least having this setup.py
+ # suppresses the sphinx warning we'd get without it.
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
+
+# Register the extension as a valid pygments lexer.
+# Alternatively, we could register the lexer with pygments instead. This would
+# require using setuptools entrypoints: http://pygments.org/docs/plugins
+
+ipy2 = IPyLexer(python3=False)
+ipy3 = IPyLexer(python3=True)
+
+highlighting.lexers['ipython'] = ipy2
+highlighting.lexers['ipython2'] = ipy2
+highlighting.lexers['ipython3'] = ipy3
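Enabling the extension in a project's conf.py is enough; the module-level statements above register the three aliases as an import side effect. A minimal sketch:

    # conf.py (sketch)
    extensions = [
        'IPython.sphinxext.ipython_console_highlighting',
    ]
    # Afterwards, reST can use:  .. code-block:: ipython  (or ipython2/ipython3)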
diff --git a/contrib/python/ipython/py2/IPython/sphinxext/ipython_directive.py b/contrib/python/ipython/py2/IPython/sphinxext/ipython_directive.py
index a9e9b65108..8df9ace1f3 100644
--- a/contrib/python/ipython/py2/IPython/sphinxext/ipython_directive.py
+++ b/contrib/python/ipython/py2/IPython/sphinxext/ipython_directive.py
@@ -1,1178 +1,1178 @@
-# -*- coding: utf-8 -*-
-"""
-Sphinx directive to support embedded IPython code.
-
-This directive allows pasting of entire interactive IPython sessions, prompts
-and all, and their code will actually get re-executed at doc build time, with
-all prompts renumbered sequentially. It also allows you to input code as a pure
-python input by giving the argument python to the directive. The output looks
-like an interactive ipython section.
-
-To enable this directive, simply list it in your Sphinx ``conf.py`` file
-(making sure the directory where you placed it is visible to sphinx, as is
-needed for all Sphinx directives). For example, to enable syntax highlighting
-and the IPython directive::
-
- extensions = ['IPython.sphinxext.ipython_console_highlighting',
- 'IPython.sphinxext.ipython_directive']
-
-The IPython directive outputs code-blocks with the language 'ipython'. So
-if you do not have the syntax highlighting extension enabled as well, then
-all rendered code-blocks will be uncolored. By default this directive assumes
-that your prompts are unchanged IPython ones, but this can be customized.
-The configurable options that can be placed in conf.py are:
-
-ipython_savefig_dir:
- The directory in which to save the figures. This is relative to the
- Sphinx source directory. The default is `html_static_path`.
-ipython_rgxin:
- The compiled regular expression to denote the start of IPython input
- lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
- shouldn't need to change this.
-ipython_rgxout:
- The compiled regular expression to denote the start of IPython output
- lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
- shouldn't need to change this.
-ipython_promptin:
- The string to represent the IPython input prompt in the generated ReST.
- The default is 'In [%d]:'. This expects that the line numbers are used
- in the prompt.
-ipython_promptout:
- The string to represent the IPython prompt in the generated ReST. The
- default is 'Out [%d]:'. This expects that the line numbers are used
- in the prompt.
-ipython_mplbackend:
- The string which specifies if the embedded Sphinx shell should import
- Matplotlib and set the backend. The value specifies a backend that is
- passed to `matplotlib.use()` before any lines in `ipython_execlines` are
- executed. If not specified in conf.py, then the default value of 'agg' is
- used. To use the IPython directive without matplotlib as a dependency, set
- the value to `None`. It may end up that matplotlib is still imported
- if the user specifies so in `ipython_execlines` or makes use of the
- @savefig pseudo decorator.
-ipython_execlines:
- A list of strings to be exec'd in the embedded Sphinx shell. Typical
- usage is to make certain packages always available. Set this to an empty
- list if you wish to have no imports always available. If specified in
- conf.py as `None`, then it has the effect of making no imports available.
- If omitted from conf.py altogether, then the default value of
- ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
-ipython_holdcount
- When the @suppress pseudo-decorator is used, the execution count can be
- incremented or not. The default behavior is to hold the execution count,
- corresponding to a value of `True`. Set this to `False` to increment
- the execution count after each suppressed command.
-
-As an example, to use the IPython directive when `matplotlib` is not available,
-one sets the backend to `None`::
-
- ipython_mplbackend = None
-
-An example usage of the directive is:
-
-.. code-block:: rst
-
- .. ipython::
-
- In [1]: x = 1
-
- In [2]: y = x**2
-
- In [3]: print(y)
-
-See http://matplotlib.org/sampledoc/ipython_directive.html for additional
-documentation.
-
-Pseudo-Decorators
-=================
-
-Note: Only one decorator is supported per input. If more than one decorator
-is specified, then only the last one is used.
-
-In addition to the Pseudo-Decorators/options described at the above link,
-several enhancements have been made. The directive will emit a message to the
-console at build-time if code-execution resulted in an exception or warning.
-You can suppress these on a per-block basis by specifying the :okexcept:
-or :okwarning: options:
-
-.. code-block:: rst
-
- .. ipython::
- :okexcept:
- :okwarning:
-
- In [1]: 1/0
- In [2]: # raise warning.
-
+# -*- coding: utf-8 -*-
+"""
+Sphinx directive to support embedded IPython code.
+
+This directive allows pasting of entire interactive IPython sessions, prompts
+and all, and their code will actually get re-executed at doc build time, with
+all prompts renumbered sequentially. It also allows you to input code as a pure
+python input by giving the argument python to the directive. The output looks
+like an interactive ipython session.
+
+To enable this directive, simply list it in your Sphinx ``conf.py`` file
+(making sure the directory where you placed it is visible to sphinx, as is
+needed for all Sphinx directives). For example, to enable syntax highlighting
+and the IPython directive::
+
+ extensions = ['IPython.sphinxext.ipython_console_highlighting',
+ 'IPython.sphinxext.ipython_directive']
+
+The IPython directive outputs code-blocks with the language 'ipython'. So
+if you do not have the syntax highlighting extension enabled as well, then
+all rendered code-blocks will be uncolored. By default this directive assumes
+that your prompts are unchanged IPython ones, but this can be customized.
+The configurable options that can be placed in conf.py are:
+
+ipython_savefig_dir:
+ The directory in which to save the figures. This is relative to the
+ Sphinx source directory. The default is `html_static_path`.
+ipython_rgxin:
+ The compiled regular expression to denote the start of IPython input
+ lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
+ shouldn't need to change this.
+ipython_rgxout:
+ The compiled regular expression to denote the start of IPython output
+ lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
+ shouldn't need to change this.
+ipython_promptin:
+ The string to represent the IPython input prompt in the generated ReST.
+ The default is 'In [%d]:'. This expects that the line numbers are used
+ in the prompt.
+ipython_promptout:
+ The string to represent the IPython prompt in the generated ReST. The
+ default is 'Out [%d]:'. This expects that the line numbers are used
+ in the prompt.
+ipython_mplbackend:
+ The string which specifies if the embedded Sphinx shell should import
+ Matplotlib and set the backend. The value specifies a backend that is
+ passed to `matplotlib.use()` before any lines in `ipython_execlines` are
+ executed. If not specified in conf.py, then the default value of 'agg' is
+ used. To use the IPython directive without matplotlib as a dependency, set
+ the value to `None`. It may end up that matplotlib is still imported
+ if the user specifies so in `ipython_execlines` or makes use of the
+ @savefig pseudo decorator.
+ipython_execlines:
+ A list of strings to be exec'd in the embedded Sphinx shell. Typical
+ usage is to make certain packages always available. Set this to an empty
+ list if you wish to have no imports always available. If specified in
+ conf.py as `None`, then it has the effect of making no imports available.
+ If omitted from conf.py altogether, then the default value of
+ ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
+ipython_holdcount:
+ When the @suppress pseudo-decorator is used, the execution count can be
+ incremented or not. The default behavior is to hold the execution count,
+ corresponding to a value of `True`. Set this to `False` to increment
+ the execution count after each suppressed command.
+
+As an example, to use the IPython directive when `matplotlib` is not available,
+one sets the backend to `None`::
+
+ ipython_mplbackend = None
+
+An example usage of the directive is:
+
+.. code-block:: rst
+
+ .. ipython::
+
+ In [1]: x = 1
+
+ In [2]: y = x**2
+
+ In [3]: print(y)
+
+See http://matplotlib.org/sampledoc/ipython_directive.html for additional
+documentation.
+
+Pseudo-Decorators
+=================
+
+Note: Only one decorator is supported per input. If more than one decorator
+is specified, then only the last one is used.
+
+In addition to the Pseudo-Decorators/options described at the above link,
+several enhancements have been made. The directive will emit a message to the
+console at build-time if code-execution resulted in an exception or warning.
+You can suppress these on a per-block basis by specifying the :okexcept:
+or :okwarning: options:
+
+.. code-block:: rst
+
+ .. ipython::
+ :okexcept:
+ :okwarning:
+
+ In [1]: 1/0
+ In [2]: # raise warning.
+
To Do
-----
-
-- Turn the ad-hoc test() function into a real test suite.
-- Break up ipython-specific functionality from matplotlib stuff into better
- separated code.
-
-Authors
--------
-
-- John D Hunter: orignal author.
-- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
-- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
-- Skipper Seabold, refactoring, cleanups, pure python addition
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import atexit
+
+- Turn the ad-hoc test() function into a real test suite.
+- Break up ipython-specific functionality from matplotlib stuff into better
+ separated code.
+
+Authors
+-------
+
+- John D Hunter: original author.
+- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
+- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
+- Skipper Seabold: refactoring, cleanups, pure python addition.
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import atexit
import errno
-import os
-import re
-import sys
-import tempfile
-import ast
-import warnings
-import shutil
-
-
-# Third-party
-from docutils.parsers.rst import directives
+import os
+import re
+import sys
+import tempfile
+import ast
+import warnings
+import shutil
+
+
+# Third-party
+from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
-
-# Our own
-from traitlets.config import Config
-from IPython import InteractiveShell
-from IPython.core.profiledir import ProfileDir
-from IPython.utils import io
-from IPython.utils.py3compat import PY3
-
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-# for tokenizing blocks
-COMMENT, INPUT, OUTPUT = range(3)
-
-#-----------------------------------------------------------------------------
-# Functions and class declarations
-#-----------------------------------------------------------------------------
-
-def block_parser(part, rgxin, rgxout, fmtin, fmtout):
- """
- part is a string of ipython text, comprised of at most one
- input, one output, comments, and blank lines. The block parser
- parses the text into a list of::
-
- blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
-
- where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
- data is, depending on the type of token::
-
- COMMENT : the comment string
-
- INPUT: the (DECORATOR, INPUT_LINE, REST) where
- DECORATOR: the input decorator (or None)
- INPUT_LINE: the input as string (possibly multi-line)
- REST : any stdout generated by the input line (not OUTPUT)
-
- OUTPUT: the output string, possibly multi-line
-
- """
- block = []
- lines = part.split('\n')
- N = len(lines)
- i = 0
- decorator = None
- while 1:
-
- if i==N:
- # nothing left to parse -- the last line
- break
-
- line = lines[i]
- i += 1
- line_stripped = line.strip()
- if line_stripped.startswith('#'):
- block.append((COMMENT, line))
- continue
-
- if line_stripped.startswith('@'):
- # Here is where we assume there is, at most, one decorator.
- # Might need to rethink this.
- decorator = line_stripped
- continue
-
- # does this look like an input line?
- matchin = rgxin.match(line)
- if matchin:
- lineno, inputline = int(matchin.group(1)), matchin.group(2)
-
- # the ....: continuation string
- continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
- Nc = len(continuation)
- # input lines can continue on for more than one line, if
- # we have a '\' line continuation char or a function call
- # echo line 'print'. The input line can only be
- # terminated by the end of the block or an output line, so
- # we parse out the rest of the input line if it is
- # multiline as well as any echo text
-
- rest = []
- while i<N:
-
- # look ahead; if the next line is blank, or a comment, or
- # an output line, we're done
-
- nextline = lines[i]
- matchout = rgxout.match(nextline)
- #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
- if matchout or nextline.startswith('#'):
- break
- elif nextline.startswith(continuation):
- # The default ipython_rgx* treat the space following the colon as optional.
- # However, If the space is there we must consume it or code
- # employing the cython_magic extension will fail to execute.
- #
- # This works with the default ipython_rgx* patterns,
- # If you modify them, YMMV.
- nextline = nextline[Nc:]
- if nextline and nextline[0] == ' ':
- nextline = nextline[1:]
-
- inputline += '\n' + nextline
- else:
- rest.append(nextline)
- i+= 1
-
- block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
- continue
-
- # if it looks like an output line grab all the text to the end
- # of the block
- matchout = rgxout.match(line)
- if matchout:
- lineno, output = int(matchout.group(1)), matchout.group(2)
- if i<N-1:
- output = '\n'.join([output] + lines[i:])
-
- block.append((OUTPUT, output))
- break
-
- return block
-
-
-class EmbeddedSphinxShell(object):
- """An embedded IPython instance to run inside Sphinx"""
-
- def __init__(self, exec_lines=None):
-
- self.cout = StringIO()
-
- if exec_lines is None:
- exec_lines = []
-
- # Create config object for IPython
- config = Config()
- config.HistoryManager.hist_file = ':memory:'
- config.InteractiveShell.autocall = False
- config.InteractiveShell.autoindent = False
- config.InteractiveShell.colors = 'NoColor'
-
- # create a profile so instance history isn't saved
- tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
- profname = 'auto_profile_sphinx_build'
- pdir = os.path.join(tmp_profile_dir,profname)
- profile = ProfileDir.create_profile_dir(pdir)
-
- # Create and initialize global ipython, but don't start its mainloop.
- # This will persist across different EmbededSphinxShell instances.
- IP = InteractiveShell.instance(config=config, profile_dir=profile)
- atexit.register(self.cleanup)
-
- # Store a few parts of IPython we'll need.
- self.IP = IP
- self.user_ns = self.IP.user_ns
- self.user_global_ns = self.IP.user_global_ns
-
- self.input = ''
- self.output = ''
- self.tmp_profile_dir = tmp_profile_dir
-
- self.is_verbatim = False
- self.is_doctest = False
- self.is_suppress = False
-
- # Optionally, provide more detailed information to shell.
- # this is assigned by the SetUp method of IPythonDirective
- # to point at itself.
- #
- # So, you can access handy things at self.directive.state
- self.directive = None
-
- # on the first call to the savefig decorator, we'll import
- # pyplot as plt so we can make a call to the plt.gcf().savefig
- self._pyplot_imported = False
-
- # Prepopulate the namespace.
- for line in exec_lines:
- self.process_input_line(line, store_history=False)
-
- def cleanup(self):
- shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
-
- def clear_cout(self):
- self.cout.seek(0)
- self.cout.truncate(0)
-
- def process_input_line(self, line, store_history=True):
- """process the input, capturing stdout"""
-
- stdout = sys.stdout
- splitter = self.IP.input_splitter
- try:
- sys.stdout = self.cout
- splitter.push(line)
- more = splitter.push_accepts_more()
- if not more:
- source_raw = splitter.raw_reset()
- self.IP.run_cell(source_raw, store_history=store_history)
- finally:
- sys.stdout = stdout
-
- def process_image(self, decorator):
- """
- # build out an image directive like
- # .. image:: somefile.png
- # :width 4in
- #
- # from an input like
- # savefig somefile.png width=4in
- """
- savefig_dir = self.savefig_dir
- source_dir = self.source_dir
- saveargs = decorator.split(' ')
- filename = saveargs[1]
+
+# Our own
+from traitlets.config import Config
+from IPython import InteractiveShell
+from IPython.core.profiledir import ProfileDir
+from IPython.utils import io
+from IPython.utils.py3compat import PY3
+
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+# for tokenizing blocks
+COMMENT, INPUT, OUTPUT = range(3)
+
+#-----------------------------------------------------------------------------
+# Functions and class declarations
+#-----------------------------------------------------------------------------
+
+def block_parser(part, rgxin, rgxout, fmtin, fmtout):
+ """
+ part is a string of ipython text, comprised of at most one
+ input, one output, comments, and blank lines. The block parser
+ parses the text into a list of::
+
+ blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
+
+ where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
+ data is, depending on the type of token::
+
+ COMMENT : the comment string
+
+ INPUT: the (DECORATOR, INPUT_LINE, REST) where
+ DECORATOR: the input decorator (or None)
+ INPUT_LINE: the input as string (possibly multi-line)
+ REST : any stdout generated by the input line (not OUTPUT)
+
+ OUTPUT: the output string, possibly multi-line
+
+ """
+ block = []
+ lines = part.split('\n')
+ N = len(lines)
+ i = 0
+ decorator = None
+ while 1:
+
+ if i==N:
+ # nothing left to parse -- the last line
+ break
+
+ line = lines[i]
+ i += 1
+ line_stripped = line.strip()
+ if line_stripped.startswith('#'):
+ block.append((COMMENT, line))
+ continue
+
+ if line_stripped.startswith('@'):
+ # Here is where we assume there is, at most, one decorator.
+ # Might need to rethink this.
+ decorator = line_stripped
+ continue
+
+ # does this look like an input line?
+ matchin = rgxin.match(line)
+ if matchin:
+ lineno, inputline = int(matchin.group(1)), matchin.group(2)
+
+ # the ....: continuation string
+ continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
+ Nc = len(continuation)
+ # input lines can continue on for more than one line, if
+ # we have a '\' line continuation char or a function call
+ # echo line 'print'. The input line can only be
+ # terminated by the end of the block or an output line, so
+ # we parse out the rest of the input line if it is
+ # multiline as well as any echo text
+
+ rest = []
+ while i<N:
+
+ # look ahead; if the next line is blank, or a comment, or
+ # an output line, we're done
+
+ nextline = lines[i]
+ matchout = rgxout.match(nextline)
+ #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
+ if matchout or nextline.startswith('#'):
+ break
+ elif nextline.startswith(continuation):
+ # The default ipython_rgx* treat the space following the colon as optional.
+            # However, if the space is there we must consume it or code
+ # employing the cython_magic extension will fail to execute.
+ #
+ # This works with the default ipython_rgx* patterns,
+ # If you modify them, YMMV.
+ nextline = nextline[Nc:]
+ if nextline and nextline[0] == ' ':
+ nextline = nextline[1:]
+
+ inputline += '\n' + nextline
+ else:
+ rest.append(nextline)
+ i+= 1
+
+ block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
+ continue
+
+ # if it looks like an output line grab all the text to the end
+ # of the block
+ matchout = rgxout.match(line)
+ if matchout:
+ lineno, output = int(matchout.group(1)), matchout.group(2)
+ if i<N-1:
+ output = '\n'.join([output] + lines[i:])
+
+ block.append((OUTPUT, output))
+ break
+
+ return block
+
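# Illustrative call of block_parser (the regex patterns mirror the documented
# defaults; the result shown is what this tokenizer returns):
#
#   import re
#   rgxin = re.compile(r'In \[(\d+)\]:\s?(.*)\s*')
#   rgxout = re.compile(r'Out\[(\d+)\]:\s?(.*)\s*')
#   part = 'In [1]: x = 1\nOut[1]: 1'
#   block_parser(part, rgxin, rgxout, 'In [%d]:', 'Out[%d]:')
#   # -> [(INPUT, (None, 'x = 1', '')), (OUTPUT, '1')]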
+
+class EmbeddedSphinxShell(object):
+ """An embedded IPython instance to run inside Sphinx"""
+
+ def __init__(self, exec_lines=None):
+
+ self.cout = StringIO()
+
+ if exec_lines is None:
+ exec_lines = []
+
+ # Create config object for IPython
+ config = Config()
+ config.HistoryManager.hist_file = ':memory:'
+ config.InteractiveShell.autocall = False
+ config.InteractiveShell.autoindent = False
+ config.InteractiveShell.colors = 'NoColor'
+
+ # create a profile so instance history isn't saved
+ tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
+ profname = 'auto_profile_sphinx_build'
+ pdir = os.path.join(tmp_profile_dir,profname)
+ profile = ProfileDir.create_profile_dir(pdir)
+
+ # Create and initialize global ipython, but don't start its mainloop.
+        # This will persist across different EmbeddedSphinxShell instances.
+ IP = InteractiveShell.instance(config=config, profile_dir=profile)
+ atexit.register(self.cleanup)
+
+ # Store a few parts of IPython we'll need.
+ self.IP = IP
+ self.user_ns = self.IP.user_ns
+ self.user_global_ns = self.IP.user_global_ns
+
+ self.input = ''
+ self.output = ''
+ self.tmp_profile_dir = tmp_profile_dir
+
+ self.is_verbatim = False
+ self.is_doctest = False
+ self.is_suppress = False
+
+ # Optionally, provide more detailed information to shell.
+ # this is assigned by the SetUp method of IPythonDirective
+ # to point at itself.
+ #
+ # So, you can access handy things at self.directive.state
+ self.directive = None
+
+ # on the first call to the savefig decorator, we'll import
+ # pyplot as plt so we can make a call to the plt.gcf().savefig
+ self._pyplot_imported = False
+
+ # Prepopulate the namespace.
+ for line in exec_lines:
+ self.process_input_line(line, store_history=False)
+
+ def cleanup(self):
+ shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
+
+ def clear_cout(self):
+ self.cout.seek(0)
+ self.cout.truncate(0)
+
+ def process_input_line(self, line, store_history=True):
+ """process the input, capturing stdout"""
+
+ stdout = sys.stdout
+ splitter = self.IP.input_splitter
+ try:
+ sys.stdout = self.cout
+ splitter.push(line)
+ more = splitter.push_accepts_more()
+ if not more:
+ source_raw = splitter.raw_reset()
+ self.IP.run_cell(source_raw, store_history=store_history)
+ finally:
+ sys.stdout = stdout
+
+ def process_image(self, decorator):
+ """
+ # build out an image directive like
+ # .. image:: somefile.png
+ # :width 4in
+ #
+ # from an input like
+ # savefig somefile.png width=4in
+ """
+ savefig_dir = self.savefig_dir
+ source_dir = self.source_dir
+ saveargs = decorator.split(' ')
+ filename = saveargs[1]
# insert relative path to image file in source (as absolute path for Sphinx)
outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
-
- imagerows = ['.. image:: %s'%outfile]
-
- for kwarg in saveargs[2:]:
- arg, val = kwarg.split('=')
- arg = arg.strip()
- val = val.strip()
- imagerows.append(' :%s: %s'%(arg, val))
-
- image_file = os.path.basename(outfile) # only return file name
- image_directive = '\n'.join(imagerows)
- return image_file, image_directive
-
- # Callbacks for each type of token
- def process_input(self, data, input_prompt, lineno):
- """
- Process data block for INPUT token.
-
- """
- decorator, input, rest = data
- image_file = None
- image_directive = None
-
- is_verbatim = decorator=='@verbatim' or self.is_verbatim
- is_doctest = (decorator is not None and \
- decorator.startswith('@doctest')) or self.is_doctest
- is_suppress = decorator=='@suppress' or self.is_suppress
- is_okexcept = decorator=='@okexcept' or self.is_okexcept
- is_okwarning = decorator=='@okwarning' or self.is_okwarning
- is_savefig = decorator is not None and \
- decorator.startswith('@savefig')
-
- input_lines = input.split('\n')
- if len(input_lines) > 1:
- if input_lines[-1] != "":
- input_lines.append('') # make sure there's a blank line
- # so splitter buffer gets reset
-
- continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
-
- if is_savefig:
- image_file, image_directive = self.process_image(decorator)
-
- ret = []
- is_semicolon = False
-
- # Hold the execution count, if requested to do so.
- if is_suppress and self.hold_count:
- store_history = False
- else:
- store_history = True
-
- # Note: catch_warnings is not thread safe
- with warnings.catch_warnings(record=True) as ws:
- for i, line in enumerate(input_lines):
- if line.endswith(';'):
- is_semicolon = True
-
- if i == 0:
- # process the first input line
- if is_verbatim:
- self.process_input_line('')
- self.IP.execution_count += 1 # increment it anyway
- else:
- # only submit the line in non-verbatim mode
- self.process_input_line(line, store_history=store_history)
- formatted_line = '%s %s'%(input_prompt, line)
- else:
- # process a continuation line
- if not is_verbatim:
- self.process_input_line(line, store_history=store_history)
-
- formatted_line = '%s %s'%(continuation, line)
-
- if not is_suppress:
- ret.append(formatted_line)
-
- if not is_suppress and len(rest.strip()) and is_verbatim:
- # The "rest" is the standard output of the input. This needs to be
- # added when in verbatim mode. If there is no "rest", then we don't
- # add it, as the new line will be added by the processed output.
- ret.append(rest)
-
- # Fetch the processed output. (This is not the submitted output.)
- self.cout.seek(0)
- processed_output = self.cout.read()
- if not is_suppress and not is_semicolon:
- #
- # In IPythonDirective.run, the elements of `ret` are eventually
- # combined such that '' entries correspond to newlines. So if
-        # `processed_output` is equal to '', then adding it to `ret`
- # ensures that there is a blank line between consecutive inputs
- # that have no outputs, as in:
- #
- # In [1]: x = 4
- #
- # In [2]: x = 5
- #
- # When there is processed output, it has a '\n' at the tail end. So
- # adding the output to `ret` will provide the necessary spacing
- # between consecutive input/output blocks, as in:
- #
- # In [1]: x
- # Out[1]: 5
- #
- # In [2]: x
- # Out[2]: 5
- #
- # When there is stdout from the input, it also has a '\n' at the
- # tail end, and so this ensures proper spacing as well. E.g.:
- #
- # In [1]: print x
- # 5
- #
- # In [2]: x = 5
- #
- # When in verbatim mode, `processed_output` is empty (because
-        # nothing was passed to IP). Sometimes the submitted code block has
- # an Out[] portion and sometimes it does not. When it does not, we
- # need to ensure proper spacing, so we have to add '' to `ret`.
- # However, if there is an Out[] in the submitted code, then we do
- # not want to add a newline as `process_output` has stuff to add.
- # The difficulty is that `process_input` doesn't know if
- # `process_output` will be called---so it doesn't know if there is
-        # Out[] in the code block. This requires that we include a hack in
- # `process_block`. See the comments there.
- #
- ret.append(processed_output)
- elif is_semicolon:
- # Make sure there is a newline after the semicolon.
- ret.append('')
-
- # context information
- filename = "Unknown"
- lineno = 0
- if self.directive.state:
- filename = self.directive.state.document.current_source
- lineno = self.directive.state.document.current_line
-
- # output any exceptions raised during execution to stdout
- # unless :okexcept: has been specified.
- if not is_okexcept and "Traceback" in processed_output:
- s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
- s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
- sys.stdout.write('\n\n>>>' + ('-' * 73))
- sys.stdout.write(s)
- sys.stdout.write(processed_output)
- sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
-
- # output any warning raised during execution to stdout
- # unless :okwarning: has been specified.
- if not is_okwarning:
- for w in ws:
- s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
- s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
- sys.stdout.write('\n\n>>>' + ('-' * 73))
- sys.stdout.write(s)
- sys.stdout.write(('-' * 76) + '\n')
- s=warnings.formatwarning(w.message, w.category,
- w.filename, w.lineno, w.line)
- sys.stdout.write(s)
- sys.stdout.write('<<<' + ('-' * 73) + '\n')
-
- self.cout.truncate(0)
-
- return (ret, input_lines, processed_output,
- is_doctest, decorator, image_file, image_directive)
-
-
- def process_output(self, data, output_prompt, input_lines, output,
- is_doctest, decorator, image_file):
- """
- Process data block for OUTPUT token.
-
- """
- # Recall: `data` is the submitted output, and `output` is the processed
- # output from `input_lines`.
-
- TAB = ' ' * 4
-
- if is_doctest and output is not None:
-
- found = output # This is the processed output
- found = found.strip()
- submitted = data.strip()
-
- if self.directive is None:
- source = 'Unavailable'
- content = 'Unavailable'
- else:
- source = self.directive.state.document.current_source
- content = self.directive.content
- # Add tabs and join into a single string.
- content = '\n'.join([TAB + line for line in content])
-
- # Make sure the output contains the output prompt.
- ind = found.find(output_prompt)
- if ind < 0:
- e = ('output does not contain output prompt\n\n'
- 'Document source: {0}\n\n'
- 'Raw content: \n{1}\n\n'
- 'Input line(s):\n{TAB}{2}\n\n'
- 'Output line(s):\n{TAB}{3}\n\n')
- e = e.format(source, content, '\n'.join(input_lines),
- repr(found), TAB=TAB)
- raise RuntimeError(e)
- found = found[len(output_prompt):].strip()
-
- # Handle the actual doctest comparison.
- if decorator.strip() == '@doctest':
- # Standard doctest
- if found != submitted:
- e = ('doctest failure\n\n'
- 'Document source: {0}\n\n'
- 'Raw content: \n{1}\n\n'
- 'On input line(s):\n{TAB}{2}\n\n'
- 'we found output:\n{TAB}{3}\n\n'
- 'instead of the expected:\n{TAB}{4}\n\n')
- e = e.format(source, content, '\n'.join(input_lines),
- repr(found), repr(submitted), TAB=TAB)
- raise RuntimeError(e)
- else:
- self.custom_doctest(decorator, input_lines, found, submitted)
-
- # When in verbatim mode, this holds additional submitted output
- # to be written in the final Sphinx output.
- # https://github.com/ipython/ipython/issues/5776
- out_data = []
-
- is_verbatim = decorator=='@verbatim' or self.is_verbatim
- if is_verbatim and data.strip():
- # Note that `ret` in `process_block` has '' as its last element if
- # the code block was in verbatim mode. So if there is no submitted
- # output, then we will have proper spacing only if we do not add
- # an additional '' to `out_data`. This is why we condition on
- # `and data.strip()`.
-
- # The submitted output has no output prompt. If we want the
- # prompt and the code to appear, we need to join them now
- # instead of adding them separately---as this would create an
- # undesired newline. How we do this ultimately depends on the
- # format of the output regex. I'll do what works for the default
- # prompt for now, and we might have to adjust if it doesn't work
- # in other cases. Finally, the submitted output does not have
- # a trailing newline, so we must add it manually.
- out_data.append("{0} {1}\n".format(output_prompt, data))
-
- return out_data
-
- def process_comment(self, data):
- """Process data fPblock for COMMENT token."""
- if not self.is_suppress:
- return [data]
-
- def save_image(self, image_file):
- """
- Saves the image file to disk.
- """
- self.ensure_pyplot()
- command = 'plt.gcf().savefig("%s")'%image_file
- #print 'SAVEFIG', command # dbg
- self.process_input_line('bookmark ipy_thisdir', store_history=False)
- self.process_input_line('cd -b ipy_savedir', store_history=False)
- self.process_input_line(command, store_history=False)
- self.process_input_line('cd -b ipy_thisdir', store_history=False)
- self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
- self.clear_cout()
-
- def process_block(self, block):
- """
- process block from the block_parser and return a list of processed lines
- """
- ret = []
- output = None
- input_lines = None
- lineno = self.IP.execution_count
-
- input_prompt = self.promptin % lineno
- output_prompt = self.promptout % lineno
- image_file = None
- image_directive = None
-
- found_input = False
- for token, data in block:
- if token == COMMENT:
- out_data = self.process_comment(data)
- elif token == INPUT:
- found_input = True
- (out_data, input_lines, output, is_doctest,
- decorator, image_file, image_directive) = \
- self.process_input(data, input_prompt, lineno)
- elif token == OUTPUT:
- if not found_input:
-
- TAB = ' ' * 4
- linenumber = 0
- source = 'Unavailable'
- content = 'Unavailable'
- if self.directive:
- linenumber = self.directive.state.document.current_line
- source = self.directive.state.document.current_source
- content = self.directive.content
- # Add tabs and join into a single string.
- content = '\n'.join([TAB + line for line in content])
-
- e = ('\n\nInvalid block: Block contains an output prompt '
- 'without an input prompt.\n\n'
- 'Document source: {0}\n\n'
- 'Content begins at line {1}: \n\n{2}\n\n'
- 'Problematic block within content: \n\n{TAB}{3}\n\n')
- e = e.format(source, linenumber, content, block, TAB=TAB)
-
- # Write, rather than include in exception, since Sphinx
- # will truncate tracebacks.
- sys.stdout.write(e)
- raise RuntimeError('An invalid block was detected.')
-
- out_data = \
- self.process_output(data, output_prompt, input_lines,
- output, is_doctest, decorator,
- image_file)
- if out_data:
- # Then there was user submitted output in verbatim mode.
- # We need to remove the last element of `ret` that was
- # added in `process_input`, as it is '' and would introduce
- # an undesirable newline.
- assert(ret[-1] == '')
- del ret[-1]
-
- if out_data:
- ret.extend(out_data)
-
- # save the image files
- if image_file is not None:
- self.save_image(image_file)
-
- return ret, image_directive
-
- def ensure_pyplot(self):
- """
- Ensures that pyplot has been imported into the embedded IPython shell.
-
- Also, makes sure to set the backend appropriately if not set already.
-
- """
- # We are here if the @figure pseudo decorator was used. Thus, it's
-        # possible that we could be here even if ipython_mplbackend were set to
- # `None`. That's also strange and perhaps worthy of raising an
- # exception, but for now, we just set the backend to 'agg'.
-
- if not self._pyplot_imported:
- if 'matplotlib.backends' not in sys.modules:
-            # Then ipython_mplbackend was set to None but there was a
- # call to the @figure decorator (and ipython_execlines did
- # not set a backend).
- #raise Exception("No backend was set, but @figure was used!")
- import matplotlib
- matplotlib.use('agg')
-
- # Always import pyplot into embedded shell.
- self.process_input_line('import matplotlib.pyplot as plt',
- store_history=False)
- self._pyplot_imported = True
-
- def process_pure_python(self, content):
- """
-        content is a list of strings. It is unedited directive content.
-
- This runs it line by line in the InteractiveShell, prepends
- prompts as needed capturing stderr and stdout, then returns
- the content as a list as if it were ipython code
- """
- output = []
- savefig = False # keep up with this to clear figure
- multiline = False # to handle line continuation
- multiline_start = None
- fmtin = self.promptin
-
- ct = 0
-
- for lineno, line in enumerate(content):
-
- line_stripped = line.strip()
- if not len(line):
- output.append(line)
- continue
-
- # handle decorators
- if line_stripped.startswith('@'):
- output.extend([line])
- if 'savefig' in line:
- savefig = True # and need to clear figure
- continue
-
- # handle comments
- if line_stripped.startswith('#'):
- output.extend([line])
- continue
-
- # deal with lines checking for multiline
- continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
- if not multiline:
- modified = u"%s %s" % (fmtin % ct, line_stripped)
- output.append(modified)
- ct += 1
- try:
- ast.parse(line_stripped)
- output.append(u'')
- except Exception: # on a multiline
- multiline = True
- multiline_start = lineno
- else: # still on a multiline
- modified = u'%s %s' % (continuation, line)
- output.append(modified)
-
- # if the next line is indented, it should be part of multiline
- if len(content) > lineno + 1:
- nextline = content[lineno + 1]
- if len(nextline) - len(nextline.lstrip()) > 3:
- continue
- try:
- mod = ast.parse(
- '\n'.join(content[multiline_start:lineno+1]))
- if isinstance(mod.body[0], ast.FunctionDef):
- # check to see if we have the whole function
- for element in mod.body[0].body:
- if isinstance(element, ast.Return):
- multiline = False
- else:
- output.append(u'')
- multiline = False
- except Exception:
- pass
-
- if savefig: # clear figure if plotted
- self.ensure_pyplot()
- self.process_input_line('plt.clf()', store_history=False)
- self.clear_cout()
- savefig = False
-
- return output
-
- def custom_doctest(self, decorator, input_lines, found, submitted):
- """
- Perform a specialized doctest.
-
- """
- from .custom_doctests import doctests
-
- args = decorator.split()
- doctest_type = args[1]
- if doctest_type in doctests:
- doctests[doctest_type](self, args, input_lines, found, submitted)
- else:
- e = "Invalid option to @doctest: {0}".format(doctest_type)
- raise Exception(e)
-
-
-class IPythonDirective(Directive):
-
- has_content = True
- required_arguments = 0
- optional_arguments = 4 # python, suppress, verbatim, doctest
-    final_argument_whitespace = True
- option_spec = { 'python': directives.unchanged,
- 'suppress' : directives.flag,
- 'verbatim' : directives.flag,
- 'doctest' : directives.flag,
- 'okexcept': directives.flag,
- 'okwarning': directives.flag
- }
-
- shell = None
-
- seen_docs = set()
-
- def get_config_options(self):
- # contains sphinx configuration variables
- config = self.state.document.settings.env.config
-
- # get config variables to set figure output directory
- savefig_dir = config.ipython_savefig_dir
+
+ imagerows = ['.. image:: %s'%outfile]
+
+ for kwarg in saveargs[2:]:
+ arg, val = kwarg.split('=')
+ arg = arg.strip()
+ val = val.strip()
+ imagerows.append(' :%s: %s'%(arg, val))
+
+ image_file = os.path.basename(outfile) # only return file name
+ image_directive = '\n'.join(imagerows)
+ return image_file, image_directive
+
+ # Callbacks for each type of token
+ def process_input(self, data, input_prompt, lineno):
+ """
+ Process data block for INPUT token.
+
+ """
+ decorator, input, rest = data
+ image_file = None
+ image_directive = None
+
+ is_verbatim = decorator=='@verbatim' or self.is_verbatim
+ is_doctest = (decorator is not None and \
+ decorator.startswith('@doctest')) or self.is_doctest
+ is_suppress = decorator=='@suppress' or self.is_suppress
+ is_okexcept = decorator=='@okexcept' or self.is_okexcept
+ is_okwarning = decorator=='@okwarning' or self.is_okwarning
+ is_savefig = decorator is not None and \
+ decorator.startswith('@savefig')
+
+ input_lines = input.split('\n')
+ if len(input_lines) > 1:
+ if input_lines[-1] != "":
+ input_lines.append('') # make sure there's a blank line
+ # so splitter buffer gets reset
+
+ continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
+
+ if is_savefig:
+ image_file, image_directive = self.process_image(decorator)
+
+ ret = []
+ is_semicolon = False
+
+ # Hold the execution count, if requested to do so.
+ if is_suppress and self.hold_count:
+ store_history = False
+ else:
+ store_history = True
+
+ # Note: catch_warnings is not thread safe
+ with warnings.catch_warnings(record=True) as ws:
+ for i, line in enumerate(input_lines):
+ if line.endswith(';'):
+ is_semicolon = True
+
+ if i == 0:
+ # process the first input line
+ if is_verbatim:
+ self.process_input_line('')
+ self.IP.execution_count += 1 # increment it anyway
+ else:
+ # only submit the line in non-verbatim mode
+ self.process_input_line(line, store_history=store_history)
+ formatted_line = '%s %s'%(input_prompt, line)
+ else:
+ # process a continuation line
+ if not is_verbatim:
+ self.process_input_line(line, store_history=store_history)
+
+ formatted_line = '%s %s'%(continuation, line)
+
+ if not is_suppress:
+ ret.append(formatted_line)
+
+ if not is_suppress and len(rest.strip()) and is_verbatim:
+ # The "rest" is the standard output of the input. This needs to be
+ # added when in verbatim mode. If there is no "rest", then we don't
+ # add it, as the new line will be added by the processed output.
+ ret.append(rest)
+
+ # Fetch the processed output. (This is not the submitted output.)
+ self.cout.seek(0)
+ processed_output = self.cout.read()
+ if not is_suppress and not is_semicolon:
+ #
+ # In IPythonDirective.run, the elements of `ret` are eventually
+ # combined such that '' entries correspond to newlines. So if
+            # `processed_output` is equal to '', then adding it to `ret`
+ # ensures that there is a blank line between consecutive inputs
+ # that have no outputs, as in:
+ #
+ # In [1]: x = 4
+ #
+ # In [2]: x = 5
+ #
+ # When there is processed output, it has a '\n' at the tail end. So
+ # adding the output to `ret` will provide the necessary spacing
+ # between consecutive input/output blocks, as in:
+ #
+ # In [1]: x
+ # Out[1]: 5
+ #
+ # In [2]: x
+ # Out[2]: 5
+ #
+ # When there is stdout from the input, it also has a '\n' at the
+ # tail end, and so this ensures proper spacing as well. E.g.:
+ #
+ # In [1]: print x
+ # 5
+ #
+ # In [2]: x = 5
+ #
+ # When in verbatim mode, `processed_output` is empty (because
+            # nothing was passed to IP). Sometimes the submitted code block has
+ # an Out[] portion and sometimes it does not. When it does not, we
+ # need to ensure proper spacing, so we have to add '' to `ret`.
+ # However, if there is an Out[] in the submitted code, then we do
+ # not want to add a newline as `process_output` has stuff to add.
+ # The difficulty is that `process_input` doesn't know if
+ # `process_output` will be called---so it doesn't know if there is
+            # Out[] in the code block. This requires that we include a hack in
+ # `process_block`. See the comments there.
+ #
+ ret.append(processed_output)
+ elif is_semicolon:
+ # Make sure there is a newline after the semicolon.
+ ret.append('')
+
+ # context information
+ filename = "Unknown"
+ lineno = 0
+ if self.directive.state:
+ filename = self.directive.state.document.current_source
+ lineno = self.directive.state.document.current_line
+
+ # output any exceptions raised during execution to stdout
+ # unless :okexcept: has been specified.
+ if not is_okexcept and "Traceback" in processed_output:
+ s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
+ s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
+ sys.stdout.write('\n\n>>>' + ('-' * 73))
+ sys.stdout.write(s)
+ sys.stdout.write(processed_output)
+ sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
+
+ # output any warning raised during execution to stdout
+ # unless :okwarning: has been specified.
+ if not is_okwarning:
+ for w in ws:
+ s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
+ s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
+ sys.stdout.write('\n\n>>>' + ('-' * 73))
+ sys.stdout.write(s)
+ sys.stdout.write(('-' * 76) + '\n')
+ s=warnings.formatwarning(w.message, w.category,
+ w.filename, w.lineno, w.line)
+ sys.stdout.write(s)
+ sys.stdout.write('<<<' + ('-' * 73) + '\n')
+
+ self.cout.truncate(0)
+
+ return (ret, input_lines, processed_output,
+ is_doctest, decorator, image_file, image_directive)
+
+
+ def process_output(self, data, output_prompt, input_lines, output,
+ is_doctest, decorator, image_file):
+ """
+ Process data block for OUTPUT token.
+
+ """
+ # Recall: `data` is the submitted output, and `output` is the processed
+ # output from `input_lines`.
+
+ TAB = ' ' * 4
+
+ if is_doctest and output is not None:
+
+ found = output # This is the processed output
+ found = found.strip()
+ submitted = data.strip()
+
+ if self.directive is None:
+ source = 'Unavailable'
+ content = 'Unavailable'
+ else:
+ source = self.directive.state.document.current_source
+ content = self.directive.content
+ # Add tabs and join into a single string.
+ content = '\n'.join([TAB + line for line in content])
+
+ # Make sure the output contains the output prompt.
+ ind = found.find(output_prompt)
+ if ind < 0:
+ e = ('output does not contain output prompt\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'Input line(s):\n{TAB}{2}\n\n'
+ 'Output line(s):\n{TAB}{3}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines),
+ repr(found), TAB=TAB)
+ raise RuntimeError(e)
+ found = found[len(output_prompt):].strip()
+
+ # Handle the actual doctest comparison.
+ if decorator.strip() == '@doctest':
+ # Standard doctest
+ if found != submitted:
+ e = ('doctest failure\n\n'
+ 'Document source: {0}\n\n'
+ 'Raw content: \n{1}\n\n'
+ 'On input line(s):\n{TAB}{2}\n\n'
+ 'we found output:\n{TAB}{3}\n\n'
+ 'instead of the expected:\n{TAB}{4}\n\n')
+ e = e.format(source, content, '\n'.join(input_lines),
+ repr(found), repr(submitted), TAB=TAB)
+ raise RuntimeError(e)
+ else:
+ self.custom_doctest(decorator, input_lines, found, submitted)
+
+ # When in verbatim mode, this holds additional submitted output
+ # to be written in the final Sphinx output.
+ # https://github.com/ipython/ipython/issues/5776
+ out_data = []
+
+ is_verbatim = decorator=='@verbatim' or self.is_verbatim
+ if is_verbatim and data.strip():
+ # Note that `ret` in `process_block` has '' as its last element if
+ # the code block was in verbatim mode. So if there is no submitted
+ # output, then we will have proper spacing only if we do not add
+ # an additional '' to `out_data`. This is why we condition on
+ # `and data.strip()`.
+
+ # The submitted output has no output prompt. If we want the
+ # prompt and the code to appear, we need to join them now
+ # instead of adding them separately---as this would create an
+ # undesired newline. How we do this ultimately depends on the
+ # format of the output regex. I'll do what works for the default
+ # prompt for now, and we might have to adjust if it doesn't work
+ # in other cases. Finally, the submitted output does not have
+ # a trailing newline, so we must add it manually.
+ out_data.append("{0} {1}\n".format(output_prompt, data))
+
+ return out_data
+
+ def process_comment(self, data):
+ """Process data fPblock for COMMENT token."""
+ if not self.is_suppress:
+ return [data]
+
+ def save_image(self, image_file):
+ """
+ Saves the image file to disk.
+ """
+ self.ensure_pyplot()
+ command = 'plt.gcf().savefig("%s")'%image_file
+ #print 'SAVEFIG', command # dbg
+ self.process_input_line('bookmark ipy_thisdir', store_history=False)
+ self.process_input_line('cd -b ipy_savedir', store_history=False)
+ self.process_input_line(command, store_history=False)
+ self.process_input_line('cd -b ipy_thisdir', store_history=False)
+ self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
+ self.clear_cout()
+
+ def process_block(self, block):
+ """
+        Process a block from the block_parser and return a list of processed lines.
+ """
+ ret = []
+ output = None
+ input_lines = None
+ lineno = self.IP.execution_count
+
+ input_prompt = self.promptin % lineno
+ output_prompt = self.promptout % lineno
+ image_file = None
+ image_directive = None
+
+ found_input = False
+ for token, data in block:
+ if token == COMMENT:
+ out_data = self.process_comment(data)
+ elif token == INPUT:
+ found_input = True
+ (out_data, input_lines, output, is_doctest,
+ decorator, image_file, image_directive) = \
+ self.process_input(data, input_prompt, lineno)
+ elif token == OUTPUT:
+ if not found_input:
+
+ TAB = ' ' * 4
+ linenumber = 0
+ source = 'Unavailable'
+ content = 'Unavailable'
+ if self.directive:
+ linenumber = self.directive.state.document.current_line
+ source = self.directive.state.document.current_source
+ content = self.directive.content
+ # Add tabs and join into a single string.
+ content = '\n'.join([TAB + line for line in content])
+
+ e = ('\n\nInvalid block: Block contains an output prompt '
+ 'without an input prompt.\n\n'
+ 'Document source: {0}\n\n'
+ 'Content begins at line {1}: \n\n{2}\n\n'
+ 'Problematic block within content: \n\n{TAB}{3}\n\n')
+ e = e.format(source, linenumber, content, block, TAB=TAB)
+
+ # Write, rather than include in exception, since Sphinx
+ # will truncate tracebacks.
+ sys.stdout.write(e)
+ raise RuntimeError('An invalid block was detected.')
+
+ out_data = \
+ self.process_output(data, output_prompt, input_lines,
+ output, is_doctest, decorator,
+ image_file)
+ if out_data:
+ # Then there was user submitted output in verbatim mode.
+ # We need to remove the last element of `ret` that was
+ # added in `process_input`, as it is '' and would introduce
+ # an undesirable newline.
+ assert(ret[-1] == '')
+ del ret[-1]
+
+ if out_data:
+ ret.extend(out_data)
+
+ # save the image files
+ if image_file is not None:
+ self.save_image(image_file)
+
+ return ret, image_directive
+
+ def ensure_pyplot(self):
+ """
+ Ensures that pyplot has been imported into the embedded IPython shell.
+
+ Also, makes sure to set the backend appropriately if not set already.
+
+ """
+ # We are here if the @figure pseudo decorator was used. Thus, it's
+ # possible that we could be here even if python_mplbackend were set to
+ # `None`. That's also strange and perhaps worthy of raising an
+ # exception, but for now, we just set the backend to 'agg'.
+
+ if not self._pyplot_imported:
+ if 'matplotlib.backends' not in sys.modules:
+ # Then ipython_matplotlib was set to None but there was a
+ # call to the @figure decorator (and ipython_execlines did
+ # not set a backend).
+ #raise Exception("No backend was set, but @figure was used!")
+ import matplotlib
+ matplotlib.use('agg')
+
+ # Always import pyplot into embedded shell.
+ self.process_input_line('import matplotlib.pyplot as plt',
+ store_history=False)
+ self._pyplot_imported = True
+
+ def process_pure_python(self, content):
+ """
+        content is a list of strings. It is unedited directive content.
+
+        This runs it line by line in the InteractiveShell, prepends
+        prompts as needed, captures stderr and stdout, and returns
+        the content as a list as if it were ipython code.
+ """
+ output = []
+ savefig = False # keep up with this to clear figure
+ multiline = False # to handle line continuation
+ multiline_start = None
+ fmtin = self.promptin
+
+ ct = 0
+
+ for lineno, line in enumerate(content):
+
+ line_stripped = line.strip()
+ if not len(line):
+ output.append(line)
+ continue
+
+ # handle decorators
+ if line_stripped.startswith('@'):
+ output.extend([line])
+ if 'savefig' in line:
+ savefig = True # and need to clear figure
+ continue
+
+ # handle comments
+ if line_stripped.startswith('#'):
+ output.extend([line])
+ continue
+
+ # deal with lines checking for multiline
+ continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
+ if not multiline:
+ modified = u"%s %s" % (fmtin % ct, line_stripped)
+ output.append(modified)
+ ct += 1
+ try:
+ ast.parse(line_stripped)
+ output.append(u'')
+ except Exception: # on a multiline
+ multiline = True
+ multiline_start = lineno
+ else: # still on a multiline
+ modified = u'%s %s' % (continuation, line)
+ output.append(modified)
+
+ # if the next line is indented, it should be part of multiline
+ if len(content) > lineno + 1:
+ nextline = content[lineno + 1]
+ if len(nextline) - len(nextline.lstrip()) > 3:
+ continue
+ try:
+ mod = ast.parse(
+ '\n'.join(content[multiline_start:lineno+1]))
+ if isinstance(mod.body[0], ast.FunctionDef):
+ # check to see if we have the whole function
+ for element in mod.body[0].body:
+ if isinstance(element, ast.Return):
+ multiline = False
+ else:
+ output.append(u'')
+ multiline = False
+ except Exception:
+ pass
+
+ if savefig: # clear figure if plotted
+ self.ensure_pyplot()
+ self.process_input_line('plt.clf()', store_history=False)
+ self.clear_cout()
+ savefig = False
+
+ return output
+
+ def custom_doctest(self, decorator, input_lines, found, submitted):
+ """
+ Perform a specialized doctest.
+
+ """
+ from .custom_doctests import doctests
+
+ args = decorator.split()
+ doctest_type = args[1]
+ if doctest_type in doctests:
+ doctests[doctest_type](self, args, input_lines, found, submitted)
+ else:
+ e = "Invalid option to @doctest: {0}".format(doctest_type)
+ raise Exception(e)
+
+
+class IPythonDirective(Directive):
+
+ has_content = True
+ required_arguments = 0
+ optional_arguments = 4 # python, suppress, verbatim, doctest
+    final_argument_whitespace = True
+ option_spec = { 'python': directives.unchanged,
+ 'suppress' : directives.flag,
+ 'verbatim' : directives.flag,
+ 'doctest' : directives.flag,
+ 'okexcept': directives.flag,
+ 'okwarning': directives.flag
+ }
+
+ shell = None
+
+ seen_docs = set()
+
+ def get_config_options(self):
+ # contains sphinx configuration variables
+ config = self.state.document.settings.env.config
+
+ # get config variables to set figure output directory
+ savefig_dir = config.ipython_savefig_dir
source_dir = self.state.document.settings.env.srcdir
savefig_dir = os.path.join(source_dir, savefig_dir)
-
- # get regex and prompt stuff
- rgxin = config.ipython_rgxin
- rgxout = config.ipython_rgxout
- promptin = config.ipython_promptin
- promptout = config.ipython_promptout
- mplbackend = config.ipython_mplbackend
- exec_lines = config.ipython_execlines
- hold_count = config.ipython_holdcount
-
- return (savefig_dir, source_dir, rgxin, rgxout,
- promptin, promptout, mplbackend, exec_lines, hold_count)
-
- def setup(self):
- # Get configuration values.
- (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
- mplbackend, exec_lines, hold_count) = self.get_config_options()
-
+
+ # get regex and prompt stuff
+ rgxin = config.ipython_rgxin
+ rgxout = config.ipython_rgxout
+ promptin = config.ipython_promptin
+ promptout = config.ipython_promptout
+ mplbackend = config.ipython_mplbackend
+ exec_lines = config.ipython_execlines
+ hold_count = config.ipython_holdcount
+
+ return (savefig_dir, source_dir, rgxin, rgxout,
+ promptin, promptout, mplbackend, exec_lines, hold_count)
+
+ def setup(self):
+ # Get configuration values.
+ (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
+ mplbackend, exec_lines, hold_count) = self.get_config_options()
+
try:
os.makedirs(savefig_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
- if self.shell is None:
- # We will be here many times. However, when the
- # EmbeddedSphinxShell is created, its interactive shell member
- # is the same for each instance.
-
+ if self.shell is None:
+ # We will be here many times. However, when the
+ # EmbeddedSphinxShell is created, its interactive shell member
+ # is the same for each instance.
+
if mplbackend and 'matplotlib.backends' not in sys.modules:
- import matplotlib
- matplotlib.use(mplbackend)
-
- # Must be called after (potentially) importing matplotlib and
- # setting its backend since exec_lines might import pylab.
- self.shell = EmbeddedSphinxShell(exec_lines)
-
- # Store IPython directive to enable better error messages
- self.shell.directive = self
-
- # reset the execution count if we haven't processed this doc
- #NOTE: this may be borked if there are multiple seen_doc tmp files
- #check time stamp?
-        if self.state.document.current_source not in self.seen_docs:
- self.shell.IP.history_manager.reset()
- self.shell.IP.execution_count = 1
- self.seen_docs.add(self.state.document.current_source)
-
- # and attach to shell so we don't have to pass them around
- self.shell.rgxin = rgxin
- self.shell.rgxout = rgxout
- self.shell.promptin = promptin
- self.shell.promptout = promptout
- self.shell.savefig_dir = savefig_dir
- self.shell.source_dir = source_dir
- self.shell.hold_count = hold_count
-
- # setup bookmark for saving figures directory
- self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
- store_history=False)
- self.shell.clear_cout()
-
- return rgxin, rgxout, promptin, promptout
-
- def teardown(self):
- # delete last bookmark
- self.shell.process_input_line('bookmark -d ipy_savedir',
- store_history=False)
- self.shell.clear_cout()
-
- def run(self):
- debug = False
-
- #TODO, any reason block_parser can't be a method of embeddable shell
- # then we wouldn't have to carry these around
- rgxin, rgxout, promptin, promptout = self.setup()
-
- options = self.options
- self.shell.is_suppress = 'suppress' in options
- self.shell.is_doctest = 'doctest' in options
- self.shell.is_verbatim = 'verbatim' in options
- self.shell.is_okexcept = 'okexcept' in options
- self.shell.is_okwarning = 'okwarning' in options
-
- # handle pure python code
- if 'python' in self.arguments:
- content = self.content
- self.content = self.shell.process_pure_python(content)
-
- # parts consists of all text within the ipython-block.
- # Each part is an input/output block.
- parts = '\n'.join(self.content).split('\n\n')
-
- lines = ['.. code-block:: ipython', '']
- figures = []
-
- for part in parts:
- block = block_parser(part, rgxin, rgxout, promptin, promptout)
- if len(block):
- rows, figure = self.shell.process_block(block)
- for row in rows:
- lines.extend([' {0}'.format(line)
- for line in row.split('\n')])
-
- if figure is not None:
- figures.append(figure)
-
- for figure in figures:
- lines.append('')
- lines.extend(figure.split('\n'))
- lines.append('')
-
- if len(lines) > 2:
- if debug:
- print('\n'.join(lines))
- else:
- # This has to do with input, not output. But if we comment
- # these lines out, then no IPython code will appear in the
- # final output.
- self.state_machine.insert_input(
- lines, self.state_machine.input_lines.source(0))
-
- # cleanup
- self.teardown()
-
- return []
-
-# Enable as a proper Sphinx directive
-def setup(app):
- setup.app = app
-
- app.add_directive('ipython', IPythonDirective)
+ import matplotlib
+ matplotlib.use(mplbackend)
+
+ # Must be called after (potentially) importing matplotlib and
+ # setting its backend since exec_lines might import pylab.
+ self.shell = EmbeddedSphinxShell(exec_lines)
+
+ # Store IPython directive to enable better error messages
+ self.shell.directive = self
+
+ # reset the execution count if we haven't processed this doc
+ #NOTE: this may be borked if there are multiple seen_doc tmp files
+ #check time stamp?
+        if self.state.document.current_source not in self.seen_docs:
+ self.shell.IP.history_manager.reset()
+ self.shell.IP.execution_count = 1
+ self.seen_docs.add(self.state.document.current_source)
+
+ # and attach to shell so we don't have to pass them around
+ self.shell.rgxin = rgxin
+ self.shell.rgxout = rgxout
+ self.shell.promptin = promptin
+ self.shell.promptout = promptout
+ self.shell.savefig_dir = savefig_dir
+ self.shell.source_dir = source_dir
+ self.shell.hold_count = hold_count
+
+ # setup bookmark for saving figures directory
+ self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
+ store_history=False)
+ self.shell.clear_cout()
+
+ return rgxin, rgxout, promptin, promptout
+
+ def teardown(self):
+ # delete last bookmark
+ self.shell.process_input_line('bookmark -d ipy_savedir',
+ store_history=False)
+ self.shell.clear_cout()
+
+ def run(self):
+ debug = False
+
+ #TODO, any reason block_parser can't be a method of embeddable shell
+ # then we wouldn't have to carry these around
+ rgxin, rgxout, promptin, promptout = self.setup()
+
+ options = self.options
+ self.shell.is_suppress = 'suppress' in options
+ self.shell.is_doctest = 'doctest' in options
+ self.shell.is_verbatim = 'verbatim' in options
+ self.shell.is_okexcept = 'okexcept' in options
+ self.shell.is_okwarning = 'okwarning' in options
+
+ # handle pure python code
+ if 'python' in self.arguments:
+ content = self.content
+ self.content = self.shell.process_pure_python(content)
+
+ # parts consists of all text within the ipython-block.
+ # Each part is an input/output block.
+ parts = '\n'.join(self.content).split('\n\n')
+
+ lines = ['.. code-block:: ipython', '']
+ figures = []
+
+ for part in parts:
+ block = block_parser(part, rgxin, rgxout, promptin, promptout)
+ if len(block):
+ rows, figure = self.shell.process_block(block)
+ for row in rows:
+ lines.extend([' {0}'.format(line)
+ for line in row.split('\n')])
+
+ if figure is not None:
+ figures.append(figure)
+
+ for figure in figures:
+ lines.append('')
+ lines.extend(figure.split('\n'))
+ lines.append('')
+
+ if len(lines) > 2:
+ if debug:
+ print('\n'.join(lines))
+ else:
+ # This has to do with input, not output. But if we comment
+ # these lines out, then no IPython code will appear in the
+ # final output.
+ self.state_machine.insert_input(
+ lines, self.state_machine.input_lines.source(0))
+
+ # cleanup
+ self.teardown()
+
+ return []
+
+# Enable as a proper Sphinx directive
+def setup(app):
+ setup.app = app
+
+ app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', 'savefig', 'env')
- app.add_config_value('ipython_rgxin',
- re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
- app.add_config_value('ipython_rgxout',
- re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
- app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
- app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
-
- # We could just let matplotlib pick whatever is specified as the default
- # backend in the matplotlibrc file, but this would cause issues if the
- # backend didn't work in headless environments. For this reason, 'agg'
- # is a good default backend choice.
- app.add_config_value('ipython_mplbackend', 'agg', 'env')
-
- # If the user sets this config value to `None`, then EmbeddedSphinxShell's
- # __init__ method will treat it as [].
- execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
- app.add_config_value('ipython_execlines', execlines, 'env')
-
- app.add_config_value('ipython_holdcount', True, 'env')
-
- metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
- return metadata
-
-# Simple smoke test, needs to be converted to a proper automatic test.
-def test():
-
- examples = [
- r"""
-In [9]: pwd
-Out[9]: '/home/jdhunter/py4science/book'
-
-In [10]: cd bookdata/
-/home/jdhunter/py4science/book/bookdata
-
-In [2]: from pylab import *
-
-In [2]: ion()
-
-In [3]: im = imread('stinkbug.png')
-
-@savefig mystinkbug.png width=4in
-In [4]: imshow(im)
-Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
-
-""",
- r"""
-
-In [1]: x = 'hello world'
-
-# string methods can be
-# used to alter the string
-@doctest
-In [2]: x.upper()
-Out[2]: 'HELLO WORLD'
-
-@verbatim
-In [3]: x.st<TAB>
-x.startswith x.strip
-""",
- r"""
-
-In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
- .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
-
-In [131]: print url.split('&')
-['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
-
-In [60]: import urllib
-
-""",
- r"""\
-
-In [133]: import numpy.random
-
-@suppress
-In [134]: numpy.random.seed(2358)
-
-@doctest
-In [135]: numpy.random.rand(10,2)
-Out[135]:
-array([[ 0.64524308, 0.59943846],
- [ 0.47102322, 0.8715456 ],
- [ 0.29370834, 0.74776844],
- [ 0.99539577, 0.1313423 ],
- [ 0.16250302, 0.21103583],
- [ 0.81626524, 0.1312433 ],
- [ 0.67338089, 0.72302393],
- [ 0.7566368 , 0.07033696],
- [ 0.22591016, 0.77731835],
- [ 0.0072729 , 0.34273127]])
-
-""",
-
- r"""
-In [106]: print x
-jdh
-
-In [109]: for i in range(10):
- .....: print i
- .....:
- .....:
-0
-1
-2
-3
-4
-5
-6
-7
-8
-9
-""",
-
- r"""
-
-In [144]: from pylab import *
-
-In [145]: ion()
-
-# use a semicolon to suppress the output
-@savefig test_hist.png width=4in
-In [151]: hist(np.random.randn(10000), 100);
-
-
-@savefig test_plot.png width=4in
-In [151]: plot(np.random.randn(10000), 'o');
- """,
-
- r"""
-# use a semicolon to suppress the output
-In [151]: plt.clf()
-
-@savefig plot_simple.png width=4in
-In [151]: plot([1,2,3])
-
-@savefig hist_simple.png width=4in
-In [151]: hist(np.random.randn(10000), 100);
-
-""",
- r"""
-# update the current fig
-In [151]: ylabel('number')
-
-In [152]: title('normal distribution')
-
-
-@savefig hist_with_text.png
-In [153]: grid(True)
-
-@doctest float
-In [154]: 0.1 + 0.2
-Out[154]: 0.3
-
-@doctest float
-In [155]: np.arange(16).reshape(4,4)
-Out[155]:
-array([[ 0, 1, 2, 3],
- [ 4, 5, 6, 7],
- [ 8, 9, 10, 11],
- [12, 13, 14, 15]])
-
-In [1]: x = np.arange(16, dtype=float).reshape(4,4)
-
-In [2]: x[0,0] = np.inf
-
-In [3]: x[0,1] = np.nan
-
-@doctest float
-In [4]: x
-Out[4]:
-array([[ inf, nan, 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]])
-
-
- """,
- ]
-    # skip the first example, which depends on a local file:
- examples = examples[1:]
-
- #ipython_directive.DEBUG = True # dbg
- #options = dict(suppress=True) # dbg
- options = dict()
- for example in examples:
- content = example.split('\n')
- IPythonDirective('debug', arguments=None, options=options,
- content=content, lineno=0,
- content_offset=None, block_text=None,
- state=None, state_machine=None,
- )
-
-# Run test suite as a script
-if __name__=='__main__':
- if not os.path.isdir('_static'):
- os.mkdir('_static')
- test()
- print('All OK? Check figures in _static/')
+ app.add_config_value('ipython_rgxin',
+ re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
+ app.add_config_value('ipython_rgxout',
+ re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
+ app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
+ app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
+
+ # We could just let matplotlib pick whatever is specified as the default
+ # backend in the matplotlibrc file, but this would cause issues if the
+ # backend didn't work in headless environments. For this reason, 'agg'
+ # is a good default backend choice.
+ app.add_config_value('ipython_mplbackend', 'agg', 'env')
+
+ # If the user sets this config value to `None`, then EmbeddedSphinxShell's
+ # __init__ method will treat it as [].
+ execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
+ app.add_config_value('ipython_execlines', execlines, 'env')
+
+ app.add_config_value('ipython_holdcount', True, 'env')
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
+
+# Simple smoke test, needs to be converted to a proper automatic test.
+def test():
+
+ examples = [
+ r"""
+In [9]: pwd
+Out[9]: '/home/jdhunter/py4science/book'
+
+In [10]: cd bookdata/
+/home/jdhunter/py4science/book/bookdata
+
+In [2]: from pylab import *
+
+In [2]: ion()
+
+In [3]: im = imread('stinkbug.png')
+
+@savefig mystinkbug.png width=4in
+In [4]: imshow(im)
+Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
+
+""",
+ r"""
+
+In [1]: x = 'hello world'
+
+# string methods can be
+# used to alter the string
+@doctest
+In [2]: x.upper()
+Out[2]: 'HELLO WORLD'
+
+@verbatim
+In [3]: x.st<TAB>
+x.startswith x.strip
+""",
+ r"""
+
+In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
+ .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
+
+In [131]: print url.split('&')
+['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
+
+In [60]: import urllib
+
+""",
+ r"""\
+
+In [133]: import numpy.random
+
+@suppress
+In [134]: numpy.random.seed(2358)
+
+@doctest
+In [135]: numpy.random.rand(10,2)
+Out[135]:
+array([[ 0.64524308, 0.59943846],
+ [ 0.47102322, 0.8715456 ],
+ [ 0.29370834, 0.74776844],
+ [ 0.99539577, 0.1313423 ],
+ [ 0.16250302, 0.21103583],
+ [ 0.81626524, 0.1312433 ],
+ [ 0.67338089, 0.72302393],
+ [ 0.7566368 , 0.07033696],
+ [ 0.22591016, 0.77731835],
+ [ 0.0072729 , 0.34273127]])
+
+""",
+
+ r"""
+In [106]: print x
+jdh
+
+In [109]: for i in range(10):
+ .....: print i
+ .....:
+ .....:
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+""",
+
+ r"""
+
+In [144]: from pylab import *
+
+In [145]: ion()
+
+# use a semicolon to suppress the output
+@savefig test_hist.png width=4in
+In [151]: hist(np.random.randn(10000), 100);
+
+
+@savefig test_plot.png width=4in
+In [151]: plot(np.random.randn(10000), 'o');
+ """,
+
+ r"""
+# use a semicolon to suppress the output
+In [151]: plt.clf()
+
+@savefig plot_simple.png width=4in
+In [151]: plot([1,2,3])
+
+@savefig hist_simple.png width=4in
+In [151]: hist(np.random.randn(10000), 100);
+
+""",
+ r"""
+# update the current fig
+In [151]: ylabel('number')
+
+In [152]: title('normal distribution')
+
+
+@savefig hist_with_text.png
+In [153]: grid(True)
+
+@doctest float
+In [154]: 0.1 + 0.2
+Out[154]: 0.3
+
+@doctest float
+In [155]: np.arange(16).reshape(4,4)
+Out[155]:
+array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+In [1]: x = np.arange(16, dtype=float).reshape(4,4)
+
+In [2]: x[0,0] = np.inf
+
+In [3]: x[0,1] = np.nan
+
+@doctest float
+In [4]: x
+Out[4]:
+array([[ inf, nan, 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 12., 13., 14., 15.]])
+
+
+ """,
+ ]
+    # skip the first example, which depends on a local file:
+ examples = examples[1:]
+
+ #ipython_directive.DEBUG = True # dbg
+ #options = dict(suppress=True) # dbg
+ options = dict()
+ for example in examples:
+ content = example.split('\n')
+ IPythonDirective('debug', arguments=None, options=options,
+ content=content, lineno=0,
+ content_offset=None, block_text=None,
+ state=None, state_machine=None,
+ )
+
+# Run test suite as a script
+if __name__=='__main__':
+ if not os.path.isdir('_static'):
+ os.mkdir('_static')
+ test()
+ print('All OK? Check figures in _static/')
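
For orientation, here is a minimal sketch of wiring this extension into a
Sphinx project. The configuration names come from setup() above; the module
path and the reST snippet are illustrative assumptions, not part of the diff:

    # conf.py -- hypothetical minimal configuration for this directive
    extensions = ['IPython.sphinxext.ipython_directive']  # assumed import path
    ipython_mplbackend = 'agg'   # headless-safe backend, per the default above
    ipython_execlines = ['import numpy as np',
                         'import matplotlib.pyplot as plt']

    # some_page.rst -- a block the directive re-executes at build time:
    #
    # .. ipython::
    #
    #    In [1]: x = 3
    #
    #    In [2]: x ** 2
    #    Out[2]: 9
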
diff --git a/contrib/python/ipython/py2/IPython/terminal/console.py b/contrib/python/ipython/py2/IPython/terminal/console.py
index 981ee46b17..65571a7572 100644
--- a/contrib/python/ipython/py2/IPython/terminal/console.py
+++ b/contrib/python/ipython/py2/IPython/terminal/console.py
@@ -1,19 +1,19 @@
-"""
-Shim to maintain backwards compatibility with old IPython.terminal.console imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
+"""
+Shim to maintain backwards compatibility with old IPython.terminal.console imports.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+from warnings import warn
+
+from IPython.utils.shimmodule import ShimModule, ShimWarning
+
warn("The `IPython.terminal.console` package has been deprecated since IPython 4.0. "
- "You should import from jupyter_console instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.terminal.console'] = ShimModule(
- src='IPython.terminal.console', mirror='jupyter_console')
+ "You should import from jupyter_console instead.", ShimWarning)
+
+# Unconditionally insert the shim into sys.modules so that further import calls
+# trigger the custom attribute access above
+
+sys.modules['IPython.terminal.console'] = ShimModule(
+ src='IPython.terminal.console', mirror='jupyter_console')
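
As a quick illustration of the shim above: in a fresh interpreter with
jupyter_console installed, the deprecated import path still works and is
transparently redirected (a sketch, not part of the module):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        import IPython.terminal.console as console  # executes the shim

    # `console` is now the ShimModule registered in sys.modules above;
    # attribute access on it is forwarded to jupyter_console, and `caught`
    # holds the ShimWarning emitted during the import.
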
diff --git a/contrib/python/ipython/py2/IPython/terminal/embed.py b/contrib/python/ipython/py2/IPython/terminal/embed.py
index 8ff15313dd..5ad70431e4 100644
--- a/contrib/python/ipython/py2/IPython/terminal/embed.py
+++ b/contrib/python/ipython/py2/IPython/terminal/embed.py
@@ -1,33 +1,33 @@
-# encoding: utf-8
-"""
-An embedded IPython shell.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import with_statement
-from __future__ import print_function
-
-import sys
-import warnings
-
-from IPython.core import ultratb, compilerop
+# encoding: utf-8
+"""
+An embedded IPython shell.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import with_statement
+from __future__ import print_function
+
+import sys
+import warnings
+
+from IPython.core import ultratb, compilerop
from IPython.core import magic_arguments
-from IPython.core.magic import Magics, magics_class, line_magic
+from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core.interactiveshell import DummyMod, InteractiveShell
-from IPython.terminal.interactiveshell import TerminalInteractiveShell
-from IPython.terminal.ipapp import load_default_config
-
-from traitlets import Bool, CBool, Unicode
-from IPython.utils.io import ask_yes_no
-
-class KillEmbeded(Exception): pass
-
-# This is an additional magic that is exposed in embedded shells.
-@magics_class
-class EmbeddedMagics(Magics):
-
- @line_magic
+from IPython.terminal.interactiveshell import TerminalInteractiveShell
+from IPython.terminal.ipapp import load_default_config
+
+from traitlets import Bool, CBool, Unicode
+from IPython.utils.io import ask_yes_no
+
+class KillEmbeded(Exception): pass
+
+# This is an additional magic that is exposed in embedded shells.
+@magics_class
+class EmbeddedMagics(Magics):
+
+ @line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument('-i', '--instance', action='store_true',
help='Kill instance instead of call location')
@@ -35,10 +35,10 @@ class EmbeddedMagics(Magics):
help='Also exit the current session')
@magic_arguments.argument('-y', '--yes', action='store_true',
help='Do not ask confirmation')
- def kill_embedded(self, parameter_s=''):
+ def kill_embedded(self, parameter_s=''):
"""%kill_embedded : deactivate for good the current embedded IPython
-
- This function (after asking for confirmation) sets an internal flag so
+
+ This function (after asking for confirmation) sets an internal flag so
that an embedded IPython will never activate again for the given call
location. This is useful to permanently disable a shell that is being
called inside a loop: once you've figured out what you needed from it,
@@ -60,8 +60,8 @@ class EmbeddedMagics(Magics):
This was the default behavior before IPython 5.2
- """
-
+ """
+
args = magic_arguments.parse_argstring(self.kill_embedded, parameter_s)
print(args)
if args.instance:
@@ -85,38 +85,38 @@ class EmbeddedMagics(Magics):
self.shell.embedded_active = False
print("This embedded IPython call location will not reactivate anymore "
"once you exit.")
-
+
if args.exit:
# Ask-exit does not really ask, it just set internals flags to exit
# on next loop.
self.shell.ask_exit()
-
-
- @line_magic
- def exit_raise(self, parameter_s=''):
- """%exit_raise Make the current embedded kernel exit and raise and exception.
-
- This function sets an internal flag so that an embedded IPython will
- raise a `IPython.terminal.embed.KillEmbeded` Exception on exit, and then exit the current I. This is
- useful to permanently exit a loop that create IPython embed instance.
- """
-
- self.shell.should_raise = True
- self.shell.ask_exit()
-
-
-
-class InteractiveShellEmbed(TerminalInteractiveShell):
-
- dummy_mode = Bool(False)
- exit_msg = Unicode('')
- embedded = CBool(True)
- should_raise = CBool(False)
-    # Like the base class, display_banner is not configurable, but here it
- # is True by default.
- display_banner = CBool(True)
- exit_msg = Unicode()
-
+
+
+ @line_magic
+ def exit_raise(self, parameter_s=''):
+ """%exit_raise Make the current embedded kernel exit and raise and exception.
+
+ This function sets an internal flag so that an embedded IPython will
+ raise a `IPython.terminal.embed.KillEmbeded` Exception on exit, and then exit the current I. This is
+ useful to permanently exit a loop that create IPython embed instance.
+ """
+
+ self.shell.should_raise = True
+ self.shell.ask_exit()
+
+
+
+class InteractiveShellEmbed(TerminalInteractiveShell):
+
+ dummy_mode = Bool(False)
+ exit_msg = Unicode('')
+ embedded = CBool(True)
+ should_raise = CBool(False)
+    # Like the base class, display_banner is not configurable, but here it
+ # is True by default.
+ display_banner = CBool(True)
+ exit_msg = Unicode()
+
# When embedding, by default we don't change the terminal title
term_title = Bool(False,
help="Automatically set the terminal title"
@@ -144,140 +144,140 @@ class InteractiveShellEmbed(TerminalInteractiveShell):
InteractiveShellEmbed._inactive_locations.add(
self._call_location_id)
- def __init__(self, **kw):
- if kw.get('user_global_ns', None) is not None:
+ def __init__(self, **kw):
+ if kw.get('user_global_ns', None) is not None:
raise DeprecationWarning(
"Key word argument `user_global_ns` has been replaced by `user_module` since IPython 4.0.")
-
+
clid = kw.pop('_init_location_id', None)
if not clid:
frame = sys._getframe(1)
clid = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
self._init_location_id = clid
- super(InteractiveShellEmbed,self).__init__(**kw)
-
- # don't use the ipython crash handler so that user exceptions aren't
- # trapped
- sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
- mode=self.xmode,
- call_pdb=self.pdb)
-
- def init_sys_modules(self):
+ super(InteractiveShellEmbed,self).__init__(**kw)
+
+ # don't use the ipython crash handler so that user exceptions aren't
+ # trapped
+ sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
+ mode=self.xmode,
+ call_pdb=self.pdb)
+
+ def init_sys_modules(self):
"""
Explicitly overwrite :mod:`IPython.core.interactiveshell` to do nothing.
"""
- pass
-
- def init_magics(self):
- super(InteractiveShellEmbed, self).init_magics()
- self.register_magics(EmbeddedMagics)
-
- def __call__(self, header='', local_ns=None, module=None, dummy=None,
+ pass
+
+ def init_magics(self):
+ super(InteractiveShellEmbed, self).init_magics()
+ self.register_magics(EmbeddedMagics)
+
+ def __call__(self, header='', local_ns=None, module=None, dummy=None,
stack_depth=1, global_ns=None, compile_flags=None, **kw):
- """Activate the interactive interpreter.
-
- __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
- the interpreter shell with the given local and global namespaces, and
- optionally print a header string at startup.
-
- The shell can be globally activated/deactivated using the
- dummy_mode attribute. This allows you to turn off a shell used
- for debugging globally.
-
- However, *each* time you call the shell you can override the current
- state of dummy_mode with the optional keyword parameter 'dummy'. For
- example, if you set dummy mode on with IPShell.dummy_mode = True, you
-        can still have a specific call work by calling it as IPShell(dummy=False).
- """
-
+ """Activate the interactive interpreter.
+
+ __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
+ the interpreter shell with the given local and global namespaces, and
+ optionally print a header string at startup.
+
+ The shell can be globally activated/deactivated using the
+ dummy_mode attribute. This allows you to turn off a shell used
+ for debugging globally.
+
+ However, *each* time you call the shell you can override the current
+ state of dummy_mode with the optional keyword parameter 'dummy'. For
+ example, if you set dummy mode on with IPShell.dummy_mode = True, you
+        can still have a specific call work by calling it as IPShell(dummy=False).
+ """
+
# we are called, set the underlying interactiveshell not to exit.
self.keep_running = True
- # If the user has turned it off, go away
+ # If the user has turned it off, go away
clid = kw.pop('_call_location_id', None)
if not clid:
frame = sys._getframe(1)
clid = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
self._call_location_id = clid
- if not self.embedded_active:
- return
-
- # Normal exits from interactive mode set this flag, so the shell can't
- # re-enter (it checks this variable at the start of interactive mode).
- self.exit_now = False
-
- # Allow the dummy parameter to override the global __dummy_mode
- if dummy or (dummy != 0 and self.dummy_mode):
- return
-
- # self.banner is auto computed
- if header:
- self.old_banner2 = self.banner2
- self.banner2 = self.banner2 + '\n' + header + '\n'
- else:
- self.old_banner2 = ''
-
+ if not self.embedded_active:
+ return
+
+ # Normal exits from interactive mode set this flag, so the shell can't
+ # re-enter (it checks this variable at the start of interactive mode).
+ self.exit_now = False
+
+ # Allow the dummy parameter to override the global __dummy_mode
+ if dummy or (dummy != 0 and self.dummy_mode):
+ return
+
+ # self.banner is auto computed
+ if header:
+ self.old_banner2 = self.banner2
+ self.banner2 = self.banner2 + '\n' + header + '\n'
+ else:
+ self.old_banner2 = ''
+
if self.display_banner:
self.show_banner()
- # Call the embedding code with a stack depth of 1 so it can skip over
- # our call and get the original caller's namespaces.
- self.mainloop(local_ns, module, stack_depth=stack_depth,
- global_ns=global_ns, compile_flags=compile_flags)
-
- self.banner2 = self.old_banner2
-
- if self.exit_msg is not None:
- print(self.exit_msg)
-
- if self.should_raise:
- raise KillEmbeded('Embedded IPython raising error, as user requested.')
-
-
- def mainloop(self, local_ns=None, module=None, stack_depth=0,
- display_banner=None, global_ns=None, compile_flags=None):
- """Embeds IPython into a running python program.
-
- Parameters
- ----------
-
- local_ns, module
- Working local namespace (a dict) and module (a module or similar
- object). If given as None, they are automatically taken from the scope
- where the shell was called, so that program variables become visible.
-
- stack_depth : int
-      How many levels in the stack to go looking for namespaces (when
- local_ns or module is None). This allows an intermediate caller to
- make sure that this function gets the namespace from the intended
- level in the stack. By default (0) it will get its locals and globals
- from the immediate caller.
-
- compile_flags
- A bit field identifying the __future__ features
- that are enabled, as passed to the builtin :func:`compile` function.
- If given as None, they are automatically taken from the scope where
- the shell was called.
-
- """
-
- if (global_ns is not None) and (module is None):
+ # Call the embedding code with a stack depth of 1 so it can skip over
+ # our call and get the original caller's namespaces.
+ self.mainloop(local_ns, module, stack_depth=stack_depth,
+ global_ns=global_ns, compile_flags=compile_flags)
+
+ self.banner2 = self.old_banner2
+
+ if self.exit_msg is not None:
+ print(self.exit_msg)
+
+ if self.should_raise:
+ raise KillEmbeded('Embedded IPython raising error, as user requested.')
+
+
+ def mainloop(self, local_ns=None, module=None, stack_depth=0,
+ display_banner=None, global_ns=None, compile_flags=None):
+ """Embeds IPython into a running python program.
+
+ Parameters
+ ----------
+
+ local_ns, module
+ Working local namespace (a dict) and module (a module or similar
+ object). If given as None, they are automatically taken from the scope
+ where the shell was called, so that program variables become visible.
+
+ stack_depth : int
+      How many levels in the stack to go looking for namespaces (when
+ local_ns or module is None). This allows an intermediate caller to
+ make sure that this function gets the namespace from the intended
+ level in the stack. By default (0) it will get its locals and globals
+ from the immediate caller.
+
+ compile_flags
+ A bit field identifying the __future__ features
+ that are enabled, as passed to the builtin :func:`compile` function.
+ If given as None, they are automatically taken from the scope where
+ the shell was called.
+
+ """
+
+ if (global_ns is not None) and (module is None):
raise DeprecationWarning("'global_ns' keyword argument is deprecated, and has been removed in IPython 5.0 use `module` keyword argument instead.")
-
+
if (display_banner is not None):
warnings.warn("The display_banner parameter is deprecated since IPython 4.0", DeprecationWarning)
- # Get locals and globals from caller
- if ((local_ns is None or module is None or compile_flags is None)
- and self.default_user_namespaces):
- call_frame = sys._getframe(stack_depth).f_back
-
- if local_ns is None:
- local_ns = call_frame.f_locals
- if module is None:
- global_ns = call_frame.f_globals
+ # Get locals and globals from caller
+ if ((local_ns is None or module is None or compile_flags is None)
+ and self.default_user_namespaces):
+ call_frame = sys._getframe(stack_depth).f_back
+
+ if local_ns is None:
+ local_ns = call_frame.f_locals
+ if module is None:
+ global_ns = call_frame.f_globals
try:
module = sys.modules[global_ns['__name__']]
except KeyError:
@@ -286,110 +286,110 @@ class InteractiveShellEmbed(TerminalInteractiveShell):
)
module = DummyMod()
module.__dict__ = global_ns
- if compile_flags is None:
- compile_flags = (call_frame.f_code.co_flags &
- compilerop.PyCF_MASK)
-
- # Save original namespace and module so we can restore them after
- # embedding; otherwise the shell doesn't shut down correctly.
- orig_user_module = self.user_module
- orig_user_ns = self.user_ns
- orig_compile_flags = self.compile.flags
-
- # Update namespaces and fire up interpreter
-
- # The global one is easy, we can just throw it in
- if module is not None:
- self.user_module = module
-
- # But the user/local one is tricky: ipython needs it to store internal
- # data, but we also need the locals. We'll throw our hidden variables
- # like _ih and get_ipython() into the local namespace, but delete them
- # later.
- if local_ns is not None:
- reentrant_local_ns = {k: v for (k, v) in local_ns.items() if k not in self.user_ns_hidden.keys()}
- self.user_ns = reentrant_local_ns
- self.init_user_ns()
-
- # Compiler flags
- if compile_flags is not None:
- self.compile.flags = compile_flags
-
- # make sure the tab-completer has the correct frame information, so it
- # actually completes using the frame's locals/globals
- self.set_completer_frame()
-
- with self.builtin_trap, self.display_trap:
+ if compile_flags is None:
+ compile_flags = (call_frame.f_code.co_flags &
+ compilerop.PyCF_MASK)
+
+ # Save original namespace and module so we can restore them after
+ # embedding; otherwise the shell doesn't shut down correctly.
+ orig_user_module = self.user_module
+ orig_user_ns = self.user_ns
+ orig_compile_flags = self.compile.flags
+
+ # Update namespaces and fire up interpreter
+
+ # The global one is easy, we can just throw it in
+ if module is not None:
+ self.user_module = module
+
+ # But the user/local one is tricky: ipython needs it to store internal
+ # data, but we also need the locals. We'll throw our hidden variables
+ # like _ih and get_ipython() into the local namespace, but delete them
+ # later.
+ if local_ns is not None:
+ reentrant_local_ns = {k: v for (k, v) in local_ns.items() if k not in self.user_ns_hidden.keys()}
+ self.user_ns = reentrant_local_ns
+ self.init_user_ns()
+
+ # Compiler flags
+ if compile_flags is not None:
+ self.compile.flags = compile_flags
+
+ # make sure the tab-completer has the correct frame information, so it
+ # actually completes using the frame's locals/globals
+ self.set_completer_frame()
+
+ with self.builtin_trap, self.display_trap:
self.interact()
-
- # now, purge out the local namespace of IPython's hidden variables.
- if local_ns is not None:
- local_ns.update({k: v for (k, v) in self.user_ns.items() if k not in self.user_ns_hidden.keys()})
-
-
- # Restore original namespace so shell can shut down when we exit.
- self.user_module = orig_user_module
- self.user_ns = orig_user_ns
- self.compile.flags = orig_compile_flags
-
-
-def embed(**kwargs):
- """Call this to embed IPython at the current point in your program.
-
- The first invocation of this will create an :class:`InteractiveShellEmbed`
- instance and then call it. Consecutive calls just call the already
- created instance.
-
- If you don't want the kernel to initialize the namespace
- from the scope of the surrounding function,
- and/or you want to load full IPython configuration,
- you probably want `IPython.start_ipython()` instead.
-
- Here is a simple example::
-
- from IPython import embed
- a = 10
- b = 20
- embed(header='First time')
- c = 30
- d = 40
- embed()
-
- Full customization can be done by passing a :class:`Config` in as the
- config argument.
- """
- config = kwargs.get('config')
- header = kwargs.pop('header', u'')
- compile_flags = kwargs.pop('compile_flags', None)
- if config is None:
- config = load_default_config()
- config.InteractiveShellEmbed = config.TerminalInteractiveShell
- kwargs['config'] = config
- #save ps1/ps2 if defined
- ps1 = None
- ps2 = None
- try:
- ps1 = sys.ps1
- ps2 = sys.ps2
- except AttributeError:
- pass
- #save previous instance
- saved_shell_instance = InteractiveShell._instance
- if saved_shell_instance is not None:
- cls = type(saved_shell_instance)
- cls.clear_instance()
+
+ # now, purge out the local namespace of IPython's hidden variables.
+ if local_ns is not None:
+ local_ns.update({k: v for (k, v) in self.user_ns.items() if k not in self.user_ns_hidden.keys()})
+
+
+ # Restore original namespace so shell can shut down when we exit.
+ self.user_module = orig_user_module
+ self.user_ns = orig_user_ns
+ self.compile.flags = orig_compile_flags
+
+
+def embed(**kwargs):
+ """Call this to embed IPython at the current point in your program.
+
+ The first invocation of this will create an :class:`InteractiveShellEmbed`
+ instance and then call it. Consecutive calls just call the already
+ created instance.
+
+ If you don't want the kernel to initialize the namespace
+ from the scope of the surrounding function,
+ and/or you want to load full IPython configuration,
+ you probably want `IPython.start_ipython()` instead.
+
+ Here is a simple example::
+
+ from IPython import embed
+ a = 10
+ b = 20
+ embed(header='First time')
+ c = 30
+ d = 40
+ embed()
+
+ Full customization can be done by passing a :class:`Config` in as the
+ config argument.
+ """
+ config = kwargs.get('config')
+ header = kwargs.pop('header', u'')
+ compile_flags = kwargs.pop('compile_flags', None)
+ if config is None:
+ config = load_default_config()
+ config.InteractiveShellEmbed = config.TerminalInteractiveShell
+ kwargs['config'] = config
+ #save ps1/ps2 if defined
+ ps1 = None
+ ps2 = None
+ try:
+ ps1 = sys.ps1
+ ps2 = sys.ps2
+ except AttributeError:
+ pass
+ #save previous instance
+ saved_shell_instance = InteractiveShell._instance
+ if saved_shell_instance is not None:
+ cls = type(saved_shell_instance)
+ cls.clear_instance()
frame = sys._getframe(1)
shell = InteractiveShellEmbed.instance(_init_location_id='%s:%s' % (
frame.f_code.co_filename, frame.f_lineno), **kwargs)
shell(header=header, stack_depth=2, compile_flags=compile_flags,
_call_location_id='%s:%s' % (frame.f_code.co_filename, frame.f_lineno))
- InteractiveShellEmbed.clear_instance()
- #restore previous instance
- if saved_shell_instance is not None:
- cls = type(saved_shell_instance)
- cls.clear_instance()
- for subclass in cls._walk_mro():
- subclass._instance = saved_shell_instance
- if ps1 is not None:
- sys.ps1 = ps1
- sys.ps2 = ps2
+ InteractiveShellEmbed.clear_instance()
+ #restore previous instance
+ if saved_shell_instance is not None:
+ cls = type(saved_shell_instance)
+ cls.clear_instance()
+ for subclass in cls._walk_mro():
+ subclass._instance = saved_shell_instance
+ if ps1 is not None:
+ sys.ps1 = ps1
+ sys.ps2 = ps2
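
The dummy_mode/dummy interplay documented in __call__ above can be summarized
with a short sketch (a hypothetical script; the semantics follow the docstring
and the `if dummy or (dummy != 0 and self.dummy_mode)` check):

    from IPython.terminal.embed import InteractiveShellEmbed

    ipshell = InteractiveShellEmbed(banner2='debug checkpoint')

    ipshell()                  # opens an interactive shell at this point
    ipshell.dummy_mode = True  # globally disable subsequent calls
    ipshell()                  # no-op: dummy_mode is set
    ipshell(dummy=False)       # per-call override; activates anyway
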
diff --git a/contrib/python/ipython/py2/IPython/terminal/interactiveshell.py b/contrib/python/ipython/py2/IPython/terminal/interactiveshell.py
index e80f8c1503..f67cc6b502 100644
--- a/contrib/python/ipython/py2/IPython/terminal/interactiveshell.py
+++ b/contrib/python/ipython/py2/IPython/terminal/interactiveshell.py
@@ -1,18 +1,18 @@
"""IPython terminal interface using prompt_toolkit"""
-from __future__ import print_function
-
-import os
-import sys
+from __future__ import print_function
+
+import os
+import sys
import warnings
from warnings import warn
-
-from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
+
+from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.utils import io
from IPython.utils.py3compat import PY3, cast_unicode_py2, input, string_types
-from IPython.utils.terminal import toggle_set_term_title, set_term_title
-from IPython.utils.process import abbrev_cwd
+from IPython.utils.terminal import toggle_set_term_title, set_term_title
+from IPython.utils.process import abbrev_cwd
from traitlets import Bool, Unicode, Dict, Integer, observe, Instance, Type, default, Enum, Union
-
+
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.filters import (HasFocus, Condition, IsDone)
@@ -22,7 +22,7 @@ from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor
from prompt_toolkit.styles import PygmentsStyle, DynamicStyle
-
+
from pygments.styles import get_style_by_name
from pygments.style import Style
from pygments.token import Token
@@ -59,23 +59,23 @@ _style_overrides_linux = {
-def get_default_editor():
- try:
- ed = os.environ['EDITOR']
+def get_default_editor():
+ try:
+ ed = os.environ['EDITOR']
if not PY3:
- ed = ed.decode()
- return ed
- except KeyError:
- pass
- except UnicodeError:
- warn("$EDITOR environment variable is not pure ASCII. Using platform "
- "default editor.")
-
- if os.name == 'posix':
- return 'vi' # the only one guaranteed to be there!
- else:
- return 'notepad' # same in Windows!
-
+ ed = ed.decode()
+ return ed
+ except KeyError:
+ pass
+ except UnicodeError:
+ warn("$EDITOR environment variable is not pure ASCII. Using platform "
+ "default editor.")
+
+ if os.name == 'posix':
+ return 'vi' # the only one guaranteed to be there!
+ else:
+ return 'notepad' # same in Windows!
+
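The fallback chain above ($EDITOR, then vi or notepad) can be exercised directly; a small sketch, assuming the import path shown in this diff::

    import os
    from IPython.terminal.interactiveshell import get_default_editor

    os.environ['EDITOR'] = 'nano'
    print(get_default_editor())  # 'nano'

    del os.environ['EDITOR']
    print(get_default_editor())  # 'vi' on POSIX, 'notepad' on Windows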
# conservatively check for tty
# overridden streams can result in things like:
# - sys.stdin = None
@@ -87,116 +87,116 @@ for _name in ('stdin', 'stdout', 'stderr'):
break
else:
_is_tty = True
-
-
+
+
_use_simple_prompt = ('IPY_TEST_SIMPLE_PROMPT' in os.environ) or (not _is_tty)
-
+
class TerminalInteractiveShell(InteractiveShell):
    space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
'to reserve for the completion menu'
).tag(config=True)
-
+
def _space_for_menu_changed(self, old, new):
self._update_layout()
-
+
pt_cli = None
debugger_history = None
_pt_app = None
-
+
simple_prompt = Bool(_use_simple_prompt,
help="""Use `raw_input` for the REPL, without completion and prompt colors.
-
+
        Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known uses are:
IPython own testing machinery, and emacs inferior-shell integration through elpy.
-
+
        This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
        environment variable is set, or if the current terminal is not a tty.
-
+
"""
).tag(config=True)
-
+
@property
def debugger_cls(self):
return Pdb if self.simple_prompt else TerminalPdb
-
+
confirm_exit = Bool(True,
help="""
Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
).tag(config=True)
-
+
editing_mode = Unicode('emacs',
help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
).tag(config=True)
-
+
mouse_support = Bool(False,
help="Enable mouse support in the prompt"
).tag(config=True)
-
+
# We don't load the list of styles for the help string, because loading
# Pygments plugins takes time and can cause unexpected errors.
highlighting_style = Union([Unicode('legacy'), Type(klass=Style)],
help="""The name or class of a Pygments style to use for syntax
highlighting. To see available styles, run `pygmentize -L styles`."""
).tag(config=True)
-
+
@observe('highlighting_style')
@observe('colors')
def _highlighting_style_changed(self, change):
self.refresh_style()
-
+
def refresh_style(self):
self._style = self._make_style_from_name_or_cls(self.highlighting_style)
-
-
+
+
highlighting_style_overrides = Dict(
help="Override highlighting format for specific tokens"
).tag(config=True)
-
+
true_color = Bool(False,
help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
"If your terminal supports true color, the following command "
"should print 'TRUECOLOR' in orange: "
"printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
).tag(config=True)
-
+
editor = Unicode(get_default_editor(),
help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
).tag(config=True)
-
+
prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)
-
+
prompts = Instance(Prompts)
-
+
@default('prompts')
def _prompts_default(self):
return self.prompts_class(self)
-
+
@observe('prompts')
def _(self, change):
self._update_layout()
-
+
@default('displayhook_class')
def _displayhook_class_default(self):
return RichPromptDisplayHook
-
+
term_title = Bool(True,
help="Automatically set the terminal title"
).tag(config=True)
-
+
display_completions = Enum(('column', 'multicolumn','readlinelike'),
help= ( "Options for displaying tab completions, 'column', 'multicolumn', and "
"'readlinelike'. These options are for `prompt_toolkit`, see "
"`prompt_toolkit` documentation for more information."
),
default_value='multicolumn').tag(config=True)
-
+
highlight_matching_brackets = Bool(True,
help="Highlight matching brackets.",
).tag(config=True)
-
+
extra_open_editor_shortcuts = Bool(False,
help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
"This is in addition to the F2 binding, which is always enabled."
@@ -210,14 +210,14 @@ class TerminalInteractiveShell(InteractiveShell):
set_term_title('IPython: ' + abbrev_cwd())
else:
toggle_set_term_title(False)
-
+
def init_display_formatter(self):
super(TerminalInteractiveShell, self).init_display_formatter()
# terminal only supports plain text
self.display_formatter.active_types = ['text/plain']
# disable `_ipython_display_`
self.display_formatter.ipython_display_formatter.enabled = False
-
+
def init_prompt_toolkit_cli(self):
if self.simple_prompt:
# Fall back to plain non-interactive output for tests.
@@ -233,13 +233,13 @@ class TerminalInteractiveShell(InteractiveShell):
return isp.source_reset()
self.prompt_for_code = prompt
return
-
+
# Set up keyboard shortcuts
kbmanager = KeyBindingManager.for_prompt(
enable_open_in_editor=self.extra_open_editor_shortcuts,
)
register_ipython_shortcuts(kbmanager.registry, self)
-
+
# Pre-populate history from IPython's history database
history = InMemoryHistory()
last_cell = u""
@@ -250,12 +250,12 @@ class TerminalInteractiveShell(InteractiveShell):
if cell and (cell != last_cell):
history.append(cell)
last_cell = cell
-
+
self._style = self._make_style_from_name_or_cls(self.highlighting_style)
self.style = DynamicStyle(lambda: self._style)
-
+
editing_mode = getattr(EditingMode, self.editing_mode.upper())
-
+
def patch_stdout(**kwargs):
return self.pt_cli.patch_stdout_context(**kwargs)
@@ -274,13 +274,13 @@ class TerminalInteractiveShell(InteractiveShell):
self.pt_cli = CommandLineInterface(
self._pt_app, eventloop=self._eventloop,
output=create_output(true_color=self.true_color))
-
+
def _make_style_from_name_or_cls(self, name_or_cls):
"""
        Small wrapper that makes an IPython-compatible style from a style name
-
+
        We need this to add styling for the prompt, etc.
- """
+ """
style_overrides = {}
if name_or_cls == 'legacy':
legacy = self.colors.lower()
@@ -336,9 +336,9 @@ class TerminalInteractiveShell(InteractiveShell):
style_overrides.update(self.highlighting_style_overrides)
style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
style_dict=style_overrides)
-
+
return style
-
+
def _layout_options(self):
"""
        Return the current layout options for this TerminalInteractiveShell
@@ -350,7 +350,7 @@ class TerminalInteractiveShell(InteractiveShell):
'get_continuation_tokens':self.prompts.continuation_prompt_tokens,
'multiline':True,
'display_completions_in_columns': (self.display_completions == 'multicolumn'),
-
+
# Highlight matching brackets, but only when this setting is
# enabled, and only when the DEFAULT_BUFFER has the focus.
'extra_input_processors': [ConditionalProcessor(
@@ -358,20 +358,20 @@ class TerminalInteractiveShell(InteractiveShell):
filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() &
Condition(lambda cli: self.highlight_matching_brackets))],
}
-
+
def _update_layout(self):
- """
+ """
        Ask for a recomputation of the application layout if, for example,
        some configuration options have changed.
"""
if self._pt_app:
self._pt_app.layout = create_prompt_layout(**self._layout_options())
-
+
def prompt_for_code(self):
document = self.pt_cli.run(
pre_run=self.pre_prompt, reset_current_buffer=True)
return document.text
-
+
def enable_win_unicode_console(self):
if sys.version_info >= (3, 6):
# Since PEP 528, Python uses the unicode APIs for the Windows
@@ -379,36 +379,36 @@ class TerminalInteractiveShell(InteractiveShell):
return
import win_unicode_console
-
+
if PY3:
win_unicode_console.enable()
- else:
+ else:
# https://github.com/ipython/ipython/issues/9768
from win_unicode_console.streams import (TextStreamWrapper,
stdout_text_transcoded, stderr_text_transcoded)
-
+
class LenientStrStreamWrapper(TextStreamWrapper):
def write(self, s):
if isinstance(s, bytes):
s = s.decode(self.encoding, 'replace')
-
+
self.base.write(s)
-
+
stdout_text_str = LenientStrStreamWrapper(stdout_text_transcoded)
stderr_text_str = LenientStrStreamWrapper(stderr_text_transcoded)
-
+
win_unicode_console.enable(stdout=stdout_text_str,
stderr=stderr_text_str)
-
+
def init_io(self):
if sys.platform not in {'win32', 'cli'}:
return
-
+
self.enable_win_unicode_console()
-
+
import colorama
colorama.init()
-
+
# For some reason we make these wrappers around stdout/stderr.
# For now, we need to reset them so all output gets coloured.
# https://github.com/ipython/ipython/issues/8669
@@ -418,37 +418,37 @@ class TerminalInteractiveShell(InteractiveShell):
warnings.simplefilter('ignore', DeprecationWarning)
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
-
+
def init_magics(self):
super(TerminalInteractiveShell, self).init_magics()
self.register_magics(TerminalMagics)
-
- def init_alias(self):
- # The parent class defines aliases that can be safely used with any
- # frontend.
- super(TerminalInteractiveShell, self).init_alias()
-
- # Now define aliases that only make sense on the terminal, because they
- # need direct access to the console in a way that we can't emulate in
- # GUI or web frontend
- if os.name == 'posix':
+
+ def init_alias(self):
+ # The parent class defines aliases that can be safely used with any
+ # frontend.
+ super(TerminalInteractiveShell, self).init_alias()
+
+ # Now define aliases that only make sense on the terminal, because they
+ # need direct access to the console in a way that we can't emulate in
+ # GUI or web frontend
+ if os.name == 'posix':
for cmd in ['clear', 'more', 'less', 'man']:
self.alias_manager.soft_define_alias(cmd, cmd)
-
-
+
+
def __init__(self, *args, **kwargs):
super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
self.init_prompt_toolkit_cli()
self.init_term_title()
self.keep_running = True
-
+
self.debugger_history = InMemoryHistory()
-
+
def ask_exit(self):
self.keep_running = False
-
+
rl_next_input = None
-
+
def pre_prompt(self):
if self.rl_next_input:
# We can't set the buffer here, because it will be reset just after
@@ -464,34 +464,34 @@ class TerminalInteractiveShell(InteractiveShell):
# directly here.
set_doc()
self.rl_next_input = None
-
+
def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):
-
+
if display_banner is not DISPLAY_BANNER_DEPRECATED:
warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
-
+
self.keep_running = True
while self.keep_running:
print(self.separate_in, end='')
-
- try:
+
+ try:
code = self.prompt_for_code()
except EOFError:
if (not self.confirm_exit) \
or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
self.ask_exit()
-
- else:
+
+ else:
if code:
self.run_cell(code, store_history=True)
-
+
def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
# An extra layer of protection in case someone mashing Ctrl-C breaks
# out of our internal code.
if display_banner is not DISPLAY_BANNER_DEPRECATED:
warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
while True:
- try:
+ try:
self.interact()
break
except KeyboardInterrupt as e:
@@ -503,12 +503,12 @@ class TerminalInteractiveShell(InteractiveShell):
# https://github.com/ipython/ipython/pull/9867
if hasattr(self, '_eventloop'):
self._eventloop.stop()
-
+
_inputhook = None
def inputhook(self, context):
if self._inputhook is not None:
self._inputhook(context)
-
+
active_eventloop = None
def enable_gui(self, gui=None):
if gui:
@@ -516,24 +516,24 @@ class TerminalInteractiveShell(InteractiveShell):
get_inputhook_name_and_func(gui)
else:
self.active_eventloop = self._inputhook = None
-
+
# Run !system commands directly, not through pipes, so terminal programs
# work correctly.
system = InteractiveShell.system_raw
-
+
def auto_rewrite_input(self, cmd):
"""Overridden from the parent class to use fancy rewriting prompt"""
if not self.show_rewritten_input:
return
-
+
tokens = self.prompts.rewrite_prompt_tokens()
if self.pt_cli:
self.pt_cli.print_tokens(tokens)
print(cmd)
- else:
+ else:
prompt = ''.join(s for t, s in tokens)
print(prompt, cmd, sep='')
-
+
_prompts_before = None
def switch_doctest_mode(self, mode):
"""Switch prompts to classic for %doctest_mode"""
@@ -544,9 +544,9 @@ class TerminalInteractiveShell(InteractiveShell):
self.prompts = self._prompts_before
self._prompts_before = None
self._update_layout()
-
-
+
+
InteractiveShellABC.register(TerminalInteractiveShell)
-
+
if __name__ == '__main__':
TerminalInteractiveShell.instance().interact()
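Most of the traits restored in this file are intended to be set through traitlets configuration rather than by subclassing. A hedged sketch of driving a few of them (the chosen values are arbitrary examples)::

    from traitlets.config import Config
    from IPython import start_ipython

    c = Config()
    c.TerminalInteractiveShell.editing_mode = 'vi'
    c.TerminalInteractiveShell.highlighting_style = 'monokai'
    c.TerminalInteractiveShell.confirm_exit = False

    # Starts a full terminal IPython session using the config above.
    start_ipython(argv=[], config=c)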
diff --git a/contrib/python/ipython/py2/IPython/terminal/ipapp.py b/contrib/python/ipython/py2/IPython/terminal/ipapp.py
index f8136ef0b6..6b25aaa3e3 100755
--- a/contrib/python/ipython/py2/IPython/terminal/ipapp.py
+++ b/contrib/python/ipython/py2/IPython/terminal/ipapp.py
@@ -1,190 +1,190 @@
-#!/usr/bin/env python
-# encoding: utf-8
-"""
-The :class:`~IPython.core.application.Application` object for the command
-line :command:`ipython` program.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import logging
-import os
-import sys
+#!/usr/bin/env python
+# encoding: utf-8
+"""
+The :class:`~IPython.core.application.Application` object for the command
+line :command:`ipython` program.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import logging
+import os
+import sys
import warnings
-
-from traitlets.config.loader import Config
-from traitlets.config.application import boolean_flag, catch_config_error, Application
-from IPython.core import release
-from IPython.core import usage
-from IPython.core.completer import IPCompleter
-from IPython.core.crashhandler import CrashHandler
-from IPython.core.formatters import PlainTextFormatter
-from IPython.core.history import HistoryManager
-from IPython.core.application import (
- ProfileDir, BaseIPythonApplication, base_flags, base_aliases
-)
-from IPython.core.magics import ScriptMagics
-from IPython.core.shellapp import (
- InteractiveShellApp, shell_flags, shell_aliases
-)
-from IPython.extensions.storemagic import StoreMagics
+
+from traitlets.config.loader import Config
+from traitlets.config.application import boolean_flag, catch_config_error, Application
+from IPython.core import release
+from IPython.core import usage
+from IPython.core.completer import IPCompleter
+from IPython.core.crashhandler import CrashHandler
+from IPython.core.formatters import PlainTextFormatter
+from IPython.core.history import HistoryManager
+from IPython.core.application import (
+ ProfileDir, BaseIPythonApplication, base_flags, base_aliases
+)
+from IPython.core.magics import ScriptMagics
+from IPython.core.shellapp import (
+ InteractiveShellApp, shell_flags, shell_aliases
+)
+from IPython.extensions.storemagic import StoreMagics
from .interactiveshell import TerminalInteractiveShell
-from IPython.paths import get_ipython_dir
-from traitlets import (
+from IPython.paths import get_ipython_dir
+from traitlets import (
Bool, List, Dict, default, observe, Type
-)
-
-#-----------------------------------------------------------------------------
-# Globals, utilities and helpers
-#-----------------------------------------------------------------------------
-
-_examples = """
-ipython --matplotlib # enable matplotlib integration
-ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
-
-ipython --log-level=DEBUG # set logging to DEBUG
-ipython --profile=foo # start with profile foo
-
-ipython profile create foo # create profile foo w/ default config files
-ipython help profile # show the help for the profile subcmd
-
-ipython locate # print the path to the IPython directory
-ipython locate profile foo # print the path to the directory for profile `foo`
-"""
-
-#-----------------------------------------------------------------------------
-# Crash handler for this application
-#-----------------------------------------------------------------------------
-
-class IPAppCrashHandler(CrashHandler):
- """sys.excepthook for IPython itself, leaves a detailed report on disk."""
-
- def __init__(self, app):
- contact_name = release.author
- contact_email = release.author_email
- bug_tracker = 'https://github.com/ipython/ipython/issues'
- super(IPAppCrashHandler,self).__init__(
- app, contact_name, contact_email, bug_tracker
- )
-
- def make_report(self,traceback):
- """Return a string containing a crash report."""
-
- sec_sep = self.section_sep
- # Start with parent report
- report = [super(IPAppCrashHandler, self).make_report(traceback)]
- # Add interactive-specific info we may have
- rpt_add = report.append
- try:
- rpt_add(sec_sep+"History of session input:")
- for line in self.app.shell.user_ns['_ih']:
- rpt_add(line)
- rpt_add('\n*** Last line of input (may not be in above history):\n')
- rpt_add(self.app.shell._last_input_line+'\n')
- except:
- pass
-
- return ''.join(report)
-
-#-----------------------------------------------------------------------------
-# Aliases and Flags
-#-----------------------------------------------------------------------------
-flags = dict(base_flags)
-flags.update(shell_flags)
-frontend_flags = {}
-addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
-addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
- 'Turn on auto editing of files with syntax errors.',
- 'Turn off auto editing of files with syntax errors.'
-)
+)
+
+#-----------------------------------------------------------------------------
+# Globals, utilities and helpers
+#-----------------------------------------------------------------------------
+
+_examples = """
+ipython --matplotlib # enable matplotlib integration
+ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
+
+ipython --log-level=DEBUG # set logging to DEBUG
+ipython --profile=foo # start with profile foo
+
+ipython profile create foo # create profile foo w/ default config files
+ipython help profile # show the help for the profile subcmd
+
+ipython locate # print the path to the IPython directory
+ipython locate profile foo # print the path to the directory for profile `foo`
+"""
+
+#-----------------------------------------------------------------------------
+# Crash handler for this application
+#-----------------------------------------------------------------------------
+
+class IPAppCrashHandler(CrashHandler):
+ """sys.excepthook for IPython itself, leaves a detailed report on disk."""
+
+ def __init__(self, app):
+ contact_name = release.author
+ contact_email = release.author_email
+ bug_tracker = 'https://github.com/ipython/ipython/issues'
+ super(IPAppCrashHandler,self).__init__(
+ app, contact_name, contact_email, bug_tracker
+ )
+
+ def make_report(self,traceback):
+ """Return a string containing a crash report."""
+
+ sec_sep = self.section_sep
+ # Start with parent report
+ report = [super(IPAppCrashHandler, self).make_report(traceback)]
+ # Add interactive-specific info we may have
+ rpt_add = report.append
+ try:
+ rpt_add(sec_sep+"History of session input:")
+ for line in self.app.shell.user_ns['_ih']:
+ rpt_add(line)
+ rpt_add('\n*** Last line of input (may not be in above history):\n')
+ rpt_add(self.app.shell._last_input_line+'\n')
+ except:
+ pass
+
+ return ''.join(report)
+
+#-----------------------------------------------------------------------------
+# Aliases and Flags
+#-----------------------------------------------------------------------------
+flags = dict(base_flags)
+flags.update(shell_flags)
+frontend_flags = {}
+addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
+addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
+ 'Turn on auto editing of files with syntax errors.',
+ 'Turn off auto editing of files with syntax errors.'
+)
addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit",
)
-addflag('banner', 'TerminalIPythonApp.display_banner',
- "Display a banner upon starting IPython.",
- "Don't display a banner upon starting IPython."
-)
-addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
- """Set to confirm when you try to exit IPython with an EOF (Control-D
- in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
- you can force a direct exit without any confirmation.""",
- "Don't prompt the user when exiting."
-)
-addflag('term-title', 'TerminalInteractiveShell.term_title',
- "Enable auto setting the terminal title.",
- "Disable auto setting the terminal title."
-)
-classic_config = Config()
-classic_config.InteractiveShell.cache_size = 0
-classic_config.PlainTextFormatter.pprint = False
+addflag('banner', 'TerminalIPythonApp.display_banner',
+ "Display a banner upon starting IPython.",
+ "Don't display a banner upon starting IPython."
+)
+addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
+ """Set to confirm when you try to exit IPython with an EOF (Control-D
+ in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
+ you can force a direct exit without any confirmation.""",
+ "Don't prompt the user when exiting."
+)
+addflag('term-title', 'TerminalInteractiveShell.term_title',
+ "Enable auto setting the terminal title.",
+ "Disable auto setting the terminal title."
+)
+classic_config = Config()
+classic_config.InteractiveShell.cache_size = 0
+classic_config.PlainTextFormatter.pprint = False
classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
-classic_config.InteractiveShell.separate_in = ''
-classic_config.InteractiveShell.separate_out = ''
-classic_config.InteractiveShell.separate_out2 = ''
-classic_config.InteractiveShell.colors = 'NoColor'
-classic_config.InteractiveShell.xmode = 'Plain'
-
-frontend_flags['classic']=(
- classic_config,
- "Gives IPython a similar feel to the classic Python prompt."
-)
-# # log doesn't make so much sense this way anymore
-# paa('--log','-l',
-# action='store_true', dest='InteractiveShell.logstart',
-# help="Start logging to the default log file (./ipython_log.py).")
-#
-# # quick is harder to implement
-frontend_flags['quick']=(
- {'TerminalIPythonApp' : {'quick' : True}},
- "Enable quick startup with no config files."
-)
-
-frontend_flags['i'] = (
- {'TerminalIPythonApp' : {'force_interact' : True}},
- """If running code from the command line, become interactive afterwards.
- It is often useful to follow this with `--` to treat remaining flags as
- script arguments.
- """
-)
-flags.update(frontend_flags)
-
-aliases = dict(base_aliases)
-aliases.update(shell_aliases)
-
-#-----------------------------------------------------------------------------
-# Main classes and functions
-#-----------------------------------------------------------------------------
-
-
-class LocateIPythonApp(BaseIPythonApplication):
- description = """print the path to the IPython dir"""
- subcommands = Dict(dict(
- profile=('IPython.core.profileapp.ProfileLocate',
- "print the path to an IPython profile directory",
- ),
- ))
- def start(self):
- if self.subapp is not None:
- return self.subapp.start()
- else:
- print(self.ipython_dir)
-
-
-class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
- name = u'ipython'
- description = usage.cl_usage
- crash_handler_class = IPAppCrashHandler
- examples = _examples
-
- flags = Dict(flags)
- aliases = Dict(aliases)
- classes = List()
+classic_config.InteractiveShell.separate_in = ''
+classic_config.InteractiveShell.separate_out = ''
+classic_config.InteractiveShell.separate_out2 = ''
+classic_config.InteractiveShell.colors = 'NoColor'
+classic_config.InteractiveShell.xmode = 'Plain'
+
+frontend_flags['classic']=(
+ classic_config,
+ "Gives IPython a similar feel to the classic Python prompt."
+)
+# # log doesn't make so much sense this way anymore
+# paa('--log','-l',
+# action='store_true', dest='InteractiveShell.logstart',
+# help="Start logging to the default log file (./ipython_log.py).")
+#
+# # quick is harder to implement
+frontend_flags['quick']=(
+ {'TerminalIPythonApp' : {'quick' : True}},
+ "Enable quick startup with no config files."
+)
+
+frontend_flags['i'] = (
+ {'TerminalIPythonApp' : {'force_interact' : True}},
+ """If running code from the command line, become interactive afterwards.
+ It is often useful to follow this with `--` to treat remaining flags as
+ script arguments.
+ """
+)
+flags.update(frontend_flags)
+
+aliases = dict(base_aliases)
+aliases.update(shell_aliases)
+
+#-----------------------------------------------------------------------------
+# Main classes and functions
+#-----------------------------------------------------------------------------
+
+
+class LocateIPythonApp(BaseIPythonApplication):
+ description = """print the path to the IPython dir"""
+ subcommands = Dict(dict(
+ profile=('IPython.core.profileapp.ProfileLocate',
+ "print the path to an IPython profile directory",
+ ),
+ ))
+ def start(self):
+ if self.subapp is not None:
+ return self.subapp.start()
+ else:
+ print(self.ipython_dir)
+
+
+class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
+ name = u'ipython'
+ description = usage.cl_usage
+ crash_handler_class = IPAppCrashHandler
+ examples = _examples
+
+ flags = Dict(flags)
+ aliases = Dict(aliases)
+ classes = List()
interactive_shell_class = Type(
klass=object, # use default_value otherwise which only allow subclasses.
@@ -193,185 +193,185 @@ class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
).tag(config=True)
@default('classes')
- def _classes_default(self):
- """This has to be in a method, for TerminalIPythonApp to be available."""
- return [
- InteractiveShellApp, # ShellApp comes before TerminalApp, because
- self.__class__, # it will also affect subclasses (e.g. QtConsole)
- TerminalInteractiveShell,
- HistoryManager,
- ProfileDir,
- PlainTextFormatter,
- IPCompleter,
- ScriptMagics,
- StoreMagics,
- ]
-
- deprecated_subcommands = dict(
- qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
- """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
- ),
- notebook=('notebook.notebookapp.NotebookApp',
- """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
- ),
- console=('jupyter_console.app.ZMQTerminalIPythonApp',
- """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
- ),
- nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
- "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
- ),
- trust=('nbformat.sign.TrustNotebookApp',
- "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
- ),
- kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
- "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
- ),
- )
- subcommands = dict(
- profile = ("IPython.core.profileapp.ProfileApp",
- "Create and manage IPython profiles."
- ),
- kernel = ("ipykernel.kernelapp.IPKernelApp",
- "Start a kernel without an attached frontend."
- ),
- locate=('IPython.terminal.ipapp.LocateIPythonApp',
- LocateIPythonApp.description
- ),
- history=('IPython.core.historyapp.HistoryApp',
- "Manage the IPython history database."
- ),
- )
- deprecated_subcommands['install-nbextension'] = (
- "notebook.nbextensions.InstallNBExtensionApp",
- "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
- )
- subcommands.update(deprecated_subcommands)
-
- # *do* autocreate requested profile, but don't create the config file.
- auto_create=Bool(True)
- # configurables
+ def _classes_default(self):
+ """This has to be in a method, for TerminalIPythonApp to be available."""
+ return [
+ InteractiveShellApp, # ShellApp comes before TerminalApp, because
+ self.__class__, # it will also affect subclasses (e.g. QtConsole)
+ TerminalInteractiveShell,
+ HistoryManager,
+ ProfileDir,
+ PlainTextFormatter,
+ IPCompleter,
+ ScriptMagics,
+ StoreMagics,
+ ]
+
+ deprecated_subcommands = dict(
+ qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
+ """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
+ ),
+ notebook=('notebook.notebookapp.NotebookApp',
+ """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
+ ),
+ console=('jupyter_console.app.ZMQTerminalIPythonApp',
+ """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
+ ),
+ nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
+ "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
+ ),
+ trust=('nbformat.sign.TrustNotebookApp',
+ "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
+ ),
+ kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
+ "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
+ ),
+ )
+ subcommands = dict(
+ profile = ("IPython.core.profileapp.ProfileApp",
+ "Create and manage IPython profiles."
+ ),
+ kernel = ("ipykernel.kernelapp.IPKernelApp",
+ "Start a kernel without an attached frontend."
+ ),
+ locate=('IPython.terminal.ipapp.LocateIPythonApp',
+ LocateIPythonApp.description
+ ),
+ history=('IPython.core.historyapp.HistoryApp',
+ "Manage the IPython history database."
+ ),
+ )
+ deprecated_subcommands['install-nbextension'] = (
+ "notebook.nbextensions.InstallNBExtensionApp",
+ "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
+ )
+ subcommands.update(deprecated_subcommands)
+
+ # *do* autocreate requested profile, but don't create the config file.
+ auto_create=Bool(True)
+ # configurables
quick = Bool(False,
- help="""Start IPython quickly by skipping the loading of config files."""
+ help="""Start IPython quickly by skipping the loading of config files."""
).tag(config=True)
@observe('quick')
def _quick_changed(self, change):
if change['new']:
- self.load_config_file = lambda *a, **kw: None
-
+ self.load_config_file = lambda *a, **kw: None
+
display_banner = Bool(True,
- help="Whether to display a banner upon starting IPython."
+ help="Whether to display a banner upon starting IPython."
).tag(config=True)
-
- # if there is code or files to run from the cmd line, don't interact
- # unless the --i flag (App.force_interact) is true.
+
+ # if there is code or files to run from the cmd line, don't interact
+ # unless the --i flag (App.force_interact) is true.
force_interact = Bool(False,
- help="""If a command or file is given via the command-line,
- e.g. 'ipython foo.py', start an interactive shell after executing the
- file or command."""
+ help="""If a command or file is given via the command-line,
+ e.g. 'ipython foo.py', start an interactive shell after executing the
+ file or command."""
).tag(config=True)
@observe('force_interact')
def _force_interact_changed(self, change):
if change['new']:
- self.interact = True
-
+ self.interact = True
+
@observe('file_to_run', 'code_to_run', 'module_to_run')
def _file_to_run_changed(self, change):
new = change['new']
- if new:
- self.something_to_run = True
- if new and not self.force_interact:
- self.interact = False
-
- # internal, not-configurable
- something_to_run=Bool(False)
-
- def parse_command_line(self, argv=None):
- """override to allow old '-pylab' flag with deprecation warning"""
-
- argv = sys.argv[1:] if argv is None else argv
-
- if '-pylab' in argv:
- # deprecated `-pylab` given,
- # warn and transform into current syntax
- argv = argv[:] # copy, don't clobber
- idx = argv.index('-pylab')
+ if new:
+ self.something_to_run = True
+ if new and not self.force_interact:
+ self.interact = False
+
+ # internal, not-configurable
+ something_to_run=Bool(False)
+
+ def parse_command_line(self, argv=None):
+ """override to allow old '-pylab' flag with deprecation warning"""
+
+ argv = sys.argv[1:] if argv is None else argv
+
+ if '-pylab' in argv:
+ # deprecated `-pylab` given,
+ # warn and transform into current syntax
+ argv = argv[:] # copy, don't clobber
+ idx = argv.index('-pylab')
warnings.warn("`-pylab` flag has been deprecated.\n"
- " Use `--matplotlib <backend>` and import pylab manually.")
- argv[idx] = '--pylab'
-
- return super(TerminalIPythonApp, self).parse_command_line(argv)
-
- @catch_config_error
- def initialize(self, argv=None):
- """Do actions after construct, but before starting the app."""
- super(TerminalIPythonApp, self).initialize(argv)
- if self.subapp is not None:
- # don't bother initializing further, starting subapp
- return
- # print self.extra_args
- if self.extra_args and not self.something_to_run:
- self.file_to_run = self.extra_args[0]
- self.init_path()
- # create the shell
- self.init_shell()
- # and draw the banner
- self.init_banner()
- # Now a variety of things that happen after the banner is printed.
- self.init_gui_pylab()
- self.init_extensions()
- self.init_code()
-
- def init_shell(self):
- """initialize the InteractiveShell instance"""
- # Create an InteractiveShell instance.
- # shell.display_banner should always be False for the terminal
- # based app, because we call shell.show_banner() by hand below
- # so the banner shows *before* all extension loading stuff.
+ " Use `--matplotlib <backend>` and import pylab manually.")
+ argv[idx] = '--pylab'
+
+ return super(TerminalIPythonApp, self).parse_command_line(argv)
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ """Do actions after construct, but before starting the app."""
+ super(TerminalIPythonApp, self).initialize(argv)
+ if self.subapp is not None:
+ # don't bother initializing further, starting subapp
+ return
+ # print self.extra_args
+ if self.extra_args and not self.something_to_run:
+ self.file_to_run = self.extra_args[0]
+ self.init_path()
+ # create the shell
+ self.init_shell()
+ # and draw the banner
+ self.init_banner()
+ # Now a variety of things that happen after the banner is printed.
+ self.init_gui_pylab()
+ self.init_extensions()
+ self.init_code()
+
+ def init_shell(self):
+ """initialize the InteractiveShell instance"""
+ # Create an InteractiveShell instance.
+ # shell.display_banner should always be False for the terminal
+ # based app, because we call shell.show_banner() by hand below
+ # so the banner shows *before* all extension loading stuff.
self.shell = self.interactive_shell_class.instance(parent=self,
profile_dir=self.profile_dir,
- ipython_dir=self.ipython_dir, user_ns=self.user_ns)
- self.shell.configurables.append(self)
-
- def init_banner(self):
- """optionally display the banner"""
- if self.display_banner and self.interact:
- self.shell.show_banner()
- # Make sure there is a space below the banner.
- if self.log_level <= logging.INFO: print()
-
- def _pylab_changed(self, name, old, new):
- """Replace --pylab='inline' with --pylab='auto'"""
- if new == 'inline':
+ ipython_dir=self.ipython_dir, user_ns=self.user_ns)
+ self.shell.configurables.append(self)
+
+ def init_banner(self):
+ """optionally display the banner"""
+ if self.display_banner and self.interact:
+ self.shell.show_banner()
+ # Make sure there is a space below the banner.
+ if self.log_level <= logging.INFO: print()
+
+ def _pylab_changed(self, name, old, new):
+ """Replace --pylab='inline' with --pylab='auto'"""
+ if new == 'inline':
warnings.warn("'inline' not available as pylab backend, "
- "using 'auto' instead.")
- self.pylab = 'auto'
-
- def start(self):
- if self.subapp is not None:
- return self.subapp.start()
- # perform any preexec steps:
- if self.interact:
- self.log.debug("Starting IPython's mainloop...")
- self.shell.mainloop()
- else:
- self.log.debug("IPython not interactive...")
-
-def load_default_config(ipython_dir=None):
- """Load the default config file from the default ipython_dir.
-
- This is useful for embedded shells.
- """
- if ipython_dir is None:
- ipython_dir = get_ipython_dir()
-
- profile_dir = os.path.join(ipython_dir, 'profile_default')
+ "using 'auto' instead.")
+ self.pylab = 'auto'
+
+ def start(self):
+ if self.subapp is not None:
+ return self.subapp.start()
+ # perform any preexec steps:
+ if self.interact:
+ self.log.debug("Starting IPython's mainloop...")
+ self.shell.mainloop()
+ else:
+ self.log.debug("IPython not interactive...")
+
+def load_default_config(ipython_dir=None):
+ """Load the default config file from the default ipython_dir.
+
+ This is useful for embedded shells.
+ """
+ if ipython_dir is None:
+ ipython_dir = get_ipython_dir()
+
+ profile_dir = os.path.join(ipython_dir, 'profile_default')
app = TerminalIPythonApp()
app.config_file_paths.append(profile_dir)
app.load_config_file()
return app.config
-
-launch_new_instance = TerminalIPythonApp.launch_instance
-
-
-if __name__ == '__main__':
- launch_new_instance()
+
+launch_new_instance = TerminalIPythonApp.launch_instance
+
+
+if __name__ == '__main__':
+ launch_new_instance()
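The module above is also the supported programmatic entry point; a minimal sketch (illustrative, not part of the diff)::

    from IPython.terminal.ipapp import TerminalIPythonApp, load_default_config

    # Inspect the default profile's configuration without starting a shell.
    config = load_default_config()
    print(config.TerminalInteractiveShell)

    # Roughly equivalent to running `ipython` from the command line.
    app = TerminalIPythonApp.instance()
    app.initialize(argv=[])
    app.start()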
diff --git a/contrib/python/ipython/py2/IPython/testing/__init__.py b/contrib/python/ipython/py2/IPython/testing/__init__.py
index 6011b39f77..165f503169 100644
--- a/contrib/python/ipython/py2/IPython/testing/__init__.py
+++ b/contrib/python/ipython/py2/IPython/testing/__init__.py
@@ -1,38 +1,38 @@
-"""Testing support (tools to test IPython itself).
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2009-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Functions
-#-----------------------------------------------------------------------------
-
-# User-level entry point for testing
-def test(**kwargs):
- """Run the entire IPython test suite.
-
- Any of the options for run_iptestall() may be passed as keyword arguments.
-
- For example::
-
- IPython.test(testgroups=['lib', 'config', 'utils'], fast=2)
-
- will run those three sections of the test suite, using two processes.
- """
-
- # Do the import internally, so that this function doesn't increase total
- # import time
- from .iptestcontroller import run_iptestall, default_options
- options = default_options()
- for name, val in kwargs.items():
- setattr(options, name, val)
- run_iptestall(options)
-
-# So nose doesn't try to run this as a test itself, and we don't end up with
-# an infinite test loop
-test.__test__ = False
+"""Testing support (tools to test IPython itself).
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2009-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Functions
+#-----------------------------------------------------------------------------
+
+# User-level entry point for testing
+def test(**kwargs):
+ """Run the entire IPython test suite.
+
+ Any of the options for run_iptestall() may be passed as keyword arguments.
+
+ For example::
+
+ IPython.test(testgroups=['lib', 'config', 'utils'], fast=2)
+
+ will run those three sections of the test suite, using two processes.
+ """
+
+ # Do the import internally, so that this function doesn't increase total
+ # import time
+ from .iptestcontroller import run_iptestall, default_options
+ options = default_options()
+ for name, val in kwargs.items():
+ setattr(options, name, val)
+ run_iptestall(options)
+
+# So nose doesn't try to run this as a test itself, and we don't end up with
+# an infinite test loop
+test.__test__ = False
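Given the `test()` entry point above, the suite can be driven from Python with the same options the docstring describes; for example::

    import IPython

    # Run only the 'utils' test group, using two worker processes.
    IPython.test(testgroups=['utils'], fast=2)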
diff --git a/contrib/python/ipython/py2/IPython/testing/__main__.py b/contrib/python/ipython/py2/IPython/testing/__main__.py
index 179ec6f699..4b0bb8ba9c 100644
--- a/contrib/python/ipython/py2/IPython/testing/__main__.py
+++ b/contrib/python/ipython/py2/IPython/testing/__main__.py
@@ -1,3 +1,3 @@
-if __name__ == '__main__':
- from IPython.testing import iptestcontroller
- iptestcontroller.main()
+if __name__ == '__main__':
+ from IPython.testing import iptestcontroller
+ iptestcontroller.main()
diff --git a/contrib/python/ipython/py2/IPython/testing/decorators.py b/contrib/python/ipython/py2/IPython/testing/decorators.py
index 2fe72f6cae..c9807ce70e 100644
--- a/contrib/python/ipython/py2/IPython/testing/decorators.py
+++ b/contrib/python/ipython/py2/IPython/testing/decorators.py
@@ -1,384 +1,384 @@
-# -*- coding: utf-8 -*-
-"""Decorators for labeling test objects.
-
-Decorators that merely return a modified version of the original function
-object are straightforward. Decorators that return a new function object need
-to use nose.tools.make_decorator(original_function)(decorator) in returning the
-decorator, in order to preserve metadata such as function name, setup and
-teardown functions and so on - see nose.tools for more information.
-
-This module provides a set of useful decorators meant to be ready to use in
-your own tests. See the bottom of the file for the ready-made ones, and if you
-find yourself writing a new one that may be of generic use, add it here.
-
-Included decorators:
-
-
-Lightweight testing that remains unittest-compatible.
-
-- An @as_unittest decorator can be used to tag any normal parameter-less
- function as a unittest TestCase. Then, both nose and normal unittest will
- recognize it as such. This will make it easier to migrate away from Nose if
- we ever need/want to while maintaining very lightweight tests.
-
-NOTE: This file contains IPython-specific decorators. Using the machinery in
-IPython.external.decorators, we import either numpy.testing.decorators if numpy is
-available, OR use equivalent code in IPython.external._decorators, which
-we've copied verbatim from numpy.
-
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-import os
-import tempfile
-import unittest
-import warnings
-
-from decorator import decorator
-
-# Expose the unittest-driven decorators
-from .ipunittest import ipdoctest, ipdocstring
-
-# Grab the numpy-specific decorators which we keep in a file that we
-# occasionally update from upstream: decorators.py is a copy of
-# numpy.testing.decorators, we expose all of it here.
-from IPython.external.decorators import *
-
-# For onlyif_cmd_exists decorator
+# -*- coding: utf-8 -*-
+"""Decorators for labeling test objects.
+
+Decorators that merely return a modified version of the original function
+object are straightforward. Decorators that return a new function object need
+to use nose.tools.make_decorator(original_function)(decorator) in returning the
+decorator, in order to preserve metadata such as function name, setup and
+teardown functions and so on - see nose.tools for more information.
+
+This module provides a set of useful decorators meant to be ready to use in
+your own tests. See the bottom of the file for the ready-made ones, and if you
+find yourself writing a new one that may be of generic use, add it here.
+
+Included decorators:
+
+
+Lightweight testing that remains unittest-compatible.
+
+- An @as_unittest decorator can be used to tag any normal parameter-less
+ function as a unittest TestCase. Then, both nose and normal unittest will
+ recognize it as such. This will make it easier to migrate away from Nose if
+ we ever need/want to while maintaining very lightweight tests.
+
+NOTE: This file contains IPython-specific decorators. Using the machinery in
+IPython.external.decorators, we import either numpy.testing.decorators if numpy is
+available, OR use equivalent code in IPython.external._decorators, which
+we've copied verbatim from numpy.
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import os
+import tempfile
+import unittest
+import warnings
+
+from decorator import decorator
+
+# Expose the unittest-driven decorators
+from .ipunittest import ipdoctest, ipdocstring
+
+# Grab the numpy-specific decorators which we keep in a file that we
+# occasionally update from upstream: decorators.py is a copy of
+# numpy.testing.decorators, we expose all of it here.
+from IPython.external.decorators import *
+
+# For onlyif_cmd_exists decorator
from IPython.utils.py3compat import string_types, which, PY2, PY3, PYPY
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-# Simple example of the basic idea
-def as_unittest(func):
- """Decorator to make a simple function into a normal test via unittest."""
- class Tester(unittest.TestCase):
- def test(self):
- func()
-
- Tester.__name__ = func.__name__
-
- return Tester
-
-# Utility functions
-
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+# Simple example of the basic idea
+def as_unittest(func):
+ """Decorator to make a simple function into a normal test via unittest."""
+ class Tester(unittest.TestCase):
+ def test(self):
+ func()
+
+ Tester.__name__ = func.__name__
+
+ return Tester
+
+# Utility functions
+
def apply_wrapper(wrapper, func):
- """Apply a wrapper to a function for decoration.
-
- This mixes Michele Simionato's decorator tool with nose's make_decorator,
- to apply a wrapper in a decorator so that all nose attributes, as well as
- function signature and other properties, survive the decoration cleanly.
- This will ensure that wrapped functions can still be well introspected via
- IPython, for example.
- """
+ """Apply a wrapper to a function for decoration.
+
+ This mixes Michele Simionato's decorator tool with nose's make_decorator,
+ to apply a wrapper in a decorator so that all nose attributes, as well as
+ function signature and other properties, survive the decoration cleanly.
+ This will ensure that wrapped functions can still be well introspected via
+ IPython, for example.
+ """
warnings.warn("The function `apply_wrapper` is deprecated since IPython 4.0",
DeprecationWarning, stacklevel=2)
- import nose.tools
-
- return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
-
-
+ import nose.tools
+
+ return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
+
+
def make_label_dec(label, ds=None):
- """Factory function to create a decorator that applies one or more labels.
-
- Parameters
- ----------
- label : string or sequence
- One or more labels that will be applied by the decorator to the functions
- it decorates. Labels are attributes of the decorated function with their
- value set to True.
-
- ds : string
- An optional docstring for the resulting decorator. If not given, a
- default docstring is auto-generated.
-
- Returns
- -------
- A decorator.
-
- Examples
- --------
-
- A simple labeling decorator:
-
- >>> slow = make_label_dec('slow')
- >>> slow.__doc__
- "Labels a test as 'slow'."
-
- And one that uses multiple labels and a custom docstring:
-
- >>> rare = make_label_dec(['slow','hard'],
- ... "Mix labels 'slow' and 'hard' for rare tests.")
- >>> rare.__doc__
- "Mix labels 'slow' and 'hard' for rare tests."
-
- Now, let's test using this one:
- >>> @rare
- ... def f(): pass
- ...
- >>>
- >>> f.slow
- True
- >>> f.hard
- True
- """
-
+ """Factory function to create a decorator that applies one or more labels.
+
+ Parameters
+ ----------
+ label : string or sequence
+ One or more labels that will be applied by the decorator to the functions
+ it decorates. Labels are attributes of the decorated function with their
+ value set to True.
+
+ ds : string
+ An optional docstring for the resulting decorator. If not given, a
+ default docstring is auto-generated.
+
+ Returns
+ -------
+ A decorator.
+
+ Examples
+ --------
+
+ A simple labeling decorator:
+
+ >>> slow = make_label_dec('slow')
+ >>> slow.__doc__
+ "Labels a test as 'slow'."
+
+ And one that uses multiple labels and a custom docstring:
+
+ >>> rare = make_label_dec(['slow','hard'],
+ ... "Mix labels 'slow' and 'hard' for rare tests.")
+ >>> rare.__doc__
+ "Mix labels 'slow' and 'hard' for rare tests."
+
+ Now, let's test using this one:
+ >>> @rare
+ ... def f(): pass
+ ...
+ >>>
+ >>> f.slow
+ True
+ >>> f.hard
+ True
+ """
+
warnings.warn("The function `make_label_dec` is deprecated since IPython 4.0",
DeprecationWarning, stacklevel=2)
- if isinstance(label, string_types):
- labels = [label]
- else:
- labels = label
-
- # Validate that the given label(s) are OK for use in setattr() by doing a
- # dry run on a dummy function.
- tmp = lambda : None
- for label in labels:
- setattr(tmp,label,True)
-
- # This is the actual decorator we'll return
- def decor(f):
- for label in labels:
- setattr(f,label,True)
- return f
-
- # Apply the user's docstring, or autogenerate a basic one
- if ds is None:
- ds = "Labels a test as %r." % label
- decor.__doc__ = ds
-
- return decor
-
-
-# Inspired by numpy's skipif, but uses the full apply_wrapper utility to
-# preserve function metadata better and allows the skip condition to be a
-# callable.
-def skipif(skip_condition, msg=None):
- ''' Make a function raise a SkipTest exception if skip_condition is true
-
- Parameters
- ----------
-
- skip_condition : bool or callable
- Flag to determine whether to skip test. If the condition is a
- callable, it is used at runtime to dynamically make the decision. This
- is useful for tests that may require costly imports, to delay the cost
- until the test suite is actually executed.
- msg : string
- Message to give on raising a SkipTest exception.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes SkipTest
- to be raised when the skip_condition was True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- You will see from the code that we had to further decorate the
- decorator with the nose.tools.make_decorator function in order to
- transmit function name, and various other metadata.
- '''
-
- def skip_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- # Allow for both boolean or callable skip conditions.
- if callable(skip_condition):
- skip_val = skip_condition
- else:
- skip_val = lambda : skip_condition
-
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None: out = 'Test skipped due to test condition.'
- else: out = msg
- return "Skipping test: %s. %s" % (func.__name__,out)
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise nose.SkipTest(get_msg(f,msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise nose.SkipTest(get_msg(f,msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual generator.
- if nose.util.isgenerator(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
-
- return nose.tools.make_decorator(f)(skipper)
-
- return skip_decorator
-
-# A version with the condition set to true, common case just to attach a message
-# to a skip decorator
-def skip(msg=None):
- """Decorator factory - mark a test function for skipping from test suite.
-
- Parameters
- ----------
- msg : string
- Optional message to be added.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes SkipTest
- to be raised, with the optional message added.
- """
-
- return skipif(True,msg)
-
-
-def onlyif(condition, msg):
- """The reverse from skipif, see skipif for details."""
-
- if callable(condition):
- skip_condition = lambda : not condition()
- else:
- skip_condition = lambda : not condition
-
- return skipif(skip_condition, msg)
-
-#-----------------------------------------------------------------------------
-# Utility functions for decorators
-def module_not_available(module):
- """Can module be imported? Returns true if module does NOT import.
-
- This is used to make a decorator to skip tests that require module to be
- available, but delay the 'import numpy' to test execution time.
- """
- try:
- mod = __import__(module)
- mod_not_avail = False
- except ImportError:
- mod_not_avail = True
-
- return mod_not_avail
-
-
-def decorated_dummy(dec, name):
- """Return a dummy function decorated with dec, with the given name.
-
- Examples
- --------
- import IPython.testing.decorators as dec
- setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
- """
+ if isinstance(label, string_types):
+ labels = [label]
+ else:
+ labels = label
+
+ # Validate that the given label(s) are OK for use in setattr() by doing a
+ # dry run on a dummy function.
+ tmp = lambda : None
+ for label in labels:
+ setattr(tmp,label,True)
+
+ # This is the actual decorator we'll return
+ def decor(f):
+ for label in labels:
+ setattr(f,label,True)
+ return f
+
+ # Apply the user's docstring, or autogenerate a basic one
+ if ds is None:
+ ds = "Labels a test as %r." % label
+ decor.__doc__ = ds
+
+ return decor
+
+
+# Inspired by numpy's skipif, but uses the full apply_wrapper utility to
+# preserve function metadata better and allows the skip condition to be a
+# callable.
+def skipif(skip_condition, msg=None):
+ ''' Make a function raise a SkipTest exception if skip_condition is true
+
+ Parameters
+ ----------
+
+ skip_condition : bool or callable
+ Flag to determine whether to skip test. If the condition is a
+ callable, it is used at runtime to dynamically make the decision. This
+ is useful for tests that may require costly imports, to delay the cost
+ until the test suite is actually executed.
+ msg : string
+ Message to give on raising a SkipTest exception.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes SkipTest
+ to be raised when the skip_condition was True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ You will see from the code that we had to further decorate the
+ decorator with the nose.tools.make_decorator function in order to
+ transmit function name, and various other metadata.
+ '''
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Allow for both boolean or callable skip conditions.
+ if callable(skip_condition):
+ skip_val = skip_condition
+ else:
+ skip_val = lambda : skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None: out = 'Test skipped due to test condition.'
+ else: out = msg
+ return "Skipping test: %s. %s" % (func.__name__,out)
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise nose.SkipTest(get_msg(f,msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise nose.SkipTest(get_msg(f,msg))
+ else:
+ for x in f(*args, **kwargs):
+ yield x
+
+ # Choose the right skipper to use when building the actual generator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
+
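+# Usage sketch (illustrative only): passing a callable delays the check, and
+# any import cost, until the test actually runs.
+#
+#     @skipif(lambda: module_not_available('numpy'), "This test requires numpy")
+#     def test_numpy_sum():
+#         import numpy
+#         assert numpy.arange(4).sum() == 6
+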
+# A version with the condition set to true, common case just to attach a message
+# to a skip decorator
+def skip(msg=None):
+ """Decorator factory - mark a test function for skipping from test suite.
+
+ Parameters
+ ----------
+ msg : string
+ Optional message to be added.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes SkipTest
+ to be raised, with the optional message added.
+ """
+
+ return skipif(True,msg)
+
+
+def onlyif(condition, msg):
+    """The reverse of skipif; see skipif for details."""
+
+ if callable(condition):
+ skip_condition = lambda : not condition()
+ else:
+ skip_condition = lambda : not condition
+
+ return skipif(skip_condition, msg)
+
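+# Usage sketch (illustrative only): onlyif inverts the condition, so the test
+# runs only when the condition holds.
+#
+#     @onlyif(sys.platform.startswith('linux'), "only meaningful on Linux")
+#     def test_linux_specific_behaviour():
+#         pass
+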
+#-----------------------------------------------------------------------------
+# Utility functions for decorators
+def module_not_available(module):
+    """Can module be imported? Returns True if the module does NOT import.
+
+    This is used to make decorators that skip tests requiring a module to be
+    available, while delaying the actual import (e.g. 'import numpy') until
+    test execution time.
+ """
+ try:
+ mod = __import__(module)
+ mod_not_avail = False
+ except ImportError:
+ mod_not_avail = True
+
+ return mod_not_avail
+
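+# For instance (illustrative): module_not_available('sys') is False, while a
+# bogus name such as module_not_available('no_such_module') is True.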
+
+def decorated_dummy(dec, name):
+ """Return a dummy function decorated with dec, with the given name.
+
+ Examples
+ --------
+ import IPython.testing.decorators as dec
+ setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
+ """
warnings.warn("The function `decorated_dummy` is deprecated since IPython 4.0",
DeprecationWarning, stacklevel=2)
- dummy = lambda: None
- dummy.__name__ = name
- return dec(dummy)
-
-#-----------------------------------------------------------------------------
-# Decorators for public use
-
-# Decorators to skip certain tests on specific platforms.
-skip_win32 = skipif(sys.platform == 'win32',
- "This test does not run under Windows")
-skip_linux = skipif(sys.platform.startswith('linux'),
- "This test does not run under Linux")
-skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
-
-
-# Decorators to skip tests if not on specific platforms.
-skip_if_not_win32 = skipif(sys.platform != 'win32',
- "This test only runs under Windows")
-skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
- "This test only runs under Linux")
-skip_if_not_osx = skipif(sys.platform != 'darwin',
- "This test only runs under OSX")
-
-
-_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
- os.environ.get('DISPLAY', '') == '')
-_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
-
-skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
-
-# not a decorator itself, returns a dummy function to be used as setup
-def skip_file_no_x11(name):
+ dummy = lambda: None
+ dummy.__name__ = name
+ return dec(dummy)
+
+#-----------------------------------------------------------------------------
+# Decorators for public use
+
+# Decorators to skip certain tests on specific platforms.
+skip_win32 = skipif(sys.platform == 'win32',
+ "This test does not run under Windows")
+skip_linux = skipif(sys.platform.startswith('linux'),
+ "This test does not run under Linux")
+skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
+
+
+# Decorators to skip tests if not on specific platforms.
+skip_if_not_win32 = skipif(sys.platform != 'win32',
+ "This test only runs under Windows")
+skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
+ "This test only runs under Linux")
+skip_if_not_osx = skipif(sys.platform != 'darwin',
+ "This test only runs under OSX")
+
+
+_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
+ os.environ.get('DISPLAY', '') == '')
+_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
+
+skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
+
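+# Usage sketch (illustrative only; hypothetical GUI test):
+#
+#     @skip_if_no_x11
+#     def test_opens_window():
+#         pass   # skipped on *nix hosts where DISPLAY is unset
+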
+# not a decorator itself, returns a dummy function to be used as setup
+def skip_file_no_x11(name):
warnings.warn("The function `skip_file_no_x11` is deprecated since IPython 4.0",
DeprecationWarning, stacklevel=2)
- return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
-
-# Other skip decorators
-
-# generic skip without module
-skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
-
-skipif_not_numpy = skip_without('numpy')
-
-skipif_not_matplotlib = skip_without('matplotlib')
-
-skipif_not_sympy = skip_without('sympy')
-
-skip_known_failure = knownfailureif(True,'This test is known to fail')
-
-known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
- 'This test is known to fail on Python 3.')
-
+ return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
+
+# Other skip decorators
+
+# generic skip without module
+skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
+
+skipif_not_numpy = skip_without('numpy')
+
+skipif_not_matplotlib = skip_without('matplotlib')
+
+skipif_not_sympy = skip_without('sympy')
+
+skip_known_failure = knownfailureif(True,'This test is known to fail')
+
+known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
+ 'This test is known to fail on Python 3.')
+
cpython2_only = skipif(PY3 or PYPY, "This test only runs on CPython 2.")
-py2_only = skipif(PY3, "This test only runs on Python 2.")
-py3_only = skipif(PY2, "This test only runs on Python 3.")
-
-# A null 'decorator', useful to make more readable code that needs to pick
-# between different decorators based on OS or other conditions
-null_deco = lambda f: f
-
-# Some tests only run where we can use unicode paths. Note that we can't just
-# check os.path.supports_unicode_filenames, which is always False on Linux.
-try:
- f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
-except UnicodeEncodeError:
- unicode_paths = False
-else:
- unicode_paths = True
- f.close()
-
-onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
- "where we can use unicode in filenames."))
-
-
-def onlyif_cmds_exist(*commands):
- """
- Decorator to skip test when at least one of `commands` is not found.
- """
- for cmd in commands:
- if not which(cmd):
- return skip("This test runs only if command '{0}' "
- "is installed".format(cmd))
- return null_deco
-
-def onlyif_any_cmd_exists(*commands):
- """
- Decorator to skip test unless at least one of `commands` is found.
- """
+py2_only = skipif(PY3, "This test only runs on Python 2.")
+py3_only = skipif(PY2, "This test only runs on Python 3.")
+
+# A null 'decorator', useful to make more readable code that needs to pick
+# between different decorators based on OS or other conditions
+null_deco = lambda f: f
+
+# Some tests only run where we can use unicode paths. Note that we can't just
+# check os.path.supports_unicode_filenames, which is always False on Linux.
+try:
+ f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
+except UnicodeEncodeError:
+ unicode_paths = False
+else:
+ unicode_paths = True
+ f.close()
+
+onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
+ "where we can use unicode in filenames."))
+
+
+def onlyif_cmds_exist(*commands):
+ """
+ Decorator to skip test when at least one of `commands` is not found.
+ """
+ for cmd in commands:
+ if not which(cmd):
+ return skip("This test runs only if command '{0}' "
+ "is installed".format(cmd))
+ return null_deco
+
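+# Usage sketch (illustrative only; assumes `git` may or may not be on PATH):
+#
+#     @onlyif_cmds_exist('git')
+#     def test_git_integration():
+#         pass   # replaced by a skip when `git` is missing
+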
+def onlyif_any_cmd_exists(*commands):
+ """
+ Decorator to skip test unless at least one of `commands` is found.
+ """
warnings.warn("The function `onlyif_any_cmd_exists` is deprecated since IPython 4.0",
DeprecationWarning, stacklevel=2)
- for cmd in commands:
- if which(cmd):
- return null_deco
- return skip("This test runs only if one of the commands {0} "
- "is installed".format(commands))
+ for cmd in commands:
+ if which(cmd):
+ return null_deco
+ return skip("This test runs only if one of the commands {0} "
+ "is installed".format(commands))
diff --git a/contrib/python/ipython/py2/IPython/testing/globalipapp.py b/contrib/python/ipython/py2/IPython/testing/globalipapp.py
index a40702cc67..3983393112 100644
--- a/contrib/python/ipython/py2/IPython/testing/globalipapp.py
+++ b/contrib/python/ipython/py2/IPython/testing/globalipapp.py
@@ -1,138 +1,138 @@
-"""Global IPython app to support test running.
-
-We must start our own ipython object and heavily muck with it so that all the
-modifications IPython makes to system behavior don't send the doctest machinery
-into a fit. This code should be considered a gross hack, but it gets the job
-done.
-"""
-from __future__ import absolute_import
-from __future__ import print_function
-
+"""Global IPython app to support test running.
+
+We must start our own ipython object and heavily muck with it so that all the
+modifications IPython makes to system behavior don't send the doctest machinery
+into a fit. This code should be considered a gross hack, but it gets the job
+done.
+"""
+from __future__ import absolute_import
+from __future__ import print_function
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-import sys
+
+import sys
import warnings
-
-from . import tools
-
-from IPython.core import page
-from IPython.utils import io
-from IPython.utils import py3compat
-from IPython.utils.py3compat import builtin_mod
-from IPython.terminal.interactiveshell import TerminalInteractiveShell
-
-
-class StreamProxy(io.IOStream):
- """Proxy for sys.stdout/err. This will request the stream *at call time*
- allowing for nose's Capture plugin's redirection of sys.stdout/err.
-
- Parameters
- ----------
- name : str
- The name of the stream. This will be requested anew at every call
-        The name of the stream. This will be requested anew at every call.
-
- def __init__(self, name):
+
+from . import tools
+
+from IPython.core import page
+from IPython.utils import io
+from IPython.utils import py3compat
+from IPython.utils.py3compat import builtin_mod
+from IPython.terminal.interactiveshell import TerminalInteractiveShell
+
+
+class StreamProxy(io.IOStream):
+ """Proxy for sys.stdout/err. This will request the stream *at call time*
+ allowing for nose's Capture plugin's redirection of sys.stdout/err.
+
+ Parameters
+ ----------
+ name : str
+        The name of the stream. This will be requested anew at every call.
+ """
+
+ def __init__(self, name):
warnings.warn("StreamProxy is deprecated and unused as of IPython 5", DeprecationWarning,
stacklevel=2,
)
- self.name=name
-
- @property
- def stream(self):
- return getattr(sys, self.name)
-
- def flush(self):
- self.stream.flush()
-
-
-def get_ipython():
- # This will get replaced by the real thing once we start IPython below
- return start_ipython()
-
-
-# A couple of methods to override those in the running IPython to interact
-# better with doctest (doctest captures on raw stdout, so we need to direct
-# various types of output there otherwise it will miss them).
-
-def xsys(self, cmd):
- """Replace the default system call with a capturing one for doctest.
- """
- # We use getoutput, but we need to strip it because pexpect captures
- # the trailing newline differently from commands.getoutput
- print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout)
- sys.stdout.flush()
-
-
-def _showtraceback(self, etype, evalue, stb):
- """Print the traceback purely on stdout for doctest to capture it.
- """
- print(self.InteractiveTB.stb2text(stb), file=sys.stdout)
-
-
-def start_ipython():
- """Start a global IPython shell, which we need for IPython-specific syntax.
- """
- global get_ipython
-
- # This function should only ever run once!
- if hasattr(start_ipython, 'already_called'):
- return
- start_ipython.already_called = True
-
- # Store certain global objects that IPython modifies
- _displayhook = sys.displayhook
- _excepthook = sys.excepthook
- _main = sys.modules.get('__main__')
-
- # Create custom argv and namespaces for our IPython to be test-friendly
- config = tools.default_config()
+ self.name=name
+
+ @property
+ def stream(self):
+ return getattr(sys, self.name)
+
+ def flush(self):
+ self.stream.flush()
+
+
+def get_ipython():
+ # This will get replaced by the real thing once we start IPython below
+ return start_ipython()
+
+
+# A couple of methods to override those in the running IPython to interact
+# better with doctest (doctest captures on raw stdout, so we need to direct
+# various types of output there otherwise it will miss them).
+
+def xsys(self, cmd):
+ """Replace the default system call with a capturing one for doctest.
+ """
+ # We use getoutput, but we need to strip it because pexpect captures
+ # the trailing newline differently from commands.getoutput
+ print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout)
+ sys.stdout.flush()
+
+
+def _showtraceback(self, etype, evalue, stb):
+ """Print the traceback purely on stdout for doctest to capture it.
+ """
+ print(self.InteractiveTB.stb2text(stb), file=sys.stdout)
+
+
+def start_ipython():
+ """Start a global IPython shell, which we need for IPython-specific syntax.
+ """
+ global get_ipython
+
+ # This function should only ever run once!
+ if hasattr(start_ipython, 'already_called'):
+ return
+ start_ipython.already_called = True
+
+ # Store certain global objects that IPython modifies
+ _displayhook = sys.displayhook
+ _excepthook = sys.excepthook
+ _main = sys.modules.get('__main__')
+
+ # Create custom argv and namespaces for our IPython to be test-friendly
+ config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
-
- # Create and initialize our test-friendly IPython instance.
- shell = TerminalInteractiveShell.instance(config=config,
- )
-
- # A few more tweaks needed for playing nicely with doctests...
-
- # remove history file
- shell.tempfiles.append(config.HistoryManager.hist_file)
-
- # These traps are normally only active for interactive use, set them
- # permanently since we'll be mocking interactive sessions.
- shell.builtin_trap.activate()
-
- # Modify the IPython system call with one that uses getoutput, so that we
- # can capture subcommands and print them to Python's stdout, otherwise the
- # doctest machinery would miss them.
- shell.system = py3compat.MethodType(xsys, shell)
-
- shell._showtraceback = py3compat.MethodType(_showtraceback, shell)
-
- # IPython is ready, now clean up some global state...
-
- # Deactivate the various python system hooks added by ipython for
- # interactive convenience so we don't confuse the doctest system
- sys.modules['__main__'] = _main
- sys.displayhook = _displayhook
- sys.excepthook = _excepthook
-
- # So that ipython magics and aliases can be doctested (they work by making
- # a call into a global _ip object). Also make the top-level get_ipython
- # now return this without recursively calling here again.
- _ip = shell
- get_ipython = _ip.get_ipython
- builtin_mod._ip = _ip
- builtin_mod.get_ipython = get_ipython
-
- # Override paging, so we don't require user interaction during the tests.
- def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
+
+ # Create and initialize our test-friendly IPython instance.
+ shell = TerminalInteractiveShell.instance(config=config,
+ )
+
+ # A few more tweaks needed for playing nicely with doctests...
+
+ # remove history file
+ shell.tempfiles.append(config.HistoryManager.hist_file)
+
+ # These traps are normally only active for interactive use, set them
+ # permanently since we'll be mocking interactive sessions.
+ shell.builtin_trap.activate()
+
+ # Modify the IPython system call with one that uses getoutput, so that we
+ # can capture subcommands and print them to Python's stdout, otherwise the
+ # doctest machinery would miss them.
+ shell.system = py3compat.MethodType(xsys, shell)
+
+ shell._showtraceback = py3compat.MethodType(_showtraceback, shell)
+
+ # IPython is ready, now clean up some global state...
+
+ # Deactivate the various python system hooks added by ipython for
+ # interactive convenience so we don't confuse the doctest system
+ sys.modules['__main__'] = _main
+ sys.displayhook = _displayhook
+ sys.excepthook = _excepthook
+
+ # So that ipython magics and aliases can be doctested (they work by making
+ # a call into a global _ip object). Also make the top-level get_ipython
+ # now return this without recursively calling here again.
+ _ip = shell
+ get_ipython = _ip.get_ipython
+ builtin_mod._ip = _ip
+ builtin_mod.get_ipython = get_ipython
+
+ # Override paging, so we don't require user interaction during the tests.
+ def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
if isinstance(strng, dict):
strng = strng.get('text/plain', '')
- print(strng)
-
- page.orig_page = page.pager_page
- page.pager_page = nopage
-
- return _ip
+ print(strng)
+
+ page.orig_page = page.pager_page
+ page.pager_page = nopage
+
+ return _ip
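+
+# Usage sketch (illustrative only): test code normally reaches the global
+# shell through get_ipython() rather than calling start_ipython() directly.
+#
+#     from IPython.testing.globalipapp import get_ipython
+#     ip = get_ipython()           # starts the global shell on first use
+#     ip.run_cell('x = 1 + 1')     # execute code as if typed at the prompt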
diff --git a/contrib/python/ipython/py2/IPython/testing/iptest.py b/contrib/python/ipython/py2/IPython/testing/iptest.py
index 9a5a3d6f58..4018264125 100644
--- a/contrib/python/ipython/py2/IPython/testing/iptest.py
+++ b/contrib/python/ipython/py2/IPython/testing/iptest.py
@@ -1,55 +1,55 @@
-# -*- coding: utf-8 -*-
-"""IPython Test Suite Runner.
-
-This module provides the main entry point for a user script that tests IPython
-itself from the command line. There are two ways of running this script:
-
-1. With the syntax `iptest all`. This runs our entire test suite by
- calling this script (with different arguments) recursively. This
-   causes modules and packages to be tested in different processes, using nose
- or trial where appropriate.
-2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
- the script simply calls nose, but with special command line flags and
- plugins loaded.
-
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import glob
-from io import BytesIO
-import os
-import os.path as path
-import sys
-from threading import Thread, Lock, Event
-import warnings
-
-import nose.plugins.builtin
-from nose.plugins.xunit import Xunit
-from nose import SkipTest
-from nose.core import TestProgram
-from nose.plugins import Plugin
-from nose.util import safe_str
-
-from IPython import version_info
-from IPython.utils.py3compat import bytes_to_str
-from IPython.utils.importstring import import_item
-from IPython.testing.plugin.ipdoctest import IPythonDoctest
-from IPython.external.decorators import KnownFailure, knownfailureif
-
-pjoin = path.join
-
-
-# Enable printing all warnings raised by IPython's modules
+# -*- coding: utf-8 -*-
+"""IPython Test Suite Runner.
+
+This module provides the main entry point for a user script that tests IPython
+itself from the command line. There are two ways of running this script:
+
+1. With the syntax `iptest all`. This runs our entire test suite by
+ calling this script (with different arguments) recursively. This
+   causes modules and packages to be tested in different processes, using nose
+ or trial where appropriate.
+2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
+ the script simply calls nose, but with special command line flags and
+ plugins loaded.
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import glob
+from io import BytesIO
+import os
+import os.path as path
+import sys
+from threading import Thread, Lock, Event
+import warnings
+
+import nose.plugins.builtin
+from nose.plugins.xunit import Xunit
+from nose import SkipTest
+from nose.core import TestProgram
+from nose.plugins import Plugin
+from nose.util import safe_str
+
+from IPython import version_info
+from IPython.utils.py3compat import bytes_to_str
+from IPython.utils.importstring import import_item
+from IPython.testing.plugin.ipdoctest import IPythonDoctest
+from IPython.external.decorators import KnownFailure, knownfailureif
+
+pjoin = path.join
+
+
+# Enable printing all warnings raised by IPython's modules
warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
if sys.version_info > (3,0):
warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
-warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
-
+warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
+
warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')
@@ -60,384 +60,384 @@ warnings.filterwarnings('error', message='.*disable_gui.*', category=Deprecation
warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')
-if version_info < (6,):
-    # nose.tools renames everything from `camelCase` to `snake_case`, which raises
-    # a warning via the test runner it imports from the standard library (as of
-    # Dec 2015). Ignore it; let's revisit in a couple of years for IPython 6.
- warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
-
-
-# ------------------------------------------------------------------------------
-# Monkeypatch Xunit to count known failures as skipped.
-# ------------------------------------------------------------------------------
-def monkeypatch_xunit():
- try:
- knownfailureif(True)(lambda: None)()
- except Exception as e:
- KnownFailureTest = type(e)
-
- def addError(self, test, err, capt=None):
- if issubclass(err[0], KnownFailureTest):
- err = (SkipTest,) + err[1:]
- return self.orig_addError(test, err, capt)
-
- Xunit.orig_addError = Xunit.addError
- Xunit.addError = addError
-
-#-----------------------------------------------------------------------------
-# Check which dependencies are installed and greater than minimum version.
-#-----------------------------------------------------------------------------
-def extract_version(mod):
- return mod.__version__
-
-def test_for(item, min_version=None, callback=extract_version):
- """Test to see if item is importable, and optionally check against a minimum
- version.
-
- If min_version is given, the default behavior is to check against the
- `__version__` attribute of the item, but specifying `callback` allows you to
- extract the value you are interested in. e.g::
-
- In [1]: import sys
-
- In [2]: from IPython.testing.iptest import test_for
-
- In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
- Out[3]: True
-
- """
- try:
- check = import_item(item)
- except (ImportError, RuntimeError):
- # GTK reports Runtime error if it can't be initialized even if it's
- # importable.
- return False
- else:
- if min_version:
- if callback:
- # extra processing step to get version to compare
- check = callback(check)
-
- return check >= min_version
- else:
- return True
-
-# Global dict where we can store information on what we have and what we don't
-# have available at test run time
-have = {'matplotlib': test_for('matplotlib'),
- 'pygments': test_for('pygments'),
- 'sqlite3': test_for('sqlite3')}
-
-#-----------------------------------------------------------------------------
-# Test suite definitions
-#-----------------------------------------------------------------------------
-
-test_group_names = ['core',
- 'extensions', 'lib', 'terminal', 'testing', 'utils',
- ]
-
-class TestSection(object):
- def __init__(self, name, includes):
- self.name = name
- self.includes = includes
- self.excludes = []
- self.dependencies = []
- self.enabled = True
-
- def exclude(self, module):
- if not module.startswith('IPython'):
- module = self.includes[0] + "." + module
- self.excludes.append(module.replace('.', os.sep))
-
- def requires(self, *packages):
- self.dependencies.extend(packages)
-
- @property
- def will_run(self):
- return self.enabled and all(have[p] for p in self.dependencies)
-
-# Name -> (include, exclude, dependencies_met)
-test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
-
-
-# Exclusions and dependencies
-# ---------------------------
-
-# core:
-sec = test_sections['core']
-if not have['sqlite3']:
- sec.exclude('tests.test_history')
- sec.exclude('history')
-if not have['matplotlib']:
-    sec.exclude('pylabtools')
- sec.exclude('tests.test_pylabtools')
-
-# lib:
-sec = test_sections['lib']
-sec.exclude('kernel')
-if not have['pygments']:
- sec.exclude('tests.test_lexers')
-# We do this unconditionally, so that the test suite doesn't import
-# gtk, changing the default encoding and masking some unicode bugs.
-sec.exclude('inputhookgtk')
-# We also do this unconditionally, because wx can interfere with Unix signals.
-# There are currently no tests for it anyway.
-sec.exclude('inputhookwx')
-# Testing inputhook will need a lot of thought, to figure out
-# how to have tests that don't lock up with the gui event
-# loops in the picture
-sec.exclude('inputhook')
-
-# testing:
-sec = test_sections['testing']
-# These have to be skipped on win32 because they use echo, rm, cd, etc.
-# See ticket https://github.com/ipython/ipython/issues/87
-if sys.platform == 'win32':
- sec.exclude('plugin.test_exampleip')
- sec.exclude('plugin.dtexample')
-
-# don't run jupyter_console tests found via shim
-test_sections['terminal'].exclude('console')
-
-# extensions:
-sec = test_sections['extensions']
-# This is deprecated in favour of rpy2
-sec.exclude('rmagic')
-# autoreload does some strange stuff, so move it to its own test section
-sec.exclude('autoreload')
-sec.exclude('tests.test_autoreload')
-test_sections['autoreload'] = TestSection('autoreload',
- ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
-test_group_names.append('autoreload')
-
-
-#-----------------------------------------------------------------------------
-# Functions and classes
-#-----------------------------------------------------------------------------
-
-def check_exclusions_exist():
- from IPython.paths import get_ipython_package_dir
+if version_info < (6,):
+    # nose.tools renames everything from `camelCase` to `snake_case`, which raises
+    # a warning via the test runner it imports from the standard library (as of
+    # Dec 2015). Ignore it; let's revisit in a couple of years for IPython 6.
+ warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
+
+
+# ------------------------------------------------------------------------------
+# Monkeypatch Xunit to count known failures as skipped.
+# ------------------------------------------------------------------------------
+def monkeypatch_xunit():
+ try:
+ knownfailureif(True)(lambda: None)()
+ except Exception as e:
+ KnownFailureTest = type(e)
+
+ def addError(self, test, err, capt=None):
+ if issubclass(err[0], KnownFailureTest):
+ err = (SkipTest,) + err[1:]
+ return self.orig_addError(test, err, capt)
+
+ Xunit.orig_addError = Xunit.addError
+ Xunit.addError = addError
+
+#-----------------------------------------------------------------------------
+# Check which dependencies are installed and greater than minimum version.
+#-----------------------------------------------------------------------------
+def extract_version(mod):
+ return mod.__version__
+
+def test_for(item, min_version=None, callback=extract_version):
+ """Test to see if item is importable, and optionally check against a minimum
+ version.
+
+ If min_version is given, the default behavior is to check against the
+ `__version__` attribute of the item, but specifying `callback` allows you to
+ extract the value you are interested in. e.g::
+
+ In [1]: import sys
+
+ In [2]: from IPython.testing.iptest import test_for
+
+ In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
+ Out[3]: True
+
+ """
+ try:
+ check = import_item(item)
+ except (ImportError, RuntimeError):
+ # GTK reports Runtime error if it can't be initialized even if it's
+ # importable.
+ return False
+ else:
+ if min_version:
+ if callback:
+ # extra processing step to get version to compare
+ check = callback(check)
+
+ return check >= min_version
+ else:
+ return True
+
+# Global dict where we can store information on what we have and what we don't
+# have available at test run time
+have = {'matplotlib': test_for('matplotlib'),
+ 'pygments': test_for('pygments'),
+ 'sqlite3': test_for('sqlite3')}
+
+#-----------------------------------------------------------------------------
+# Test suite definitions
+#-----------------------------------------------------------------------------
+
+test_group_names = ['core',
+ 'extensions', 'lib', 'terminal', 'testing', 'utils',
+ ]
+
+class TestSection(object):
+ def __init__(self, name, includes):
+ self.name = name
+ self.includes = includes
+ self.excludes = []
+ self.dependencies = []
+ self.enabled = True
+
+ def exclude(self, module):
+ if not module.startswith('IPython'):
+ module = self.includes[0] + "." + module
+ self.excludes.append(module.replace('.', os.sep))
+
+ def requires(self, *packages):
+ self.dependencies.extend(packages)
+
+ @property
+ def will_run(self):
+ return self.enabled and all(have[p] for p in self.dependencies)
+
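+# Usage sketch (illustrative only):
+#
+#     sec = TestSection('lib', ['IPython.lib'])
+#     sec.requires('pygments')        # run only when pygments is importable
+#     sec.exclude('inputhookgtk')     # stored as a path fragment to skip
+#     sec.will_run                    # -> have['pygments']
+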
+# Name -> (include, exclude, dependencies_met)
+test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
+
+
+# Exclusions and dependencies
+# ---------------------------
+
+# core:
+sec = test_sections['core']
+if not have['sqlite3']:
+ sec.exclude('tests.test_history')
+ sec.exclude('history')
+if not have['matplotlib']:
+    sec.exclude('pylabtools')
+ sec.exclude('tests.test_pylabtools')
+
+# lib:
+sec = test_sections['lib']
+sec.exclude('kernel')
+if not have['pygments']:
+ sec.exclude('tests.test_lexers')
+# We do this unconditionally, so that the test suite doesn't import
+# gtk, changing the default encoding and masking some unicode bugs.
+sec.exclude('inputhookgtk')
+# We also do this unconditionally, because wx can interfere with Unix signals.
+# There are currently no tests for it anyway.
+sec.exclude('inputhookwx')
+# Testing inputhook will need a lot of thought, to figure out
+# how to have tests that don't lock up with the gui event
+# loops in the picture
+sec.exclude('inputhook')
+
+# testing:
+sec = test_sections['testing']
+# These have to be skipped on win32 because they use echo, rm, cd, etc.
+# See ticket https://github.com/ipython/ipython/issues/87
+if sys.platform == 'win32':
+ sec.exclude('plugin.test_exampleip')
+ sec.exclude('plugin.dtexample')
+
+# don't run jupyter_console tests found via shim
+test_sections['terminal'].exclude('console')
+
+# extensions:
+sec = test_sections['extensions']
+# This is deprecated in favour of rpy2
+sec.exclude('rmagic')
+# autoreload does some strange stuff, so move it to its own test section
+sec.exclude('autoreload')
+sec.exclude('tests.test_autoreload')
+test_sections['autoreload'] = TestSection('autoreload',
+ ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
+test_group_names.append('autoreload')
+
+
+#-----------------------------------------------------------------------------
+# Functions and classes
+#-----------------------------------------------------------------------------
+
+def check_exclusions_exist():
+ from IPython.paths import get_ipython_package_dir
from warnings import warn
- parent = os.path.dirname(get_ipython_package_dir())
-    # test_sections maps names to TestSection objects; iterate the objects and
-    # read their `excludes` lists.
-    for sec in test_sections.values():
-        for pattern in sec.excludes:
- fullpath = pjoin(parent, pattern)
- if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
- warn("Excluding nonexistent file: %r" % pattern)
-
-
-class ExclusionPlugin(Plugin):
- """A nose plugin to effect our exclusions of files and directories.
- """
- name = 'exclusions'
- score = 3000 # Should come before any other plugins
-
- def __init__(self, exclude_patterns=None):
- """
- Parameters
- ----------
-
- exclude_patterns : sequence of strings, optional
- Filenames containing these patterns (as raw strings, not as regular
- expressions) are excluded from the tests.
- """
- self.exclude_patterns = exclude_patterns or []
- super(ExclusionPlugin, self).__init__()
-
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
-
- def configure(self, options, config):
- Plugin.configure(self, options, config)
- # Override nose trying to disable plugin.
- self.enabled = True
-
- def wantFile(self, filename):
- """Return whether the given filename should be scanned for tests.
- """
- if any(pat in filename for pat in self.exclude_patterns):
- return False
- return None
-
- def wantDirectory(self, directory):
- """Return whether the given directory should be scanned for tests.
- """
- if any(pat in directory for pat in self.exclude_patterns):
- return False
- return None
-
-
-class StreamCapturer(Thread):
- daemon = True # Don't hang if main thread crashes
- started = False
- def __init__(self, echo=False):
- super(StreamCapturer, self).__init__()
- self.echo = echo
- self.streams = []
- self.buffer = BytesIO()
- self.readfd, self.writefd = os.pipe()
- self.buffer_lock = Lock()
- self.stop = Event()
-
- def run(self):
- self.started = True
-
- while not self.stop.is_set():
- chunk = os.read(self.readfd, 1024)
-
- with self.buffer_lock:
- self.buffer.write(chunk)
- if self.echo:
- sys.stdout.write(bytes_to_str(chunk))
-
- os.close(self.readfd)
- os.close(self.writefd)
-
- def reset_buffer(self):
- with self.buffer_lock:
- self.buffer.truncate(0)
- self.buffer.seek(0)
-
- def get_buffer(self):
- with self.buffer_lock:
- return self.buffer.getvalue()
-
- def ensure_started(self):
- if not self.started:
- self.start()
-
- def halt(self):
- """Safely stop the thread."""
- if not self.started:
- return
-
- self.stop.set()
- os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
- self.join()
-
-class SubprocessStreamCapturePlugin(Plugin):
- name='subprocstreams'
- def __init__(self):
- Plugin.__init__(self)
- self.stream_capturer = StreamCapturer()
- self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
- # This is ugly, but distant parts of the test machinery need to be able
- # to redirect streams, so we make the object globally accessible.
- nose.iptest_stdstreams_fileno = self.get_write_fileno
-
- def get_write_fileno(self):
- if self.destination == 'capture':
- self.stream_capturer.ensure_started()
- return self.stream_capturer.writefd
- elif self.destination == 'discard':
- return os.open(os.devnull, os.O_WRONLY)
- else:
- return sys.__stdout__.fileno()
-
- def configure(self, options, config):
- Plugin.configure(self, options, config)
- # Override nose trying to disable plugin.
- if self.destination == 'capture':
- self.enabled = True
-
- def startTest(self, test):
- # Reset log capture
- self.stream_capturer.reset_buffer()
-
- def formatFailure(self, test, err):
- # Show output
- ec, ev, tb = err
- captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
- if captured.strip():
- ev = safe_str(ev)
- out = [ev, '>> begin captured subprocess output <<',
- captured,
- '>> end captured subprocess output <<']
- return ec, '\n'.join(out), tb
-
- return err
-
- formatError = formatFailure
-
- def finalize(self, result):
- self.stream_capturer.halt()
-
-
-def run_iptest():
- """Run the IPython test suite using nose.
-
- This function is called when this script is **not** called with the form
- `iptest all`. It simply calls nose with appropriate command line flags
- and accepts all of the standard nose arguments.
- """
- # Apply our monkeypatch to Xunit
- if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
- monkeypatch_xunit()
-
- arg1 = sys.argv[1]
- if arg1 in test_sections:
- section = test_sections[arg1]
- sys.argv[1:2] = section.includes
- elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
- section = test_sections[arg1[8:]]
- sys.argv[1:2] = section.includes
- else:
- section = TestSection(arg1, includes=[arg1])
-
-
- argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
- # We add --exe because of setuptools' imbecility (it
- # blindly does chmod +x on ALL files). Nose does the
- # right thing and it tries to avoid executables,
- # setuptools unfortunately forces our hand here. This
- # has been discussed on the distutils list and the
- # setuptools devs refuse to fix this problem!
- '--exe',
- ]
- if '-a' not in argv and '-A' not in argv:
- argv = argv + ['-a', '!crash']
-
- if nose.__version__ >= '0.11':
- # I don't fully understand why we need this one, but depending on what
- # directory the test suite is run from, if we don't give it, 0 tests
- # get run. Specifically, if the test suite is run from the source dir
- # with an argument (like 'iptest.py IPython.core', 0 tests are run,
- # even if the same call done in this directory works fine). It appears
- # that if the requested package is in the current dir, nose bails early
- # by default. Since it's otherwise harmless, leave it in by default
- # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
- argv.append('--traverse-namespace')
-
- plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
- SubprocessStreamCapturePlugin() ]
-
- # we still have some vestigial doctests in core
- if (section.name.startswith(('core', 'IPython.core'))):
- plugins.append(IPythonDoctest())
- argv.extend([
- '--with-ipdoctest',
- '--ipdoctest-tests',
- '--ipdoctest-extension=txt',
- ])
-
-
- # Use working directory set by parent process (see iptestcontroller)
- if 'IPTEST_WORKING_DIR' in os.environ:
- os.chdir(os.environ['IPTEST_WORKING_DIR'])
-
- # We need a global ipython running in this process, but the special
- # in-process group spawns its own IPython kernels, so for *that* group we
- # must avoid also opening the global one (otherwise there's a conflict of
- # singletons). Ultimately the solution to this problem is to refactor our
- # assumptions about what needs to be a singleton and what doesn't (app
- # objects should, individual shells shouldn't). But for now, this
- # workaround allows the test suite for the inprocess module to complete.
- if 'kernel.inprocess' not in section.name:
- from IPython.testing import globalipapp
- globalipapp.start_ipython()
-
- # Now nose can run
- TestProgram(argv=argv, addplugins=plugins)
-
-if __name__ == '__main__':
- run_iptest()
+ parent = os.path.dirname(get_ipython_package_dir())
+    # test_sections maps names to TestSection objects; iterate the objects and
+    # read their `excludes` lists.
+    for sec in test_sections.values():
+        for pattern in sec.excludes:
+ fullpath = pjoin(parent, pattern)
+ if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
+ warn("Excluding nonexistent file: %r" % pattern)
+
+
+class ExclusionPlugin(Plugin):
+ """A nose plugin to effect our exclusions of files and directories.
+ """
+ name = 'exclusions'
+ score = 3000 # Should come before any other plugins
+
+ def __init__(self, exclude_patterns=None):
+ """
+ Parameters
+ ----------
+
+ exclude_patterns : sequence of strings, optional
+ Filenames containing these patterns (as raw strings, not as regular
+ expressions) are excluded from the tests.
+ """
+ self.exclude_patterns = exclude_patterns or []
+ super(ExclusionPlugin, self).__init__()
+
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+
+ def configure(self, options, config):
+ Plugin.configure(self, options, config)
+ # Override nose trying to disable plugin.
+ self.enabled = True
+
+ def wantFile(self, filename):
+ """Return whether the given filename should be scanned for tests.
+ """
+ if any(pat in filename for pat in self.exclude_patterns):
+ return False
+ return None
+
+ def wantDirectory(self, directory):
+ """Return whether the given directory should be scanned for tests.
+ """
+ if any(pat in directory for pat in self.exclude_patterns):
+ return False
+ return None
+
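+# Note on the return values above: in nose's plugin protocol, returning False
+# from wantFile/wantDirectory vetoes the path outright, while returning None
+# defers the decision to other plugins and nose's defaults. Illustrative use:
+#
+#     plugin = ExclusionPlugin(exclude_patterns=['inputhookgtk'])
+#     plugin.wantFile('IPython/lib/inputhookgtk.py')   # -> False
+#     plugin.wantFile('IPython/lib/display.py')        # -> None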
+
+class StreamCapturer(Thread):
+ daemon = True # Don't hang if main thread crashes
+ started = False
+ def __init__(self, echo=False):
+ super(StreamCapturer, self).__init__()
+ self.echo = echo
+ self.streams = []
+ self.buffer = BytesIO()
+ self.readfd, self.writefd = os.pipe()
+ self.buffer_lock = Lock()
+ self.stop = Event()
+
+ def run(self):
+ self.started = True
+
+ while not self.stop.is_set():
+ chunk = os.read(self.readfd, 1024)
+
+ with self.buffer_lock:
+ self.buffer.write(chunk)
+ if self.echo:
+ sys.stdout.write(bytes_to_str(chunk))
+
+ os.close(self.readfd)
+ os.close(self.writefd)
+
+ def reset_buffer(self):
+ with self.buffer_lock:
+ self.buffer.truncate(0)
+ self.buffer.seek(0)
+
+ def get_buffer(self):
+ with self.buffer_lock:
+ return self.buffer.getvalue()
+
+ def ensure_started(self):
+ if not self.started:
+ self.start()
+
+ def halt(self):
+ """Safely stop the thread."""
+ if not self.started:
+ return
+
+ self.stop.set()
+ os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
+ self.join()
+
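+# Usage sketch (illustrative only): point a child process at the capturer's
+# write end, then read back whatever it printed.
+#
+#     import subprocess
+#     cap = StreamCapturer()
+#     cap.ensure_started()
+#     subprocess.call(['echo', 'hello'], stdout=cap.writefd)
+#     out = cap.get_buffer()     # b'hello\n' once the reader thread drains it
+#     cap.halt()
+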
+class SubprocessStreamCapturePlugin(Plugin):
+ name='subprocstreams'
+ def __init__(self):
+ Plugin.__init__(self)
+ self.stream_capturer = StreamCapturer()
+ self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
+ # This is ugly, but distant parts of the test machinery need to be able
+ # to redirect streams, so we make the object globally accessible.
+ nose.iptest_stdstreams_fileno = self.get_write_fileno
+
+ def get_write_fileno(self):
+ if self.destination == 'capture':
+ self.stream_capturer.ensure_started()
+ return self.stream_capturer.writefd
+ elif self.destination == 'discard':
+ return os.open(os.devnull, os.O_WRONLY)
+ else:
+ return sys.__stdout__.fileno()
+
+ def configure(self, options, config):
+ Plugin.configure(self, options, config)
+ # Override nose trying to disable plugin.
+ if self.destination == 'capture':
+ self.enabled = True
+
+ def startTest(self, test):
+ # Reset log capture
+ self.stream_capturer.reset_buffer()
+
+ def formatFailure(self, test, err):
+ # Show output
+ ec, ev, tb = err
+ captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
+ if captured.strip():
+ ev = safe_str(ev)
+ out = [ev, '>> begin captured subprocess output <<',
+ captured,
+ '>> end captured subprocess output <<']
+ return ec, '\n'.join(out), tb
+
+ return err
+
+ formatError = formatFailure
+
+ def finalize(self, result):
+ self.stream_capturer.halt()
+
+
+def run_iptest():
+ """Run the IPython test suite using nose.
+
+ This function is called when this script is **not** called with the form
+ `iptest all`. It simply calls nose with appropriate command line flags
+ and accepts all of the standard nose arguments.
+ """
+ # Apply our monkeypatch to Xunit
+ if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
+ monkeypatch_xunit()
+
+ arg1 = sys.argv[1]
+ if arg1 in test_sections:
+ section = test_sections[arg1]
+ sys.argv[1:2] = section.includes
+ elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
+ section = test_sections[arg1[8:]]
+ sys.argv[1:2] = section.includes
+ else:
+ section = TestSection(arg1, includes=[arg1])
+
+
+ argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
+ # We add --exe because of setuptools' imbecility (it
+ # blindly does chmod +x on ALL files). Nose does the
+ # right thing and it tries to avoid executables,
+ # setuptools unfortunately forces our hand here. This
+ # has been discussed on the distutils list and the
+ # setuptools devs refuse to fix this problem!
+ '--exe',
+ ]
+ if '-a' not in argv and '-A' not in argv:
+ argv = argv + ['-a', '!crash']
+
+ if nose.__version__ >= '0.11':
+ # I don't fully understand why we need this one, but depending on what
+ # directory the test suite is run from, if we don't give it, 0 tests
+ # get run. Specifically, if the test suite is run from the source dir
+ # with an argument (like 'iptest.py IPython.core', 0 tests are run,
+ # even if the same call done in this directory works fine). It appears
+ # that if the requested package is in the current dir, nose bails early
+ # by default. Since it's otherwise harmless, leave it in by default
+ # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
+ argv.append('--traverse-namespace')
+
+ plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
+ SubprocessStreamCapturePlugin() ]
+
+ # we still have some vestigial doctests in core
+ if (section.name.startswith(('core', 'IPython.core'))):
+ plugins.append(IPythonDoctest())
+ argv.extend([
+ '--with-ipdoctest',
+ '--ipdoctest-tests',
+ '--ipdoctest-extension=txt',
+ ])
+
+
+ # Use working directory set by parent process (see iptestcontroller)
+ if 'IPTEST_WORKING_DIR' in os.environ:
+ os.chdir(os.environ['IPTEST_WORKING_DIR'])
+
+ # We need a global ipython running in this process, but the special
+ # in-process group spawns its own IPython kernels, so for *that* group we
+ # must avoid also opening the global one (otherwise there's a conflict of
+ # singletons). Ultimately the solution to this problem is to refactor our
+ # assumptions about what needs to be a singleton and what doesn't (app
+ # objects should, individual shells shouldn't). But for now, this
+ # workaround allows the test suite for the inprocess module to complete.
+ if 'kernel.inprocess' not in section.name:
+ from IPython.testing import globalipapp
+ globalipapp.start_ipython()
+
+ # Now nose can run
+ TestProgram(argv=argv, addplugins=plugins)
+
+if __name__ == '__main__':
+ run_iptest()
diff --git a/contrib/python/ipython/py2/IPython/testing/iptestcontroller.py b/contrib/python/ipython/py2/IPython/testing/iptestcontroller.py
index 6d8834f193..95aa06e4a4 100644
--- a/contrib/python/ipython/py2/IPython/testing/iptestcontroller.py
+++ b/contrib/python/ipython/py2/IPython/testing/iptestcontroller.py
@@ -1,532 +1,532 @@
-# -*- coding: utf-8 -*-
-"""IPython Test Process Controller
-
-This module runs one or more subprocesses which will actually run the IPython
-test suite.
-
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import argparse
-import json
-import multiprocessing.pool
-import os
-import stat
-import re
-import requests
-import shutil
-import signal
-import sys
-import subprocess
-import time
-
-from .iptest import (
- have, test_group_names as py_test_group_names, test_sections, StreamCapturer,
- test_for,
-)
-from IPython.utils.path import compress_user
-from IPython.utils.py3compat import bytes_to_str
-from IPython.utils.sysinfo import get_sys_info
-from IPython.utils.tempdir import TemporaryDirectory
-from IPython.utils.text import strip_ansi
-
-try:
- # Python >= 3.3
- from subprocess import TimeoutExpired
- def popen_wait(p, timeout):
- return p.wait(timeout)
-except ImportError:
- class TimeoutExpired(Exception):
- pass
- def popen_wait(p, timeout):
- """backport of Popen.wait from Python 3"""
- for i in range(int(10 * timeout)):
- if p.poll() is not None:
- return
- time.sleep(0.1)
- if p.poll() is None:
- raise TimeoutExpired
-
-NOTEBOOK_SHUTDOWN_TIMEOUT = 10
-
-class TestController(object):
- """Run tests in a subprocess
- """
- #: str, IPython test suite to be executed.
- section = None
- #: list, command line arguments to be executed
- cmd = None
- #: dict, extra environment variables to set for the subprocess
- env = None
- #: list, TemporaryDirectory instances to clear up when the process finishes
- dirs = None
- #: subprocess.Popen instance
- process = None
- #: str, process stdout+stderr
- stdout = None
-
- def __init__(self):
- self.cmd = []
- self.env = {}
- self.dirs = []
-
- def setup(self):
- """Create temporary directories etc.
-
- This is only called when we know the test group will be run. Things
- created here may be cleaned up by self.cleanup().
- """
- pass
-
- def launch(self, buffer_output=False, capture_output=False):
- # print('*** ENV:', self.env) # dbg
- # print('*** CMD:', self.cmd) # dbg
- env = os.environ.copy()
- env.update(self.env)
- if buffer_output:
- capture_output = True
- self.stdout_capturer = c = StreamCapturer(echo=not buffer_output)
- c.start()
- stdout = c.writefd if capture_output else None
- stderr = subprocess.STDOUT if capture_output else None
- self.process = subprocess.Popen(self.cmd, stdout=stdout,
- stderr=stderr, env=env)
-
- def wait(self):
- self.process.wait()
- self.stdout_capturer.halt()
- self.stdout = self.stdout_capturer.get_buffer()
- return self.process.returncode
-
- def print_extra_info(self):
- """Print extra information about this test run.
-
- If we're running in parallel and showing the concise view, this is only
- called if the test group fails. Otherwise, it's called before the test
- group is started.
-
- The base implementation does nothing, but it can be overridden by
- subclasses.
- """
- return
-
- def cleanup_process(self):
- """Cleanup on exit by killing any leftover processes."""
- subp = self.process
- if subp is None or (subp.poll() is not None):
- return # Process doesn't exist, or is already dead.
-
- try:
- print('Cleaning up stale PID: %d' % subp.pid)
- subp.kill()
- except: # (OSError, WindowsError) ?
- # This is just a best effort, if we fail or the process was
- # really gone, ignore it.
- pass
- else:
- for i in range(10):
- if subp.poll() is None:
- time.sleep(0.1)
- else:
- break
-
- if subp.poll() is None:
- # The process did not die...
- print('... failed. Manual cleanup may be required.')
-
- def cleanup(self):
- "Kill process if it's still alive, and clean up temporary directories"
- self.cleanup_process()
- for td in self.dirs:
- td.cleanup()
-
- __del__ = cleanup
-
-
-class PyTestController(TestController):
- """Run Python tests using IPython.testing.iptest"""
- #: str, Python command to execute in subprocess
- pycmd = None
-
- def __init__(self, section, options):
- """Create new test runner."""
- TestController.__init__(self)
- self.section = section
- # pycmd is put into cmd[2] in PyTestController.launch()
- self.cmd = [sys.executable, '-c', None, section]
- self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
- self.options = options
-
- def setup(self):
- ipydir = TemporaryDirectory()
- self.dirs.append(ipydir)
- self.env['IPYTHONDIR'] = ipydir.name
- self.workingdir = workingdir = TemporaryDirectory()
- self.dirs.append(workingdir)
- self.env['IPTEST_WORKING_DIR'] = workingdir.name
- # This means we won't get odd effects from our own matplotlib config
- self.env['MPLCONFIGDIR'] = workingdir.name
- # For security reasons (http://bugs.python.org/issue16202), use
- # a temporary directory to which other users have no access.
- self.env['TMPDIR'] = workingdir.name
-
- # Add a non-accessible directory to PATH (see gh-7053)
- noaccess = os.path.join(self.workingdir.name, "_no_access_")
- self.noaccess = noaccess
- os.mkdir(noaccess, 0)
-
- PATH = os.environ.get('PATH', '')
- if PATH:
- PATH = noaccess + os.pathsep + PATH
- else:
- PATH = noaccess
- self.env['PATH'] = PATH
-
- # From options:
- if self.options.xunit:
- self.add_xunit()
- if self.options.coverage:
- self.add_coverage()
- self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
- self.cmd.extend(self.options.extra_args)
-
- def cleanup(self):
- """
- Make the non-accessible directory created in setup() accessible
- again, otherwise deleting the workingdir will fail.
- """
- os.chmod(self.noaccess, stat.S_IRWXU)
- TestController.cleanup(self)
-
- @property
- def will_run(self):
- try:
- return test_sections[self.section].will_run
- except KeyError:
- return True
-
- def add_xunit(self):
- xunit_file = os.path.abspath(self.section + '.xunit.xml')
- self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
-
- def add_coverage(self):
- try:
- sources = test_sections[self.section].includes
- except KeyError:
- sources = ['IPython']
-
- coverage_rc = ("[run]\n"
- "data_file = {data_file}\n"
- "source =\n"
- " {source}\n"
- ).format(data_file=os.path.abspath('.coverage.'+self.section),
- source="\n ".join(sources))
- config_file = os.path.join(self.workingdir.name, '.coveragerc')
- with open(config_file, 'w') as f:
- f.write(coverage_rc)
-
- self.env['COVERAGE_PROCESS_START'] = config_file
- self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
-
- def launch(self, buffer_output=False):
- self.cmd[2] = self.pycmd
- super(PyTestController, self).launch(buffer_output=buffer_output)
-
-
-def prepare_controllers(options):
- """Returns two lists of TestController instances, those to run, and those
- not to run."""
- testgroups = options.testgroups
- if not testgroups:
- testgroups = py_test_group_names
-
- controllers = [PyTestController(name, options) for name in testgroups]
-
- to_run = [c for c in controllers if c.will_run]
- not_run = [c for c in controllers if not c.will_run]
- return to_run, not_run
-
-def do_run(controller, buffer_output=True):
- """Setup and run a test controller.
-
- If buffer_output is True, no output is displayed, to avoid it appearing
- interleaved. In this case, the caller is responsible for displaying test
- output on failure.
-
- Returns
- -------
- controller : TestController
- The same controller as passed in, as a convenience for using map() type
- APIs.
- exitcode : int
- The exit code of the test subprocess. Non-zero indicates failure.
- """
- try:
- try:
- controller.setup()
- if not buffer_output:
- controller.print_extra_info()
- controller.launch(buffer_output=buffer_output)
- except Exception:
- import traceback
- traceback.print_exc()
- return controller, 1 # signal failure
-
- exitcode = controller.wait()
- return controller, exitcode
-
- except KeyboardInterrupt:
- return controller, -signal.SIGINT
- finally:
- controller.cleanup()
-
-def report():
- """Return a string with a summary report of test-related variables."""
- inf = get_sys_info()
- out = []
- def _add(name, value):
- out.append((name, value))
-
- _add('IPython version', inf['ipython_version'])
- _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
- _add('IPython package', compress_user(inf['ipython_path']))
- _add('Python version', inf['sys_version'].replace('\n',''))
- _add('sys.executable', compress_user(inf['sys_executable']))
- _add('Platform', inf['platform'])
-
- width = max(len(n) for (n,v) in out)
- out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
-
- avail = []
- not_avail = []
-
- for k, is_avail in have.items():
- if is_avail:
- avail.append(k)
- else:
- not_avail.append(k)
-
- if avail:
- out.append('\nTools and libraries available at test time:\n')
- avail.sort()
- out.append(' ' + ' '.join(avail)+'\n')
-
- if not_avail:
- out.append('\nTools and libraries NOT available at test time:\n')
- not_avail.sort()
- out.append(' ' + ' '.join(not_avail)+'\n')
-
- return ''.join(out)
-
-def run_iptestall(options):
- """Run the entire IPython test suite by calling nose and trial.
-
-    This function constructs :class:`PyTestController` instances for all IPython
-    modules and packages and then runs each of them. This causes the modules
- and packages of IPython to be tested each in their own subprocess using
- nose.
-
- Parameters
- ----------
-
- All parameters are passed as attributes of the options object.
-
- testgroups : list of str
- Run only these sections of the test suite. If empty, run all the available
- sections.
-
- fast : int or None
- Run the test suite in parallel, using n simultaneous processes. If None
- is passed, one process is used per CPU core. Default 1 (i.e. sequential)
-
- inc_slow : bool
- Include slow tests. By default, these tests aren't run.
-
- url : unicode
- Address:port to use when running the JS tests.
-
- xunit : bool
- Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
-
- coverage : bool or str
- Measure code coverage from tests. True will store the raw coverage data,
- or pass 'html' or 'xml' to get reports.
-
- extra_args : list
- Extra arguments to pass to the test subprocesses, e.g. '-v'
- """
- to_run, not_run = prepare_controllers(options)
-
- def justify(ltext, rtext, width=70, fill='-'):
- ltext += ' '
- rtext = (' ' + rtext).rjust(width - len(ltext), fill)
- return ltext + rtext
-
- # Run all test runners, tracking execution time
- failed = []
- t_start = time.time()
-
- print()
- if options.fast == 1:
- # This actually means sequential, i.e. with 1 job
- for controller in to_run:
- print('Test group:', controller.section)
- sys.stdout.flush() # Show in correct order when output is piped
- controller, res = do_run(controller, buffer_output=False)
- if res:
- failed.append(controller)
- if res == -signal.SIGINT:
- print("Interrupted")
- break
- print()
-
- else:
- # Run tests concurrently
- try:
- pool = multiprocessing.pool.ThreadPool(options.fast)
- for (controller, res) in pool.imap_unordered(do_run, to_run):
- res_string = 'OK' if res == 0 else 'FAILED'
- print(justify('Test group: ' + controller.section, res_string))
- if res:
- controller.print_extra_info()
- print(bytes_to_str(controller.stdout))
- failed.append(controller)
- if res == -signal.SIGINT:
- print("Interrupted")
- break
- except KeyboardInterrupt:
- return
-
- for controller in not_run:
- print(justify('Test group: ' + controller.section, 'NOT RUN'))
-
- t_end = time.time()
- t_tests = t_end - t_start
- nrunners = len(to_run)
- nfail = len(failed)
- # summarize results
- print('_'*70)
- print('Test suite completed for system with the following information:')
- print(report())
- took = "Took %.3fs." % t_tests
- print('Status: ', end='')
- if not failed:
- print('OK (%d test groups).' % nrunners, took)
- else:
- # If anything went wrong, point out what command to rerun manually to
- # see the actual errors and individual summary
- failed_sections = [c.section for c in failed]
- print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
- nrunners, ', '.join(failed_sections)), took)
- print()
- print('You may wish to rerun these, with:')
- print(' iptest', *failed_sections)
- print()
-
- if options.coverage:
- from coverage import coverage, CoverageException
- cov = coverage(data_file='.coverage')
- cov.combine()
- cov.save()
-
- # Coverage HTML report
- if options.coverage == 'html':
- html_dir = 'ipy_htmlcov'
- shutil.rmtree(html_dir, ignore_errors=True)
- print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
- sys.stdout.flush()
-
- # Custom HTML reporter to clean up module names.
- from coverage.html import HtmlReporter
- class CustomHtmlReporter(HtmlReporter):
- def find_code_units(self, morfs):
- super(CustomHtmlReporter, self).find_code_units(morfs)
- for cu in self.code_units:
- nameparts = cu.name.split(os.sep)
- if 'IPython' not in nameparts:
- continue
- ix = nameparts.index('IPython')
- cu.name = '.'.join(nameparts[ix:])
-
- # Reimplement the html_report method with our custom reporter
- cov.get_data()
- cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
- html_title='IPython test coverage',
- )
- reporter = CustomHtmlReporter(cov, cov.config)
- reporter.report(None)
- print('done.')
-
- # Coverage XML report
- elif options.coverage == 'xml':
- try:
- cov.xml_report(outfile='ipy_coverage.xml')
- except CoverageException as e:
- print('Generating coverage report failed. Are you running javascript tests only?')
- import traceback
- traceback.print_exc()
-
- if failed:
- # Ensure that our exit code indicates failure
- sys.exit(1)
-
-argparser = argparse.ArgumentParser(description='Run IPython test suite')
-argparser.add_argument('testgroups', nargs='*',
- help='Run specified groups of tests. If omitted, run '
- 'all tests.')
-argparser.add_argument('--all', action='store_true',
- help='Include slow tests not run by default.')
-argparser.add_argument('--url', help="URL to use for the JS tests.")
-argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
- help='Run test sections in parallel. This starts as many '
- 'processes as you have cores, or you can specify a number.')
-argparser.add_argument('--xunit', action='store_true',
- help='Produce Xunit XML results')
-argparser.add_argument('--coverage', nargs='?', const=True, default=False,
- help="Measure test coverage. Specify 'html' or "
- "'xml' to get reports.")
-argparser.add_argument('--subproc-streams', default='capture',
- help="What to do with stdout/stderr from subprocesses. "
- "'capture' (default), 'show' and 'discard' are the options.")
-
-def default_options():
- """Get an argparse Namespace object with the default arguments, to pass to
- :func:`run_iptestall`.
- """
- options = argparser.parse_args([])
- options.extra_args = []
- return options
-
-def main():
- # iptest doesn't work correctly if the working directory is the
- # root of the IPython source tree. Tell the user to avoid
- # frustration.
- if os.path.exists(os.path.join(os.getcwd(),
- 'IPython', 'testing', '__main__.py')):
- print("Don't run iptest from the IPython source directory",
- file=sys.stderr)
- sys.exit(1)
- # Arguments after -- should be passed through to nose. Argparse treats
- # everything after -- as regular positional arguments, so we separate them
- # first.
- try:
- ix = sys.argv.index('--')
- except ValueError:
- to_parse = sys.argv[1:]
- extra_args = []
- else:
- to_parse = sys.argv[1:ix]
- extra_args = sys.argv[ix+1:]
-
- options = argparser.parse_args(to_parse)
- options.extra_args = extra_args
-
- run_iptestall(options)
-
-
-if __name__ == '__main__':
- main()
+# -*- coding: utf-8 -*-
+"""IPython Test Process Controller
+
+This module runs one or more subprocesses which will actually run the IPython
+test suite.
+
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import argparse
+import json
+import multiprocessing.pool
+import os
+import stat
+import re
+import requests
+import shutil
+import signal
+import sys
+import subprocess
+import time
+
+from .iptest import (
+ have, test_group_names as py_test_group_names, test_sections, StreamCapturer,
+ test_for,
+)
+from IPython.utils.path import compress_user
+from IPython.utils.py3compat import bytes_to_str
+from IPython.utils.sysinfo import get_sys_info
+from IPython.utils.tempdir import TemporaryDirectory
+from IPython.utils.text import strip_ansi
+
+try:
+ # Python >= 3.3
+ from subprocess import TimeoutExpired
+ def popen_wait(p, timeout):
+ return p.wait(timeout)
+except ImportError:
+ class TimeoutExpired(Exception):
+ pass
+ def popen_wait(p, timeout):
+ """backport of Popen.wait from Python 3"""
+ for i in range(int(10 * timeout)):
+ if p.poll() is not None:
+ return
+ time.sleep(0.1)
+ if p.poll() is None:
+ raise TimeoutExpired
+
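+# A hedged usage sketch of the shim above: both branches expose the same call
+# shape, so a caller can bound a subprocess wait on Python 2 and 3 alike
+# (the child command here is purely illustrative)::
+#
+#     p = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
+#     try:
+#         popen_wait(p, timeout=2)   # returns once the child exits
+#     except TimeoutExpired:
+#         p.kill()                   # still alive after ~2s: clean it up
+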
+NOTEBOOK_SHUTDOWN_TIMEOUT = 10
+
+class TestController(object):
+ """Run tests in a subprocess
+ """
+ #: str, IPython test suite to be executed.
+ section = None
+ #: list, command line arguments to be executed
+ cmd = None
+ #: dict, extra environment variables to set for the subprocess
+ env = None
+ #: list, TemporaryDirectory instances to clear up when the process finishes
+ dirs = None
+ #: subprocess.Popen instance
+ process = None
+ #: str, process stdout+stderr
+ stdout = None
+
+ def __init__(self):
+ self.cmd = []
+ self.env = {}
+ self.dirs = []
+
+ def setup(self):
+ """Create temporary directories etc.
+
+ This is only called when we know the test group will be run. Things
+ created here may be cleaned up by self.cleanup().
+ """
+ pass
+
+ def launch(self, buffer_output=False, capture_output=False):
+ # print('*** ENV:', self.env) # dbg
+ # print('*** CMD:', self.cmd) # dbg
+ env = os.environ.copy()
+ env.update(self.env)
+ if buffer_output:
+ capture_output = True
+ self.stdout_capturer = c = StreamCapturer(echo=not buffer_output)
+ c.start()
+ stdout = c.writefd if capture_output else None
+ stderr = subprocess.STDOUT if capture_output else None
+ self.process = subprocess.Popen(self.cmd, stdout=stdout,
+ stderr=stderr, env=env)
+
+ def wait(self):
+ self.process.wait()
+ self.stdout_capturer.halt()
+ self.stdout = self.stdout_capturer.get_buffer()
+ return self.process.returncode
+
+ def print_extra_info(self):
+ """Print extra information about this test run.
+
+ If we're running in parallel and showing the concise view, this is only
+ called if the test group fails. Otherwise, it's called before the test
+ group is started.
+
+ The base implementation does nothing, but it can be overridden by
+ subclasses.
+ """
+ return
+
+ def cleanup_process(self):
+ """Cleanup on exit by killing any leftover processes."""
+ subp = self.process
+ if subp is None or (subp.poll() is not None):
+ return # Process doesn't exist, or is already dead.
+
+ try:
+ print('Cleaning up stale PID: %d' % subp.pid)
+ subp.kill()
+ except: # (OSError, WindowsError) ?
+ # This is just a best effort, if we fail or the process was
+ # really gone, ignore it.
+ pass
+ else:
+ for i in range(10):
+ if subp.poll() is None:
+ time.sleep(0.1)
+ else:
+ break
+
+ if subp.poll() is None:
+ # The process did not die...
+ print('... failed. Manual cleanup may be required.')
+
+ def cleanup(self):
+ "Kill process if it's still alive, and clean up temporary directories"
+ self.cleanup_process()
+ for td in self.dirs:
+ td.cleanup()
+
+ __del__ = cleanup
+
+
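+# A minimal sketch of the capture path in TestController.launch(), using only
+# the StreamCapturer surface this module itself relies on (start, writefd,
+# halt, get_buffer); the echoed command is illustrative::
+#
+#     c = StreamCapturer(echo=False)          # buffer silently
+#     c.start()
+#     p = subprocess.Popen([sys.executable, '-c', "print('hi')"],
+#                          stdout=c.writefd, stderr=subprocess.STDOUT)
+#     p.wait()
+#     c.halt()
+#     print(c.get_buffer())                   # the child's combined output
+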
+class PyTestController(TestController):
+ """Run Python tests using IPython.testing.iptest"""
+ #: str, Python command to execute in subprocess
+ pycmd = None
+
+ def __init__(self, section, options):
+ """Create new test runner."""
+ TestController.__init__(self)
+ self.section = section
+ # pycmd is put into cmd[2] in PyTestController.launch()
+ self.cmd = [sys.executable, '-c', None, section]
+ self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
+ self.options = options
+
+ def setup(self):
+ ipydir = TemporaryDirectory()
+ self.dirs.append(ipydir)
+ self.env['IPYTHONDIR'] = ipydir.name
+ self.workingdir = workingdir = TemporaryDirectory()
+ self.dirs.append(workingdir)
+ self.env['IPTEST_WORKING_DIR'] = workingdir.name
+ # This means we won't get odd effects from our own matplotlib config
+ self.env['MPLCONFIGDIR'] = workingdir.name
+ # For security reasons (http://bugs.python.org/issue16202), use
+ # a temporary directory to which other users have no access.
+ self.env['TMPDIR'] = workingdir.name
+
+ # Add a non-accessible directory to PATH (see gh-7053)
+ noaccess = os.path.join(self.workingdir.name, "_no_access_")
+ self.noaccess = noaccess
+ os.mkdir(noaccess, 0)
+
+ PATH = os.environ.get('PATH', '')
+ if PATH:
+ PATH = noaccess + os.pathsep + PATH
+ else:
+ PATH = noaccess
+ self.env['PATH'] = PATH
+
+ # From options:
+ if self.options.xunit:
+ self.add_xunit()
+ if self.options.coverage:
+ self.add_coverage()
+ self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
+ self.cmd.extend(self.options.extra_args)
+
+ def cleanup(self):
+ """
+ Make the non-accessible directory created in setup() accessible
+ again, otherwise deleting the workingdir will fail.
+ """
+ os.chmod(self.noaccess, stat.S_IRWXU)
+ TestController.cleanup(self)
+
+ @property
+ def will_run(self):
+ try:
+ return test_sections[self.section].will_run
+ except KeyError:
+ return True
+
+ def add_xunit(self):
+ xunit_file = os.path.abspath(self.section + '.xunit.xml')
+ self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
+
+ def add_coverage(self):
+ try:
+ sources = test_sections[self.section].includes
+ except KeyError:
+ sources = ['IPython']
+
+ coverage_rc = ("[run]\n"
+ "data_file = {data_file}\n"
+ "source =\n"
+ " {source}\n"
+ ).format(data_file=os.path.abspath('.coverage.'+self.section),
+ source="\n ".join(sources))
+ config_file = os.path.join(self.workingdir.name, '.coveragerc')
+ with open(config_file, 'w') as f:
+ f.write(coverage_rc)
+
+ self.env['COVERAGE_PROCESS_START'] = config_file
+ self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
+
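+    # For illustration: for a section named 'core' whose includes resolve to
+    # ['IPython.core'] (an assumption), add_coverage() writes roughly::
+    #
+    #     [run]
+    #     data_file = /abs/path/.coverage.core
+    #     source =
+    #         IPython.core
+    #
+    # and points COVERAGE_PROCESS_START at that file, so the subprocess starts
+    # collecting as soon as coverage.process_startup() runs.
+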
+ def launch(self, buffer_output=False):
+ self.cmd[2] = self.pycmd
+ super(PyTestController, self).launch(buffer_output=buffer_output)
+
+
+def prepare_controllers(options):
+ """Returns two lists of TestController instances, those to run, and those
+ not to run."""
+ testgroups = options.testgroups
+ if not testgroups:
+ testgroups = py_test_group_names
+
+ controllers = [PyTestController(name, options) for name in testgroups]
+
+ to_run = [c for c in controllers if c.will_run]
+ not_run = [c for c in controllers if not c.will_run]
+ return to_run, not_run
+
+def do_run(controller, buffer_output=True):
+    """Set up and run a test controller.
+
+ If buffer_output is True, no output is displayed, to avoid it appearing
+ interleaved. In this case, the caller is responsible for displaying test
+ output on failure.
+
+ Returns
+ -------
+ controller : TestController
+ The same controller as passed in, as a convenience for using map() type
+ APIs.
+ exitcode : int
+ The exit code of the test subprocess. Non-zero indicates failure.
+ """
+ try:
+ try:
+ controller.setup()
+ if not buffer_output:
+ controller.print_extra_info()
+ controller.launch(buffer_output=buffer_output)
+ except Exception:
+ import traceback
+ traceback.print_exc()
+ return controller, 1 # signal failure
+
+ exitcode = controller.wait()
+ return controller, exitcode
+
+ except KeyboardInterrupt:
+ return controller, -signal.SIGINT
+ finally:
+ controller.cleanup()
+
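+# Returning the controller alongside the exit code makes map-style dispatch
+# natural; run_iptestall() below relies on exactly this shape with
+# pool.imap_unordered(do_run, to_run).
+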
+def report():
+ """Return a string with a summary report of test-related variables."""
+ inf = get_sys_info()
+ out = []
+ def _add(name, value):
+ out.append((name, value))
+
+ _add('IPython version', inf['ipython_version'])
+ _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
+ _add('IPython package', compress_user(inf['ipython_path']))
+ _add('Python version', inf['sys_version'].replace('\n',''))
+ _add('sys.executable', compress_user(inf['sys_executable']))
+ _add('Platform', inf['platform'])
+
+ width = max(len(n) for (n,v) in out)
+ out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
+
+ avail = []
+ not_avail = []
+
+ for k, is_avail in have.items():
+ if is_avail:
+ avail.append(k)
+ else:
+ not_avail.append(k)
+
+ if avail:
+ out.append('\nTools and libraries available at test time:\n')
+ avail.sort()
+ out.append(' ' + ' '.join(avail)+'\n')
+
+ if not_avail:
+ out.append('\nTools and libraries NOT available at test time:\n')
+ not_avail.sort()
+ out.append(' ' + ' '.join(not_avail)+'\n')
+
+ return ''.join(out)
+
+def run_iptestall(options):
+ """Run the entire IPython test suite by calling nose and trial.
+
+ This function constructs :class:`IPTester` instances for all IPython
+    modules and packages, and then runs each of them. This causes the modules
+ and packages of IPython to be tested each in their own subprocess using
+ nose.
+
+ Parameters
+ ----------
+
+ All parameters are passed as attributes of the options object.
+
+ testgroups : list of str
+ Run only these sections of the test suite. If empty, run all the available
+ sections.
+
+ fast : int or None
+ Run the test suite in parallel, using n simultaneous processes. If None
+      is passed, one process is used per CPU core. Default 1 (i.e. sequential).
+
+ inc_slow : bool
+ Include slow tests. By default, these tests aren't run.
+
+ url : unicode
+ Address:port to use when running the JS tests.
+
+ xunit : bool
+ Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
+
+ coverage : bool or str
+ Measure code coverage from tests. True will store the raw coverage data,
+ or pass 'html' or 'xml' to get reports.
+
+ extra_args : list
+ Extra arguments to pass to the test subprocesses, e.g. '-v'
+ """
+ to_run, not_run = prepare_controllers(options)
+
+ def justify(ltext, rtext, width=70, fill='-'):
+ ltext += ' '
+ rtext = (' ' + rtext).rjust(width - len(ltext), fill)
+ return ltext + rtext
+
+ # Run all test runners, tracking execution time
+ failed = []
+ t_start = time.time()
+
+ print()
+ if options.fast == 1:
+ # This actually means sequential, i.e. with 1 job
+ for controller in to_run:
+ print('Test group:', controller.section)
+ sys.stdout.flush() # Show in correct order when output is piped
+ controller, res = do_run(controller, buffer_output=False)
+ if res:
+ failed.append(controller)
+ if res == -signal.SIGINT:
+ print("Interrupted")
+ break
+ print()
+
+ else:
+ # Run tests concurrently
+ try:
+ pool = multiprocessing.pool.ThreadPool(options.fast)
+ for (controller, res) in pool.imap_unordered(do_run, to_run):
+ res_string = 'OK' if res == 0 else 'FAILED'
+ print(justify('Test group: ' + controller.section, res_string))
+ if res:
+ controller.print_extra_info()
+ print(bytes_to_str(controller.stdout))
+ failed.append(controller)
+ if res == -signal.SIGINT:
+ print("Interrupted")
+ break
+ except KeyboardInterrupt:
+ return
+
+ for controller in not_run:
+ print(justify('Test group: ' + controller.section, 'NOT RUN'))
+
+ t_end = time.time()
+ t_tests = t_end - t_start
+ nrunners = len(to_run)
+ nfail = len(failed)
+ # summarize results
+ print('_'*70)
+ print('Test suite completed for system with the following information:')
+ print(report())
+ took = "Took %.3fs." % t_tests
+ print('Status: ', end='')
+ if not failed:
+ print('OK (%d test groups).' % nrunners, took)
+ else:
+ # If anything went wrong, point out what command to rerun manually to
+ # see the actual errors and individual summary
+ failed_sections = [c.section for c in failed]
+ print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
+ nrunners, ', '.join(failed_sections)), took)
+ print()
+ print('You may wish to rerun these, with:')
+ print(' iptest', *failed_sections)
+ print()
+
+ if options.coverage:
+ from coverage import coverage, CoverageException
+ cov = coverage(data_file='.coverage')
+ cov.combine()
+ cov.save()
+
+ # Coverage HTML report
+ if options.coverage == 'html':
+ html_dir = 'ipy_htmlcov'
+ shutil.rmtree(html_dir, ignore_errors=True)
+ print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
+ sys.stdout.flush()
+
+ # Custom HTML reporter to clean up module names.
+ from coverage.html import HtmlReporter
+ class CustomHtmlReporter(HtmlReporter):
+ def find_code_units(self, morfs):
+ super(CustomHtmlReporter, self).find_code_units(morfs)
+ for cu in self.code_units:
+ nameparts = cu.name.split(os.sep)
+ if 'IPython' not in nameparts:
+ continue
+ ix = nameparts.index('IPython')
+ cu.name = '.'.join(nameparts[ix:])
+
+ # Reimplement the html_report method with our custom reporter
+ cov.get_data()
+ cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
+ html_title='IPython test coverage',
+ )
+ reporter = CustomHtmlReporter(cov, cov.config)
+ reporter.report(None)
+ print('done.')
+
+ # Coverage XML report
+ elif options.coverage == 'xml':
+ try:
+ cov.xml_report(outfile='ipy_coverage.xml')
+ except CoverageException as e:
+ print('Generating coverage report failed. Are you running javascript tests only?')
+ import traceback
+ traceback.print_exc()
+
+ if failed:
+ # Ensure that our exit code indicates failure
+ sys.exit(1)
+
+argparser = argparse.ArgumentParser(description='Run IPython test suite')
+argparser.add_argument('testgroups', nargs='*',
+ help='Run specified groups of tests. If omitted, run '
+ 'all tests.')
+argparser.add_argument('--all', action='store_true',
+ help='Include slow tests not run by default.')
+argparser.add_argument('--url', help="URL to use for the JS tests.")
+argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
+ help='Run test sections in parallel. This starts as many '
+ 'processes as you have cores, or you can specify a number.')
+argparser.add_argument('--xunit', action='store_true',
+ help='Produce Xunit XML results')
+argparser.add_argument('--coverage', nargs='?', const=True, default=False,
+ help="Measure test coverage. Specify 'html' or "
+ "'xml' to get reports.")
+argparser.add_argument('--subproc-streams', default='capture',
+ help="What to do with stdout/stderr from subprocesses. "
+ "'capture' (default), 'show' and 'discard' are the options.")
+
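+# Illustrative invocations implied by the parser above (the group names are
+# hypothetical)::
+#
+#     iptest                          # all default groups, sequentially
+#     iptest core extensions -j 4     # two groups across four processes
+#     iptest --all --coverage html    # include slow tests, write HTML coverage
+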
+def default_options():
+ """Get an argparse Namespace object with the default arguments, to pass to
+ :func:`run_iptestall`.
+ """
+ options = argparser.parse_args([])
+ options.extra_args = []
+ return options
+
+def main():
+ # iptest doesn't work correctly if the working directory is the
+ # root of the IPython source tree. Tell the user to avoid
+ # frustration.
+ if os.path.exists(os.path.join(os.getcwd(),
+ 'IPython', 'testing', '__main__.py')):
+ print("Don't run iptest from the IPython source directory",
+ file=sys.stderr)
+ sys.exit(1)
+ # Arguments after -- should be passed through to nose. Argparse treats
+ # everything after -- as regular positional arguments, so we separate them
+ # first.
+ try:
+ ix = sys.argv.index('--')
+ except ValueError:
+ to_parse = sys.argv[1:]
+ extra_args = []
+ else:
+ to_parse = sys.argv[1:ix]
+ extra_args = sys.argv[ix+1:]
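+    # e.g. ``iptest core -- -v`` yields to_parse == ['core'] and
+    # extra_args == ['-v']; the extra args reach nose via options.extra_args.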
+
+ options = argparser.parse_args(to_parse)
+ options.extra_args = extra_args
+
+ run_iptestall(options)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/contrib/python/ipython/py2/IPython/testing/ipunittest.py b/contrib/python/ipython/py2/IPython/testing/ipunittest.py
index 04ea8320fb..da059816e2 100644
--- a/contrib/python/ipython/py2/IPython/testing/ipunittest.py
+++ b/contrib/python/ipython/py2/IPython/testing/ipunittest.py
@@ -1,178 +1,178 @@
-"""Experimental code for cleaner support of IPython syntax with unittest.
-
-In IPython up until 0.10, we've used very hacked up nose machinery for running
-tests with IPython special syntax, and this has proved to be extremely slow.
-This module provides decorators to try a different approach, stemming from a
-conversation Brian and I (FP) had about this problem Sept/09.
-
-The goal is to be able to easily write simple functions that can be seen by
-unittest as tests, and ultimately for these to support doctests with full
-IPython syntax. Nose already offers this based on naming conventions and our
-hackish plugins, but we are seeking to move away from nose dependencies if
-possible.
-
-This module follows a different approach, based on decorators.
-
-- A decorator called @ipdoctest can mark any function as having a docstring
- that should be viewed as a doctest, but after syntax conversion.
-
-Authors
--------
-
-- Fernando Perez <Fernando.Perez@berkeley.edu>
-"""
-
-from __future__ import absolute_import
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2009-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import re
-import unittest
-from doctest import DocTestFinder, DocTestRunner, TestResults
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-def count_failures(runner):
- """Count number of failures in a doctest runner.
-
- Code modeled after the summarize() method in doctest.
- """
-    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0]
-
-
-class IPython2PythonConverter(object):
- """Convert IPython 'syntax' to valid Python.
-
- Eventually this code may grow to be the full IPython syntax conversion
-    implementation, but for now it only does prompt conversion."""
-
- def __init__(self):
- self.rps1 = re.compile(r'In\ \[\d+\]: ')
- self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
- self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
- self.pyps1 = '>>> '
- self.pyps2 = '... '
-        self.rpyps1 = re.compile(r'(\s*%s)(.*)$' % self.pyps1)
-        self.rpyps2 = re.compile(r'(\s*%s)(.*)$' % self.pyps2)
-
- def __call__(self, ds):
- """Convert IPython prompts to python ones in a string."""
- from . import globalipapp
-
- pyps1 = '>>> '
- pyps2 = '... '
- pyout = ''
-
- dnew = ds
- dnew = self.rps1.sub(pyps1, dnew)
- dnew = self.rps2.sub(pyps2, dnew)
- dnew = self.rout.sub(pyout, dnew)
- ip = globalipapp.get_ipython()
-
- # Convert input IPython source into valid Python.
- out = []
- newline = out.append
- for line in dnew.splitlines():
-
- mps1 = self.rpyps1.match(line)
- if mps1 is not None:
- prompt, text = mps1.groups()
- newline(prompt+ip.prefilter(text, False))
- continue
-
- mps2 = self.rpyps2.match(line)
- if mps2 is not None:
- prompt, text = mps2.groups()
- newline(prompt+ip.prefilter(text, True))
- continue
-
- newline(line)
- newline('') # ensure a closing newline, needed by doctest
- #print "PYSRC:", '\n'.join(out) # dbg
- return '\n'.join(out)
-
- #return dnew
-
-
-class Doc2UnitTester(object):
- """Class whose instances act as a decorator for docstring testing.
-
- In practice we're only likely to need one instance ever, made below (though
-    no attempt is made at turning it into a singleton; there is no need for
- that).
- """
- def __init__(self, verbose=False):
- """New decorator.
-
- Parameters
- ----------
-
- verbose : boolean, optional (False)
- Passed to the doctest finder and runner to control verbosity.
- """
- self.verbose = verbose
- # We can reuse the same finder for all instances
- self.finder = DocTestFinder(verbose=verbose, recurse=False)
-
- def __call__(self, func):
- """Use as a decorator: doctest a function's docstring as a unittest.
-
- This version runs normal doctests, but the idea is to make it later run
- ipython syntax instead."""
-
- # Capture the enclosing instance with a different name, so the new
- # class below can see it without confusion regarding its own 'self'
- # that will point to the test instance at runtime
- d2u = self
-
- # Rewrite the function's docstring to have python syntax
- if func.__doc__ is not None:
- func.__doc__ = ip2py(func.__doc__)
-
- # Now, create a tester object that is a real unittest instance, so
- # normal unittest machinery (or Nose, or Trial) can find it.
- class Tester(unittest.TestCase):
- def test(self):
- # Make a new runner per function to be tested
- runner = DocTestRunner(verbose=d2u.verbose)
+"""Experimental code for cleaner support of IPython syntax with unittest.
+
+In IPython up until 0.10, we've used very hacked up nose machinery for running
+tests with IPython special syntax, and this has proved to be extremely slow.
+This module provides decorators to try a different approach, stemming from a
+conversation Brian and I (FP) had about this problem Sept/09.
+
+The goal is to be able to easily write simple functions that can be seen by
+unittest as tests, and ultimately for these to support doctests with full
+IPython syntax. Nose already offers this based on naming conventions and our
+hackish plugins, but we are seeking to move away from nose dependencies if
+possible.
+
+This module follows a different approach, based on decorators.
+
+- A decorator called @ipdoctest can mark any function as having a docstring
+ that should be viewed as a doctest, but after syntax conversion.
+
+Authors
+-------
+
+- Fernando Perez <Fernando.Perez@berkeley.edu>
+"""
+
+from __future__ import absolute_import
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2009-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+# Stdlib
+import re
+import unittest
+from doctest import DocTestFinder, DocTestRunner, TestResults
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+def count_failures(runner):
+ """Count number of failures in a doctest runner.
+
+ Code modeled after the summarize() method in doctest.
+ """
+    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0]
+
+
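+# Illustratively: after runner.run(...) over a docstring with one failing
+# example out of three, count_failures(runner) would return
+# [TestResults(failed=1, attempted=3)]; an empty list means everything passed.
+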
+class IPython2PythonConverter(object):
+ """Convert IPython 'syntax' to valid Python.
+
+ Eventually this code may grow to be the full IPython syntax conversion
+    implementation, but for now it only does prompt conversion."""
+
+ def __init__(self):
+ self.rps1 = re.compile(r'In\ \[\d+\]: ')
+ self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
+ self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
+ self.pyps1 = '>>> '
+ self.pyps2 = '... '
+        self.rpyps1 = re.compile(r'(\s*%s)(.*)$' % self.pyps1)
+        self.rpyps2 = re.compile(r'(\s*%s)(.*)$' % self.pyps2)
+
+ def __call__(self, ds):
+ """Convert IPython prompts to python ones in a string."""
+ from . import globalipapp
+
+ pyps1 = '>>> '
+ pyps2 = '... '
+ pyout = ''
+
+ dnew = ds
+ dnew = self.rps1.sub(pyps1, dnew)
+ dnew = self.rps2.sub(pyps2, dnew)
+ dnew = self.rout.sub(pyout, dnew)
+ ip = globalipapp.get_ipython()
+
+ # Convert input IPython source into valid Python.
+ out = []
+ newline = out.append
+ for line in dnew.splitlines():
+
+ mps1 = self.rpyps1.match(line)
+ if mps1 is not None:
+ prompt, text = mps1.groups()
+ newline(prompt+ip.prefilter(text, False))
+ continue
+
+ mps2 = self.rpyps2.match(line)
+ if mps2 is not None:
+ prompt, text = mps2.groups()
+ newline(prompt+ip.prefilter(text, True))
+ continue
+
+ newline(line)
+ newline('') # ensure a closing newline, needed by doctest
+ #print "PYSRC:", '\n'.join(out) # dbg
+ return '\n'.join(out)
+
+ #return dnew
+
+
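+# Roughly, the converter behaves like the sketch below (output is illustrative;
+# ip.prefilter requires an initialized global IPython app and may further
+# rewrite magics and shell escapes)::
+#
+#     conv = IPython2PythonConverter()
+#     print(conv("In [1]: 2+3\nOut[1]: 5\n"))
+#     # >>> 2+3
+#     # 5
+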
+class Doc2UnitTester(object):
+ """Class whose instances act as a decorator for docstring testing.
+
+ In practice we're only likely to need one instance ever, made below (though
+    no attempt is made at turning it into a singleton; there is no need for
+ that).
+ """
+ def __init__(self, verbose=False):
+ """New decorator.
+
+ Parameters
+ ----------
+
+ verbose : boolean, optional (False)
+ Passed to the doctest finder and runner to control verbosity.
+ """
+ self.verbose = verbose
+ # We can reuse the same finder for all instances
+ self.finder = DocTestFinder(verbose=verbose, recurse=False)
+
+ def __call__(self, func):
+ """Use as a decorator: doctest a function's docstring as a unittest.
+
+ This version runs normal doctests, but the idea is to make it later run
+ ipython syntax instead."""
+
+ # Capture the enclosing instance with a different name, so the new
+ # class below can see it without confusion regarding its own 'self'
+ # that will point to the test instance at runtime
+ d2u = self
+
+ # Rewrite the function's docstring to have python syntax
+ if func.__doc__ is not None:
+ func.__doc__ = ip2py(func.__doc__)
+
+ # Now, create a tester object that is a real unittest instance, so
+ # normal unittest machinery (or Nose, or Trial) can find it.
+ class Tester(unittest.TestCase):
+ def test(self):
+ # Make a new runner per function to be tested
+ runner = DocTestRunner(verbose=d2u.verbose)
for the_test in d2u.finder.find(func, func.__name__):
runner.run(the_test)
- failed = count_failures(runner)
- if failed:
- # Since we only looked at a single function's docstring,
- # failed should contain at most one item. More than that
- # is a case we can't handle and should error out on
- if len(failed) > 1:
-                        err = "Invalid number of test results: %s" % failed
- raise ValueError(err)
- # Report a normal failure.
- self.fail('failed doctests: %s' % str(failed[0]))
-
- # Rename it so test reports have the original signature.
- Tester.__name__ = func.__name__
- return Tester
-
-
-def ipdocstring(func):
- """Change the function docstring via ip2py.
- """
- if func.__doc__ is not None:
- func.__doc__ = ip2py(func.__doc__)
- return func
-
-
-# Make an instance of the classes for public use
-ipdoctest = Doc2UnitTester()
-ip2py = IPython2PythonConverter()
+ failed = count_failures(runner)
+ if failed:
+ # Since we only looked at a single function's docstring,
+ # failed should contain at most one item. More than that
+ # is a case we can't handle and should error out on
+ if len(failed) > 1:
+                        err = "Invalid number of test results: %s" % failed
+ raise ValueError(err)
+ # Report a normal failure.
+ self.fail('failed doctests: %s' % str(failed[0]))
+
+ # Rename it so test reports have the original signature.
+ Tester.__name__ = func.__name__
+ return Tester
+
+
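+# Hypothetical use of the decorator machinery above (the ipdoctest instance is
+# created at the bottom of this module); unittest then discovers `checker` as
+# a TestCase whose single test() method runs the converted doctest::
+#
+#     @ipdoctest
+#     def checker():
+#         """
+#         In [1]: 1+1
+#         Out[1]: 2
+#         """
+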
+def ipdocstring(func):
+ """Change the function docstring via ip2py.
+ """
+ if func.__doc__ is not None:
+ func.__doc__ = ip2py(func.__doc__)
+ return func
+
+
+# Make an instance of the classes for public use
+ipdoctest = Doc2UnitTester()
+ip2py = IPython2PythonConverter()
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/Makefile b/contrib/python/ipython/py2/IPython/testing/plugin/Makefile
index d57d198f15..6f999a38fd 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/Makefile
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/Makefile
@@ -1,74 +1,74 @@
-# Set this prefix to where you want to install the plugin
-PREFIX=/usr/local
-
-NOSE0=nosetests -vs --with-doctest --doctest-tests --detailed-errors
-NOSE=nosetests -vvs --with-ipdoctest --doctest-tests --doctest-extension=txt \
---detailed-errors
-
-SRC=ipdoctest.py setup.py ../decorators.py
-
-# Default target for clean 'make'
-default: interactiveshell
-
-# The actual plugin installation
-plugin: IPython_doctest_plugin.egg-info
-
-# Simple targets that test one thing
-simple: plugin simple.py
- $(NOSE) simple.py
-
-dtest: plugin dtexample.py
- $(NOSE) dtexample.py
-
-rtest: plugin test_refs.py
- $(NOSE) test_refs.py
-
-test: plugin dtexample.py
- $(NOSE) dtexample.py test*.py test*.txt
-
-deb: plugin dtexample.py
- $(NOSE) test_combo.txt
-
-# IPython tests
-deco:
- $(NOSE0) IPython.testing.decorators
-
-magic: plugin
- $(NOSE) IPython.core.magic
-
-excolors: plugin
- $(NOSE) IPython.core.excolors
-
-interactiveshell: plugin
- $(NOSE) IPython.core.interactiveshell
-
-strd: plugin
- $(NOSE) IPython.core.strdispatch
-
-engine: plugin
- $(NOSE) IPython.kernel
-
-tf: plugin
- $(NOSE) IPython.config.traitlets
-
-# All of ipython itself
-ipython: plugin
- $(NOSE) IPython
-
-
-# Combined targets
-sr: rtest strd
-
-base: dtest rtest test strd deco
-
-quick: base interactiveshell ipipe
-
-all: base ipython
-
-# Main plugin and cleanup
-IPython_doctest_plugin.egg-info: $(SRC)
- python setup.py install --prefix=$(PREFIX)
- touch $@
-
-clean:
- rm -rf IPython_doctest_plugin.egg-info *~ *pyc build/ dist/
+# Set this prefix to where you want to install the plugin
+PREFIX=/usr/local
+
+NOSE0=nosetests -vs --with-doctest --doctest-tests --detailed-errors
+NOSE=nosetests -vvs --with-ipdoctest --doctest-tests --doctest-extension=txt \
+--detailed-errors
+
+SRC=ipdoctest.py setup.py ../decorators.py
+
+# Default target for clean 'make'
+default: interactiveshell
+
+# The actual plugin installation
+plugin: IPython_doctest_plugin.egg-info
+
+# Simple targets that test one thing
+simple: plugin simple.py
+ $(NOSE) simple.py
+
+dtest: plugin dtexample.py
+ $(NOSE) dtexample.py
+
+rtest: plugin test_refs.py
+ $(NOSE) test_refs.py
+
+test: plugin dtexample.py
+ $(NOSE) dtexample.py test*.py test*.txt
+
+deb: plugin dtexample.py
+ $(NOSE) test_combo.txt
+
+# IPython tests
+deco:
+ $(NOSE0) IPython.testing.decorators
+
+magic: plugin
+ $(NOSE) IPython.core.magic
+
+excolors: plugin
+ $(NOSE) IPython.core.excolors
+
+interactiveshell: plugin
+ $(NOSE) IPython.core.interactiveshell
+
+strd: plugin
+ $(NOSE) IPython.core.strdispatch
+
+engine: plugin
+ $(NOSE) IPython.kernel
+
+tf: plugin
+ $(NOSE) IPython.config.traitlets
+
+# All of ipython itself
+ipython: plugin
+ $(NOSE) IPython
+
+
+# Combined targets
+sr: rtest strd
+
+base: dtest rtest test strd deco
+
+quick: base interactiveshell ipipe
+
+all: base ipython
+
+# Main plugin and cleanup
+IPython_doctest_plugin.egg-info: $(SRC)
+ python setup.py install --prefix=$(PREFIX)
+ touch $@
+
+clean:
+ rm -rf IPython_doctest_plugin.egg-info *~ *pyc build/ dist/
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/README.txt b/contrib/python/ipython/py2/IPython/testing/plugin/README.txt
index e08380d9de..6b34f9e5e1 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/README.txt
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/README.txt
@@ -1,39 +1,39 @@
-=======================================================
- Nose plugin with IPython and extension module support
-=======================================================
-
-This directory provides the key functionality for test support that IPython
-needs as a nose plugin, which can be installed for use in projects other than
-IPython.
-
-The presence of a Makefile here is mostly for development and debugging
-purposes as it only provides a few shorthand commands. You can manually
-install the plugin by using standard Python procedures (``setup.py install``
-with appropriate arguments).
-
-To install the plugin using the Makefile, edit its first line to reflect where
-you'd like the installation. If you want it system-wide, you may want to edit
-the install line in the plugin target to use sudo and no prefix::
-
- sudo python setup.py install
-
-instead of the code using `--prefix` that's in there.
-
-Once you've set the prefix, simply build/install the plugin with::
-
- make
-
-and run the tests with::
-
- make test
-
-You should see output similar to::
-
- maqroll[plugin]> make test
- nosetests -s --with-ipdoctest --doctest-tests dtexample.py
- ..
- ----------------------------------------------------------------------
- Ran 2 tests in 0.016s
-
- OK
-
+=======================================================
+ Nose plugin with IPython and extension module support
+=======================================================
+
+This directory provides the key functionality for test support that IPython
+needs as a nose plugin, which can be installed for use in projects other than
+IPython.
+
+The presence of a Makefile here is mostly for development and debugging
+purposes as it only provides a few shorthand commands. You can manually
+install the plugin by using standard Python procedures (``setup.py install``
+with appropriate arguments).
+
+To install the plugin using the Makefile, edit its first line to reflect where
+you'd like the installation. If you want it system-wide, you may want to edit
+the install line in the plugin target to use sudo and no prefix::
+
+ sudo python setup.py install
+
+instead of the code using `--prefix` that's in there.
+
+Once you've set the prefix, simply build/install the plugin with::
+
+ make
+
+and run the tests with::
+
+ make test
+
+You should see output similar to::
+
+ maqroll[plugin]> make test
+ nosetests -s --with-ipdoctest --doctest-tests dtexample.py
+ ..
+ ----------------------------------------------------------------------
+ Ran 2 tests in 0.016s
+
+ OK
+
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/dtexample.py b/contrib/python/ipython/py2/IPython/testing/plugin/dtexample.py
index 081bf35571..5e02629bf7 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/dtexample.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/dtexample.py
@@ -1,158 +1,158 @@
-"""Simple example using doctests.
-
-This file just contains doctests both using plain python and IPython prompts.
-All tests should be loaded by nose.
-"""
-from __future__ import print_function
-
-def pyfunc():
- """Some pure python tests...
-
- >>> pyfunc()
- 'pyfunc'
-
- >>> import os
-
- >>> 2+3
- 5
-
- >>> for i in range(3):
- ... print(i, end=' ')
- ... print(i+1, end=' ')
- ...
- 0 1 1 2 2 3
- """
- return 'pyfunc'
-
-def ipfunc():
- """Some ipython tests...
-
- In [1]: import os
-
- In [3]: 2+3
- Out[3]: 5
-
- In [26]: for i in range(3):
- ....: print(i, end=' ')
- ....: print(i+1, end=' ')
- ....:
- 0 1 1 2 2 3
-
-
- Examples that access the operating system work:
-
- In [1]: !echo hello
- hello
-
- In [2]: !echo hello > /tmp/foo_iptest
-
- In [3]: !cat /tmp/foo_iptest
- hello
-
- In [4]: rm -f /tmp/foo_iptest
-
- It's OK to use '_' for the last result, but do NOT try to use IPython's
- numbered history of _NN outputs, since those won't exist under the
- doctest environment:
-
- In [7]: 'hi'
- Out[7]: 'hi'
-
- In [8]: print(repr(_))
- 'hi'
-
- In [7]: 3+4
- Out[7]: 7
-
- In [8]: _+3
- Out[8]: 10
-
- In [9]: ipfunc()
- Out[9]: 'ipfunc'
- """
- return 'ipfunc'
-
-
-def ranfunc():
- """A function with some random output.
-
- Normal examples are verified as usual:
- >>> 1+3
- 4
-
- But if you put '# random' in the output, it is ignored:
- >>> 1+3
- junk goes here... # random
-
- >>> 1+2
- again, anything goes #random
- if multiline, the random mark is only needed once.
-
- >>> 1+2
- You can also put the random marker at the end:
- # random
-
- >>> 1+2
- # random
- .. or at the beginning.
-
- More correct input is properly verified:
- >>> ranfunc()
- 'ranfunc'
- """
- return 'ranfunc'
-
-
-def random_all():
- """A function where we ignore the output of ALL examples.
-
- Examples:
-
- # all-random
-
- This mark tells the testing machinery that all subsequent examples should
- be treated as random (ignoring their output). They are still executed,
-    so if they raise an error, it will be detected as such, but their
- output is completely ignored.
-
- >>> 1+3
- junk goes here...
-
- >>> 1+3
- klasdfj;
-
- >>> 1+2
- again, anything goes
- blah...
- """
- pass
-
-def iprand():
- """Some ipython tests with random output.
-
- In [7]: 3+4
- Out[7]: 7
-
- In [8]: print('hello')
- world # random
-
- In [9]: iprand()
- Out[9]: 'iprand'
- """
- return 'iprand'
-
-def iprand_all():
- """Some ipython tests with fully random output.
-
- # all-random
-
- In [7]: 1
- Out[7]: 99
-
- In [8]: print('hello')
- world
-
- In [9]: iprand_all()
- Out[9]: 'junk'
- """
- return 'iprand_all'
+"""Simple example using doctests.
+
+This file just contains doctests both using plain python and IPython prompts.
+All tests should be loaded by nose.
+"""
+from __future__ import print_function
+
+def pyfunc():
+ """Some pure python tests...
+
+ >>> pyfunc()
+ 'pyfunc'
+
+ >>> import os
+
+ >>> 2+3
+ 5
+
+ >>> for i in range(3):
+ ... print(i, end=' ')
+ ... print(i+1, end=' ')
+ ...
+ 0 1 1 2 2 3
+ """
+ return 'pyfunc'
+
+def ipfunc():
+ """Some ipython tests...
+
+ In [1]: import os
+
+ In [3]: 2+3
+ Out[3]: 5
+
+ In [26]: for i in range(3):
+ ....: print(i, end=' ')
+ ....: print(i+1, end=' ')
+ ....:
+ 0 1 1 2 2 3
+
+
+ Examples that access the operating system work:
+
+ In [1]: !echo hello
+ hello
+
+ In [2]: !echo hello > /tmp/foo_iptest
+
+ In [3]: !cat /tmp/foo_iptest
+ hello
+
+ In [4]: rm -f /tmp/foo_iptest
+
+ It's OK to use '_' for the last result, but do NOT try to use IPython's
+ numbered history of _NN outputs, since those won't exist under the
+ doctest environment:
+
+ In [7]: 'hi'
+ Out[7]: 'hi'
+
+ In [8]: print(repr(_))
+ 'hi'
+
+ In [7]: 3+4
+ Out[7]: 7
+
+ In [8]: _+3
+ Out[8]: 10
+
+ In [9]: ipfunc()
+ Out[9]: 'ipfunc'
+ """
+ return 'ipfunc'
+
+
+def ranfunc():
+ """A function with some random output.
+
+ Normal examples are verified as usual:
+ >>> 1+3
+ 4
+
+ But if you put '# random' in the output, it is ignored:
+ >>> 1+3
+ junk goes here... # random
+
+ >>> 1+2
+ again, anything goes #random
+ if multiline, the random mark is only needed once.
+
+ >>> 1+2
+ You can also put the random marker at the end:
+ # random
+
+ >>> 1+2
+ # random
+ .. or at the beginning.
+
+ More correct input is properly verified:
+ >>> ranfunc()
+ 'ranfunc'
+ """
+ return 'ranfunc'
+
+
+def random_all():
+ """A function where we ignore the output of ALL examples.
+
+ Examples:
+
+ # all-random
+
+ This mark tells the testing machinery that all subsequent examples should
+ be treated as random (ignoring their output). They are still executed,
+    so if they raise an error, it will be detected as such, but their
+ output is completely ignored.
+
+ >>> 1+3
+ junk goes here...
+
+ >>> 1+3
+ klasdfj;
+
+ >>> 1+2
+ again, anything goes
+ blah...
+ """
+ pass
+
+def iprand():
+ """Some ipython tests with random output.
+
+ In [7]: 3+4
+ Out[7]: 7
+
+ In [8]: print('hello')
+ world # random
+
+ In [9]: iprand()
+ Out[9]: 'iprand'
+ """
+ return 'iprand'
+
+def iprand_all():
+ """Some ipython tests with fully random output.
+
+ # all-random
+
+ In [7]: 1
+ Out[7]: 99
+
+ In [8]: print('hello')
+ world
+
+ In [9]: iprand_all()
+ Out[9]: 'junk'
+ """
+ return 'iprand_all'
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/ipdoctest.py b/contrib/python/ipython/py2/IPython/testing/plugin/ipdoctest.py
index 64e7e536f8..bc750e0efd 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/ipdoctest.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/ipdoctest.py
@@ -1,769 +1,769 @@
-"""Nose Plugin that supports IPython doctests.
-
-Limitations:
-
-- When generating examples for use as doctests, make sure that you have
- pretty-printing OFF. This can be done either by setting the
- ``PlainTextFormatter.pprint`` option in your configuration file to False, or
- by interactively disabling it with %Pprint. This is required so that IPython
- output matches that of normal Python, which is used by doctest for internal
- execution.
-
-- Do not rely on specific prompt numbers for results (such as using
- '_34==True', for example). For IPython tests run via an external process the
- prompt numbers may be different, and IPython tests run as normal python code
- won't even have these special _NN variables set at all.
-"""
-
-#-----------------------------------------------------------------------------
-# Module imports
-
-# From the standard library
-import doctest
-import inspect
-import logging
-import os
-import re
-import sys
-
+"""Nose Plugin that supports IPython doctests.
+
+Limitations:
+
+- When generating examples for use as doctests, make sure that you have
+ pretty-printing OFF. This can be done either by setting the
+ ``PlainTextFormatter.pprint`` option in your configuration file to False, or
+ by interactively disabling it with %Pprint. This is required so that IPython
+ output matches that of normal Python, which is used by doctest for internal
+ execution.
+
+- Do not rely on specific prompt numbers for results (such as using
+ '_34==True', for example). For IPython tests run via an external process the
+ prompt numbers may be different, and IPython tests run as normal python code
+ won't even have these special _NN variables set at all.
+"""
+
+#-----------------------------------------------------------------------------
+# Module imports
+
+# From the standard library
+import doctest
+import inspect
+import logging
+import os
+import re
+import sys
+
from testpath import modified_env
-from inspect import getmodule
-
-# We are overriding the default doctest runner, so we need to import a few
-# things from doctest directly
-from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE,
- _unittest_reportflags, DocTestRunner,
- _extract_future_flags, pdb, _OutputRedirectingPdb,
- _exception_traceback,
- linecache)
-
-# Third-party modules
-
-from nose.plugins import doctests, Plugin
+from inspect import getmodule
+
+# We are overriding the default doctest runner, so we need to import a few
+# things from doctest directly
+from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE,
+ _unittest_reportflags, DocTestRunner,
+ _extract_future_flags, pdb, _OutputRedirectingPdb,
+ _exception_traceback,
+ linecache)
+
+# Third-party modules
+
+from nose.plugins import doctests, Plugin
from nose.util import anyp, tolist
-
-# Our own imports
-from IPython.utils.py3compat import builtin_mod, PY3, getcwd
-
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#-----------------------------------------------------------------------------
-# Module globals and other constants
-#-----------------------------------------------------------------------------
-
-log = logging.getLogger(__name__)
-
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-def is_extension_module(filename):
- """Return whether the given filename is an extension module.
-
- This simply checks that the extension is either .so or .pyd.
- """
- return os.path.splitext(filename)[1].lower() in ('.so','.pyd')
-
-
-class DocTestSkip(object):
- """Object wrapper for doctests to be skipped."""
-
- ds_skip = """Doctest to skip.
- >>> 1 #doctest: +SKIP
- """
-
- def __init__(self,obj):
- self.obj = obj
-
- def __getattribute__(self,key):
- if key == '__doc__':
- return DocTestSkip.ds_skip
- else:
- return getattr(object.__getattribute__(self,'obj'),key)
-
-# Modified version of the one in the stdlib, that fixes a python bug (doctests
-# not found in extension modules, http://bugs.python.org/issue3158)
-class DocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
-            return True # [XX] no way to be sure.
- elif inspect.ismethoddescriptor(object):
- # Unbound PyQt signals reach this point in Python 3.4b3, and we want
- # to avoid throwing an error. See also http://bugs.python.org/issue3158
- return False
- else:
- raise ValueError("object must be a class or function, got %r" % object)
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
- print('_find for:', obj, name, module) # dbg
- if hasattr(obj,"skip_doctest"):
- #print 'SKIPPING DOCTEST FOR:',obj # dbg
- obj = DocTestSkip(obj)
-
- doctest.DocTestFinder._find(self,tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
+
+# Our own imports
+from IPython.utils.py3compat import builtin_mod, PY3, getcwd
+
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#-----------------------------------------------------------------------------
+# Module globals and other constants
+#-----------------------------------------------------------------------------
+
+log = logging.getLogger(__name__)
+
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+def is_extension_module(filename):
+ """Return whether the given filename is an extension module.
+
+ This simply checks that the extension is either .so or .pyd.
+ """
+ return os.path.splitext(filename)[1].lower() in ('.so','.pyd')
+
+
+class DocTestSkip(object):
+ """Object wrapper for doctests to be skipped."""
+
+ ds_skip = """Doctest to skip.
+ >>> 1 #doctest: +SKIP
+ """
+
+ def __init__(self,obj):
+ self.obj = obj
+
+ def __getattribute__(self,key):
+ if key == '__doc__':
+ return DocTestSkip.ds_skip
+ else:
+ return getattr(object.__getattribute__(self,'obj'),key)
+
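+# A minimal sketch of the wrapper's effect: every attribute access passes
+# through to the wrapped object except __doc__, which is replaced by the
+# skip-marked doctest, so the finder only ever sees a '+SKIP' example::
+#
+#     def f():
+#         """>>> 1/0"""
+#     print(DocTestSkip(f).__doc__)   # prints ds_skip, not f's docstring
+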
+# Modified version of the one in the stdlib, that fixes a python bug (doctests
+# not found in extension modules, http://bugs.python.org/issue3158)
+class DocTestFinder(doctest.DocTestFinder):
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.isbuiltin(object):
+ return module.__name__ == object.__module__
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.ismethod(object):
+ # This one may be a bug in cython that fails to correctly set the
+ # __module__ attribute of methods, but since the same error is easy
+ # to make by extension code writers, having this safety in place
+ # isn't such a bad idea
+ return module.__name__ == object.__self__.__class__.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+            return True # [XX] no way to be sure.
+ elif inspect.ismethoddescriptor(object):
+ # Unbound PyQt signals reach this point in Python 3.4b3, and we want
+ # to avoid throwing an error. See also http://bugs.python.org/issue3158
+ return False
+ else:
+ raise ValueError("object must be a class or function, got %r" % object)
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ print('_find for:', obj, name, module) # dbg
+ if hasattr(obj,"skip_doctest"):
+ #print 'SKIPPING DOCTEST FOR:',obj # dbg
+ obj = DocTestSkip(obj)
+
+ doctest.DocTestFinder._find(self,tests, obj, name, module,
+ source_lines, globs, seen)
+
+ # Below we re-run pieces of the above method with manual modifications,
+ # because the original code is buggy and fails to correctly identify
+ # doctests in extension modules.
+
+ # Local shorthands
from inspect import isroutine, isclass
-
- # Look for tests in a module's contained objects.
- if inspect.ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val) ):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if inspect.isclass(obj) and self._recurse:
- #print 'RECURSE into class:',obj # dbg
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((inspect.isfunction(val) or inspect.isclass(val) or
- inspect.ismethod(val) or
- isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
-class IPDoctestOutputChecker(doctest.OutputChecker):
- """Second-chance checker with support for random tests.
-
- If the default comparison doesn't pass, this checker looks in the expected
- output string for flags that tell us to ignore the output.
- """
-
- random_re = re.compile(r'#\s*random\s+')
-
- def check_output(self, want, got, optionflags):
- """Check output, accepting special markers embedded in the output.
-
- If the output didn't pass the default validation but the special string
- '#random' is included, we accept it."""
-
- # Let the original tester verify first, in case people have valid tests
- # that happen to have a comment saying '#random' embedded in.
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret and self.random_re.search(want):
- #print >> sys.stderr, 'RANDOM OK:',want # dbg
- return True
-
- return ret
-
-
-class DocTestCase(doctests.DocTestCase):
- """Proxy for DocTestCase: provides an address() method that
- returns the correct address for the doctest case. Otherwise
- acts as a proxy to the test case. To provide hints for address(),
- an obj may also be passed -- this will be used as the test object
- for purposes of determining the test address, if it is provided.
- """
-
- # Note: this method was taken from numpy's nosetester module.
-
- # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
- # its constructor that blocks non-default arguments from being passed
- # down into doctest.DocTestCase
-
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- doctests.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
- # Now we must actually copy the original constructor from the stdlib
- # doctest class, because we can't call it directly and a bug in nose
- # means it never gets passed the right arguments.
-
- self._dt_optionflags = optionflags
- self._dt_checker = checker
- self._dt_test = test
- self._dt_test_globs_ori = test.globs
- self._dt_setUp = setUp
- self._dt_tearDown = tearDown
-
- # XXX - store this runner once in the object!
- runner = IPDocTestRunner(optionflags=optionflags,
- checker=checker, verbose=False)
- self._dt_runner = runner
-
-
- # Each doctest should remember the directory it was loaded from, so
- # things like %run work without too many contortions
- self._ori_dir = os.path.dirname(test.filename)
-
- # Modified runTest from the default stdlib
- def runTest(self):
- test = self._dt_test
- runner = self._dt_runner
-
- old = sys.stdout
- new = StringIO()
- optionflags = self._dt_optionflags
-
- if not (optionflags & REPORTING_FLAGS):
- # The option flags don't include any reporting flags,
- # so add the default reporting flags
- optionflags |= _unittest_reportflags
-
- try:
- # Save our current directory and switch out to the one where the
- # test was originally created, in case another doctest did a
- # directory change. We'll restore this in the finally clause.
- curdir = getcwd()
- #print 'runTest in dir:', self._ori_dir # dbg
- os.chdir(self._ori_dir)
-
- runner.DIVIDER = "-"*70
- failures, tries = runner.run(test,out=new.write,
- clear_globs=False)
- finally:
- sys.stdout = old
- os.chdir(curdir)
-
- if failures:
- raise self.failureException(self.format_failure(new.getvalue()))
-
- def setUp(self):
- """Modified test setup that syncs with ipython namespace"""
- #print "setUp test", self._dt_test.examples # dbg
- if isinstance(self._dt_test.examples[0], IPExample):
- # for IPython examples *only*, we swap the globals with the ipython
- # namespace, after updating it with the globals (which doctest
- # fills with the necessary info from the module being tested).
- self.user_ns_orig = {}
- self.user_ns_orig.update(_ip.user_ns)
- _ip.user_ns.update(self._dt_test.globs)
- # We must remove the _ key in the namespace, so that Python's
- # doctest code sets it naturally
- _ip.user_ns.pop('_', None)
- _ip.user_ns['__builtins__'] = builtin_mod
- self._dt_test.globs = _ip.user_ns
-
- super(DocTestCase, self).setUp()
-
- def tearDown(self):
-
- # Undo the test.globs reassignment we made, so that the parent class
- # teardown doesn't destroy the ipython namespace
- if isinstance(self._dt_test.examples[0], IPExample):
- self._dt_test.globs = self._dt_test_globs_ori
- _ip.user_ns.clear()
- _ip.user_ns.update(self.user_ns_orig)
-
- # XXX - fperez: I am not sure if this is truly a bug in nose 0.11, but
- # it does look like one to me: its tearDown method tries to run
- #
- # delattr(builtin_mod, self._result_var)
- #
- # without checking that the attribute really is there; it implicitly
- # assumes it should have been set via displayhook. But if the
- # displayhook was never called, this doesn't necessarily happen. I
- # haven't been able to find a little self-contained example outside of
- # ipython that would show the problem so I can report it to the nose
- # team, but it does happen a lot in our code.
- #
- # So here, we just protect as narrowly as possible by trapping an
- # attribute error whose message would be the name of self._result_var,
- # and letting any other error propagate.
- try:
- super(DocTestCase, self).tearDown()
- except AttributeError as exc:
- if exc.args[0] != self._result_var:
- raise
-
-
-# A simple subclassing of the original with a different class name, so we can
-# distinguish and treat differently IPython examples from pure python ones.
-class IPExample(doctest.Example): pass
-
-
-class IPExternalExample(doctest.Example):
- """Doctest examples to be run in an external process."""
-
- def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
- options=None):
- # Parent constructor
- doctest.Example.__init__(self,source,want,exc_msg,lineno,indent,options)
-
- # An EXTRA newline is needed to prevent pexpect hangs
- self.source += '\n'
-
-
-class IPDocTestParser(doctest.DocTestParser):
- """
- A class used to parse strings containing doctest examples.
-
- Note: This is a version modified to properly recognize IPython input and
- convert any IPython examples into valid Python ones.
- """
- # This regular expression is used to find doctest examples in a
- # string. It defines three groups: `source` is the source code
- # (including leading indentation and prompts); `indent` is the
- # indentation of the first (PS1) line of the source code; and
- # `want` is the expected output (including leading indentation).
-
- # Classic Python prompts or default IPython ones
- _PS1_PY = r'>>>'
- _PS2_PY = r'\.\.\.'
-
- _PS1_IP = r'In\ \[\d+\]:'
- _PS2_IP = r'\ \ \ \.\.\.+:'
-
- _RE_TPL = r'''
- # Source consists of a PS1 line followed by zero or more PS2 lines.
- (?P<source>
- (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
- (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
- \n? # a newline
- # Want consists of any non-blank lines that do not start with PS1.
- (?P<want> (?:(?![ ]*$) # Not a blank line
- (?![ ]*%s) # Not a line starting with PS1
- (?![ ]*%s) # Not a line starting with PS2
- .*$\n? # But any other line
- )*)
- '''
-
- _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
- re.MULTILINE | re.VERBOSE)
-
- _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
- re.MULTILINE | re.VERBOSE)
-
- # Mark a test as being fully random. In this case, we simply append the
- # random marker ('#random') to each individual example's output. This way
- # we don't need to modify any other code.
- _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
-
- # Mark tests to be executed in an external process - currently unsupported.
- _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL')
-
- def ip2py(self,source):
- """Convert input IPython source into valid Python."""
- block = _ip.input_transformer_manager.transform_cell(source)
- if len(block.splitlines()) == 1:
- return _ip.prefilter(block)
- else:
- return block
-
- def parse(self, string, name='<string>'):
- """
- Divide the given string into examples and intervening text,
- and return them as a list of alternating Examples and strings.
- Line numbers for the Examples are 0-based. The optional
- argument `name` is a name identifying this string, and is only
- used for error messages.
- """
-
- #print 'Parse string:\n',string # dbg
-
- string = string.expandtabs()
- # If all lines begin with the same indentation, then strip it.
- min_indent = self._min_indent(string)
- if min_indent > 0:
- string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
- output = []
- charno, lineno = 0, 0
-
- # We make 'all random' tests by adding the '# random' mark to every
- # block of output in the test.
- if self._RANDOM_TEST.search(string):
- random_marker = '\n# random'
- else:
- random_marker = ''
-
- # Whether to convert the input from ipython to python syntax
- ip2py = False
- # Find all doctest examples in the string. First, try them as Python
- # examples, then as IPython ones
- terms = list(self._EXAMPLE_RE_PY.finditer(string))
- if terms:
- # Normal Python example
- #print '-'*70 # dbg
- #print 'PyExample, Source:\n',string # dbg
- #print '-'*70 # dbg
- Example = doctest.Example
- else:
- # It's an ipython example. Note that IPExamples are run
- # in-process, so their syntax must be turned into valid python.
- # IPExternalExamples are run out-of-process (via pexpect) so they
- # don't need any filtering (a real ipython will be executing them).
- terms = list(self._EXAMPLE_RE_IP.finditer(string))
- if self._EXTERNAL_IP.search(string):
- #print '-'*70 # dbg
- #print 'IPExternalExample, Source:\n',string # dbg
- #print '-'*70 # dbg
- Example = IPExternalExample
- else:
- #print '-'*70 # dbg
- #print 'IPExample, Source:\n',string # dbg
- #print '-'*70 # dbg
- Example = IPExample
- ip2py = True
-
- for m in terms:
- # Add the pre-example text to `output`.
- output.append(string[charno:m.start()])
- # Update lineno (lines before this example)
- lineno += string.count('\n', charno, m.start())
- # Extract info from the regexp match.
- (source, options, want, exc_msg) = \
- self._parse_example(m, name, lineno,ip2py)
-
- # Append the random-output marker (it defaults to empty in most
- # cases, it's only non-empty for 'all-random' tests):
- want += random_marker
-
- if Example is IPExternalExample:
- options[doctest.NORMALIZE_WHITESPACE] = True
- want += '\n'
-
- # Create an Example, and add it to the list.
- if not self._IS_BLANK_OR_COMMENT(source):
- output.append(Example(source, want, exc_msg,
- lineno=lineno,
- indent=min_indent+len(m.group('indent')),
- options=options))
- # Update lineno (lines inside this example)
- lineno += string.count('\n', m.start(), m.end())
- # Update charno.
- charno = m.end()
- # Add any remaining post-example text to `output`.
- output.append(string[charno:])
- return output
-
- def _parse_example(self, m, name, lineno,ip2py=False):
- """
- Given a regular expression match from `_EXAMPLE_RE` (`m`),
- return a pair `(source, want)`, where `source` is the matched
- example's source code (with prompts and indentation stripped);
- and `want` is the example's expected output (with indentation
- stripped).
-
- `name` is the string's name, and `lineno` is the line number
- where the example starts; both are used for error messages.
-
- Optional:
- `ip2py`: if true, filter the input via IPython to convert the syntax
- into valid python.
- """
-
- # Get the example's indentation level.
- indent = len(m.group('indent'))
-
- # Divide source into lines; check that they're properly
- # indented; and then strip their indentation & prompts.
- source_lines = m.group('source').split('\n')
-
- # We're using variable-length input prompts
- ps1 = m.group('ps1')
- ps2 = m.group('ps2')
- ps1_len = len(ps1)
-
- self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
- if ps2:
- self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
-
- source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
-
- if ip2py:
- # Convert source input from IPython into valid Python syntax
- source = self.ip2py(source)
-
- # Divide want into lines; check that it's properly indented; and
- # then strip the indentation. Spaces before the last newline should
- # be preserved, so plain rstrip() isn't good enough.
- want = m.group('want')
- want_lines = want.split('\n')
- if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
- del want_lines[-1] # forget final newline & spaces after it
- self._check_prefix(want_lines, ' '*indent, name,
- lineno + len(source_lines))
-
- # Remove ipython output prompt that might be present in the first line
- want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
-
- want = '\n'.join([wl[indent:] for wl in want_lines])
-
- # If `want` contains a traceback message, then extract it.
- m = self._EXCEPTION_RE.match(want)
- if m:
- exc_msg = m.group('msg')
- else:
- exc_msg = None
-
- # Extract options from the source.
- options = self._find_options(source, name, lineno)
-
- return source, options, want, exc_msg
-
- def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
- """
- Given the lines of a source string (including prompts and
- leading indentation), check to make sure that every prompt is
- followed by a space character. If any line is not followed by
- a space character, then raise ValueError.
-
- Note: IPython-modified version which takes the input prompt length as a
- parameter, so that prompts of variable length can be dealt with.
- """
- space_idx = indent+ps1_len
- min_len = space_idx+1
- for i, line in enumerate(lines):
- if len(line) >= min_len and line[space_idx] != ' ':
- raise ValueError('line %r of the docstring for %s '
- 'lacks blank after %s: %r' %
- (lineno+i+1, name,
- line[indent:space_idx], line))
-
-
-SKIP = doctest.register_optionflag('SKIP')
-
-
-class IPDocTestRunner(doctest.DocTestRunner,object):
- """Test runner that synchronizes the IPython namespace with test globals.
- """
-
- def run(self, test, compileflags=None, out=None, clear_globs=True):
-
- # Hack: ipython needs access to the execution context of the example,
- # so that it can propagate user variables loaded by %run into
- # test.globs. We put them here into our modified %run as a function
- # attribute. Our new %run will then only make the namespace update
-        # when called (rather than unconditionally updating test.globs here
- # for all examples, most of which won't be calling %run anyway).
- #_ip._ipdoctest_test_globs = test.globs
- #_ip._ipdoctest_test_filename = test.filename
-
- test.globs.update(_ip.user_ns)
-
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname1 = '%s.%s' % (name, valname)
+ if ( (isroutine(val) or isclass(val))
+ and self._from_module(module, val) ):
+
+ self._find(tests, val, valname1, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ #print 'RECURSE into class:',obj # dbg
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val) or
+ inspect.ismethod(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+
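
A quick aside on the staticmethod/classmethod unwrapping above: a class
__dict__ stores descriptor objects, not the plain functions whose docstrings
doctest can inspect, so _find must unwrap them before recursing. A minimal,
self-contained sketch (the Widget class is purely illustrative)::

    class Widget(object):
        @staticmethod
        def make():
            """make doc"""

        @classmethod
        def build(cls):
            """build doc"""

    # Raw __dict__ entries are descriptors, not functions:
    print(type(Widget.__dict__['make']))        # staticmethod
    print(type(Widget.__dict__['build']))       # classmethod
    # Unwrapping, exactly as _find does, reaches the real function objects:
    print(getattr(Widget, 'make'))              # used for staticmethods
    print(Widget.__dict__['build'].__func__)    # used for classmethods
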
+class IPDoctestOutputChecker(doctest.OutputChecker):
+ """Second-chance checker with support for random tests.
+
+ If the default comparison doesn't pass, this checker looks in the expected
+ output string for flags that tell us to ignore the output.
+ """
+
+ random_re = re.compile(r'#\s*random\s+')
+
+ def check_output(self, want, got, optionflags):
+ """Check output, accepting special markers embedded in the output.
+
+ If the output didn't pass the default validation but the special string
+ '#random' is included, we accept it."""
+
+ # Let the original tester verify first, in case people have valid tests
+ # that happen to have a comment saying '#random' embedded in.
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret and self.random_re.search(want):
+ #print >> sys.stderr, 'RANDOM OK:',want # dbg
+ return True
+
+ return ret
+
+
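
A sketch of the second-chance logic in action, assuming this module is
importable as IPython.testing.plugin.ipdoctest (the layout this repo uses)::

    import doctest
    from IPython.testing.plugin.ipdoctest import IPDoctestOutputChecker

    checker = IPDoctestOutputChecker()
    # The plain comparison fails (4 != 5) ...
    assert not doctest.OutputChecker().check_output('4\n', '5\n', 0)
    # ... but a '#random ' marker in the expected output accepts anything.
    assert checker.check_output('4  #random \n', '5\n', 0)
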
+class DocTestCase(doctests.DocTestCase):
+ """Proxy for DocTestCase: provides an address() method that
+ returns the correct address for the doctest case. Otherwise
+ acts as a proxy to the test case. To provide hints for address(),
+ an obj may also be passed -- this will be used as the test object
+ for purposes of determining the test address, if it is provided.
+ """
+
+ # Note: this method was taken from numpy's nosetester module.
+
+ # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
+ # its constructor that blocks non-default arguments from being passed
+ # down into doctest.DocTestCase
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ doctests.DocTestCase.__init__(self, test,
+ optionflags=optionflags,
+ setUp=setUp, tearDown=tearDown,
+ checker=checker)
+ # Now we must actually copy the original constructor from the stdlib
+ # doctest class, because we can't call it directly and a bug in nose
+ # means it never gets passed the right arguments.
+
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_test_globs_ori = test.globs
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ # XXX - store this runner once in the object!
+ runner = IPDocTestRunner(optionflags=optionflags,
+ checker=checker, verbose=False)
+ self._dt_runner = runner
+
+
+ # Each doctest should remember the directory it was loaded from, so
+ # things like %run work without too many contortions
+ self._ori_dir = os.path.dirname(test.filename)
+
+ # Modified runTest from the default stdlib
+ def runTest(self):
+ test = self._dt_test
+ runner = self._dt_runner
+
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ try:
+ # Save our current directory and switch out to the one where the
+ # test was originally created, in case another doctest did a
+ # directory change. We'll restore this in the finally clause.
+ curdir = getcwd()
+ #print 'runTest in dir:', self._ori_dir # dbg
+ os.chdir(self._ori_dir)
+
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(test,out=new.write,
+ clear_globs=False)
+ finally:
+ sys.stdout = old
+ os.chdir(curdir)
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def setUp(self):
+ """Modified test setup that syncs with ipython namespace"""
+ #print "setUp test", self._dt_test.examples # dbg
+ if isinstance(self._dt_test.examples[0], IPExample):
+ # for IPython examples *only*, we swap the globals with the ipython
+ # namespace, after updating it with the globals (which doctest
+ # fills with the necessary info from the module being tested).
+ self.user_ns_orig = {}
+ self.user_ns_orig.update(_ip.user_ns)
+ _ip.user_ns.update(self._dt_test.globs)
+ # We must remove the _ key in the namespace, so that Python's
+ # doctest code sets it naturally
+ _ip.user_ns.pop('_', None)
+ _ip.user_ns['__builtins__'] = builtin_mod
+ self._dt_test.globs = _ip.user_ns
+
+ super(DocTestCase, self).setUp()
+
+ def tearDown(self):
+
+ # Undo the test.globs reassignment we made, so that the parent class
+ # teardown doesn't destroy the ipython namespace
+ if isinstance(self._dt_test.examples[0], IPExample):
+ self._dt_test.globs = self._dt_test_globs_ori
+ _ip.user_ns.clear()
+ _ip.user_ns.update(self.user_ns_orig)
+
+ # XXX - fperez: I am not sure if this is truly a bug in nose 0.11, but
+ # it does look like one to me: its tearDown method tries to run
+ #
+ # delattr(builtin_mod, self._result_var)
+ #
+ # without checking that the attribute really is there; it implicitly
+ # assumes it should have been set via displayhook. But if the
+ # displayhook was never called, this doesn't necessarily happen. I
+ # haven't been able to find a little self-contained example outside of
+ # ipython that would show the problem so I can report it to the nose
+ # team, but it does happen a lot in our code.
+ #
+ # So here, we just protect as narrowly as possible by trapping an
+ # attribute error whose message would be the name of self._result_var,
+ # and letting any other error propagate.
+ try:
+ super(DocTestCase, self).tearDown()
+ except AttributeError as exc:
+ if exc.args[0] != self._result_var:
+ raise
+
+
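
The setUp/tearDown pair above is a save/swap/restore cycle on the live
IPython namespace. The same pattern with a plain dict standing in for
_ip.user_ns (all names here are illustrative)::

    user_ns = {'a': 1}             # stands in for _ip.user_ns
    test_globs = {'b': 2}          # what doctest prepared for the test

    saved = dict(user_ns)          # setUp: remember the live namespace
    user_ns.update(test_globs)     # examples run against the merged view
    user_ns.pop('_', None)         # let doctest's displayhook manage '_'
    # ... the examples would execute here ...
    user_ns.clear()                # tearDown: restore the original namespace
    user_ns.update(saved)
    assert user_ns == {'a': 1}
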
+# A simple subclassing of the original with a different class name, so we can
+# distinguish and treat differently IPython examples from pure python ones.
+class IPExample(doctest.Example): pass
+
+
+class IPExternalExample(doctest.Example):
+ """Doctest examples to be run in an external process."""
+
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Parent constructor
+ doctest.Example.__init__(self,source,want,exc_msg,lineno,indent,options)
+
+ # An EXTRA newline is needed to prevent pexpect hangs
+ self.source += '\n'
+
+
+class IPDocTestParser(doctest.DocTestParser):
+ """
+ A class used to parse strings containing doctest examples.
+
+ Note: This is a version modified to properly recognize IPython input and
+ convert any IPython examples into valid Python ones.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+
+ # Classic Python prompts or default IPython ones
+ _PS1_PY = r'>>>'
+ _PS2_PY = r'\.\.\.'
+
+ _PS1_IP = r'In\ \[\d+\]:'
+ _PS2_IP = r'\ \ \ \.\.\.+:'
+
+ _RE_TPL = r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
+ (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
+ \n? # a newline
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*%s) # Not a line starting with PS1
+ (?![ ]*%s) # Not a line starting with PS2
+ .*$\n? # But any other line
+ )*)
+ '''
+
+ _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
+ re.MULTILINE | re.VERBOSE)
+
+ _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
+ re.MULTILINE | re.VERBOSE)
+
+ # Mark a test as being fully random. In this case, we simply append the
+ # random marker ('#random') to each individual example's output. This way
+ # we don't need to modify any other code.
+ _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
+
+ # Mark tests to be executed in an external process - currently unsupported.
+ _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL')
+
+ def ip2py(self,source):
+ """Convert input IPython source into valid Python."""
+ block = _ip.input_transformer_manager.transform_cell(source)
+ if len(block.splitlines()) == 1:
+ return _ip.prefilter(block)
+ else:
+ return block
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+
+ #print 'Parse string:\n',string # dbg
+
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+
+ # We make 'all random' tests by adding the '# random' mark to every
+ # block of output in the test.
+ if self._RANDOM_TEST.search(string):
+ random_marker = '\n# random'
+ else:
+ random_marker = ''
+
+ # Whether to convert the input from ipython to python syntax
+ ip2py = False
+ # Find all doctest examples in the string. First, try them as Python
+ # examples, then as IPython ones
+ terms = list(self._EXAMPLE_RE_PY.finditer(string))
+ if terms:
+ # Normal Python example
+ #print '-'*70 # dbg
+ #print 'PyExample, Source:\n',string # dbg
+ #print '-'*70 # dbg
+ Example = doctest.Example
+ else:
+ # It's an ipython example. Note that IPExamples are run
+ # in-process, so their syntax must be turned into valid python.
+ # IPExternalExamples are run out-of-process (via pexpect) so they
+ # don't need any filtering (a real ipython will be executing them).
+ terms = list(self._EXAMPLE_RE_IP.finditer(string))
+ if self._EXTERNAL_IP.search(string):
+ #print '-'*70 # dbg
+ #print 'IPExternalExample, Source:\n',string # dbg
+ #print '-'*70 # dbg
+ Example = IPExternalExample
+ else:
+ #print '-'*70 # dbg
+ #print 'IPExample, Source:\n',string # dbg
+ #print '-'*70 # dbg
+ Example = IPExample
+ ip2py = True
+
+ for m in terms:
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno,ip2py)
+
+ # Append the random-output marker (it defaults to empty in most
+ # cases, it's only non-empty for 'all-random' tests):
+ want += random_marker
+
+ if Example is IPExternalExample:
+ options[doctest.NORMALIZE_WHITESPACE] = True
+ want += '\n'
+
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append(Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options))
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def _parse_example(self, m, name, lineno,ip2py=False):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+
+ Optional:
+ `ip2py`: if true, filter the input via IPython to convert the syntax
+ into valid python.
+ """
+
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+
+ # We're using variable-length input prompts
+ ps1 = m.group('ps1')
+ ps2 = m.group('ps2')
+ ps1_len = len(ps1)
+
+ self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
+ if ps2:
+ self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
+
+ source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
+
+ if ip2py:
+ # Convert source input from IPython into valid Python syntax
+ source = self.ip2py(source)
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+
+ # Remove ipython output prompt that might be present in the first line
+ want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
+
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+
+ Note: IPython-modified version which takes the input prompt length as a
+ parameter, so that prompts of variable length can be dealt with.
+ """
+ space_idx = indent+ps1_len
+ min_len = space_idx+1
+ for i, line in enumerate(lines):
+ if len(line) >= min_len and line[space_idx] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:space_idx], line))
+
+
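
What the IPython-prompt regular expression captures on a small sample,
assuming the same import path as this repo uses (expected group contents are
shown in the comments)::

    from IPython.testing.plugin.ipdoctest import IPDocTestParser

    sample = '\n'.join(['In [1]: x = 2**10',
                        '',
                        'In [2]: x',
                        'Out[2]: 1024',
                        ''])
    matches = list(IPDocTestParser._EXAMPLE_RE_IP.finditer(sample))
    print(len(matches))                 # 2 examples found
    print(matches[1].group('source'))   # In [2]: x
    print(matches[1].group('want'))     # Out[2]: 1024  (with trailing newline)
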
+SKIP = doctest.register_optionflag('SKIP')
+
+
+class IPDocTestRunner(doctest.DocTestRunner,object):
+ """Test runner that synchronizes the IPython namespace with test globals.
+ """
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+
+ # Hack: ipython needs access to the execution context of the example,
+ # so that it can propagate user variables loaded by %run into
+ # test.globs. We put them here into our modified %run as a function
+ # attribute. Our new %run will then only make the namespace update
+        # when called (rather than unconditionally updating test.globs here
+ # for all examples, most of which won't be calling %run anyway).
+ #_ip._ipdoctest_test_globs = test.globs
+ #_ip._ipdoctest_test_filename = test.filename
+
+ test.globs.update(_ip.user_ns)
+
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
return super(IPDocTestRunner,self).run(test,
compileflags,out,clear_globs)
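
Pinning COLUMNS and LINES keeps traceback wrapping identical across
terminals. Where modified_env is unavailable, an equivalent context manager
is easy to write; this sketch is illustrative, not the helper IPython ships::

    import os
    from contextlib import contextmanager

    @contextmanager
    def fixed_terminal_size(columns='80', lines='24'):
        saved = {k: os.environ.get(k) for k in ('COLUMNS', 'LINES')}
        os.environ['COLUMNS'], os.environ['LINES'] = columns, lines
        try:
            yield
        finally:
            for key, value in saved.items():
                if value is None:
                    os.environ.pop(key, None)
                else:
                    os.environ[key] = value
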
-
-
-class DocFileCase(doctest.DocFileCase):
-    """Overrides doctest.DocFileCase to report the test's filename via address()."""
- def address(self):
- return (self._dt_test.filename, None, None)
-
-
-class ExtensionDoctest(doctests.Doctest):
- """Nose Plugin that supports doctests in extension modules.
- """
- name = 'extdoctest' # call nosetests with --with-extdoctest
- enabled = True
-
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- parser.add_option('--doctest-tests', action='store_true',
- dest='doctest_tests',
- default=env.get('NOSE_DOCTEST_TESTS',True),
- help="Also look for doctests in test modules. "
- "Note that classes, methods and functions should "
- "have either doctests or non-doctest tests, "
- "not both. [NOSE_DOCTEST_TESTS]")
- parser.add_option('--doctest-extension', action="append",
- dest="doctestExtension",
- help="Also look for doctests in files with "
- "this extension [NOSE_DOCTEST_EXTENSION]")
- # Set the default as a list, if given in env; otherwise
- # an additional value set on the command line will cause
- # an error.
- env_setting = env.get('NOSE_DOCTEST_EXTENSION')
- if env_setting is not None:
- parser.set_defaults(doctestExtension=tolist(env_setting))
-
-
- def configure(self, options, config):
- Plugin.configure(self, options, config)
- # Pull standard doctest plugin out of config; we will do doctesting
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
- self.doctest_tests = options.doctest_tests
- self.extension = tolist(options.doctestExtension)
-
- self.parser = doctest.DocTestParser()
- self.finder = DocTestFinder()
- self.checker = IPDoctestOutputChecker()
- self.globs = None
- self.extraglobs = None
-
-
- def loadTestsFromExtensionModule(self,filename):
- bpath,mod = os.path.split(filename)
- modname = os.path.splitext(mod)[0]
- try:
- sys.path.append(bpath)
- module = __import__(modname)
- tests = list(self.loadTestsFromModule(module))
- finally:
- sys.path.pop()
- return tests
-
- # NOTE: the method below is almost a copy of the original one in nose, with
- # a few modifications to control output checking.
-
- def loadTestsFromModule(self, module):
- #print '*** ipdoctest - lTM',module # dbg
-
- if not self.matches(module.__name__):
- log.debug("Doctest doesn't want module %s", module)
- return
-
- tests = self.finder.find(module,globs=self.globs,
- extraglobs=self.extraglobs)
- if not tests:
- return
-
- # always use whitespace and ellipsis options
- optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- tests.sort()
- module_file = module.__file__
- if module_file[-4:] in ('.pyc', '.pyo'):
- module_file = module_file[:-1]
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
-
- yield DocTestCase(test,
- optionflags=optionflags,
- checker=self.checker)
-
-
- def loadTestsFromFile(self, filename):
- #print "ipdoctest - from file", filename # dbg
- if is_extension_module(filename):
- for t in self.loadTestsFromExtensionModule(filename):
- yield t
- else:
- if self.extension and anyp(filename.endswith, self.extension):
- name = os.path.basename(filename)
- dh = open(filename)
- try:
- doc = dh.read()
- finally:
- dh.close()
- test = self.parser.get_doctest(
- doc, globs={'__file__': filename}, name=name,
- filename=filename, lineno=0)
- if test.examples:
- #print 'FileCase:',test.examples # dbg
- yield DocFileCase(test)
- else:
- yield False # no tests to load
-
-
-class IPythonDoctest(ExtensionDoctest):
-    """Nose Plugin that supports IPython-syntax doctests, including in extension modules.
- """
- name = 'ipdoctest' # call nosetests with --with-ipdoctest
- enabled = True
-
- def makeTest(self, obj, parent):
- """Look for doctests in the given object, which will be a
- function, method or class.
- """
- #print 'Plugin analyzing:', obj, parent # dbg
- # always use whitespace and ellipsis options
- optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- doctests = self.finder.find(obj, module=getmodule(parent))
- if doctests:
- for test in doctests:
- if len(test.examples) == 0:
- continue
-
- yield DocTestCase(test, obj=obj,
- optionflags=optionflags,
- checker=self.checker)
-
- def options(self, parser, env=os.environ):
- #print "Options for nose plugin:", self.name # dbg
- Plugin.options(self, parser, env)
- parser.add_option('--ipdoctest-tests', action='store_true',
- dest='ipdoctest_tests',
- default=env.get('NOSE_IPDOCTEST_TESTS',True),
- help="Also look for doctests in test modules. "
- "Note that classes, methods and functions should "
- "have either doctests or non-doctest tests, "
- "not both. [NOSE_IPDOCTEST_TESTS]")
- parser.add_option('--ipdoctest-extension', action="append",
- dest="ipdoctest_extension",
- help="Also look for doctests in files with "
- "this extension [NOSE_IPDOCTEST_EXTENSION]")
- # Set the default as a list, if given in env; otherwise
- # an additional value set on the command line will cause
- # an error.
- env_setting = env.get('NOSE_IPDOCTEST_EXTENSION')
- if env_setting is not None:
- parser.set_defaults(ipdoctest_extension=tolist(env_setting))
-
- def configure(self, options, config):
- #print "Configuring nose plugin:", self.name # dbg
- Plugin.configure(self, options, config)
- # Pull standard doctest plugin out of config; we will do doctesting
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
- self.doctest_tests = options.ipdoctest_tests
- self.extension = tolist(options.ipdoctest_extension)
-
- self.parser = IPDocTestParser()
- self.finder = DocTestFinder(parser=self.parser)
- self.checker = IPDoctestOutputChecker()
- self.globs = None
- self.extraglobs = None
+
+
+class DocFileCase(doctest.DocFileCase):
+    """Overrides doctest.DocFileCase to report the test's filename via address()."""
+ def address(self):
+ return (self._dt_test.filename, None, None)
+
+
+class ExtensionDoctest(doctests.Doctest):
+ """Nose Plugin that supports doctests in extension modules.
+ """
+ name = 'extdoctest' # call nosetests with --with-extdoctest
+ enabled = True
+
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+ parser.add_option('--doctest-tests', action='store_true',
+ dest='doctest_tests',
+ default=env.get('NOSE_DOCTEST_TESTS',True),
+ help="Also look for doctests in test modules. "
+ "Note that classes, methods and functions should "
+ "have either doctests or non-doctest tests, "
+ "not both. [NOSE_DOCTEST_TESTS]")
+ parser.add_option('--doctest-extension', action="append",
+ dest="doctestExtension",
+ help="Also look for doctests in files with "
+ "this extension [NOSE_DOCTEST_EXTENSION]")
+ # Set the default as a list, if given in env; otherwise
+ # an additional value set on the command line will cause
+ # an error.
+ env_setting = env.get('NOSE_DOCTEST_EXTENSION')
+ if env_setting is not None:
+ parser.set_defaults(doctestExtension=tolist(env_setting))
+
+
+ def configure(self, options, config):
+ Plugin.configure(self, options, config)
+ # Pull standard doctest plugin out of config; we will do doctesting
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+ self.doctest_tests = options.doctest_tests
+ self.extension = tolist(options.doctestExtension)
+
+ self.parser = doctest.DocTestParser()
+ self.finder = DocTestFinder()
+ self.checker = IPDoctestOutputChecker()
+ self.globs = None
+ self.extraglobs = None
+
+
+ def loadTestsFromExtensionModule(self,filename):
+ bpath,mod = os.path.split(filename)
+ modname = os.path.splitext(mod)[0]
+ try:
+ sys.path.append(bpath)
+ module = __import__(modname)
+ tests = list(self.loadTestsFromModule(module))
+ finally:
+ sys.path.pop()
+ return tests
+
+ # NOTE: the method below is almost a copy of the original one in nose, with
+ # a few modifications to control output checking.
+
+ def loadTestsFromModule(self, module):
+ #print '*** ipdoctest - lTM',module # dbg
+
+ if not self.matches(module.__name__):
+ log.debug("Doctest doesn't want module %s", module)
+ return
+
+ tests = self.finder.find(module,globs=self.globs,
+ extraglobs=self.extraglobs)
+ if not tests:
+ return
+
+ # always use whitespace and ellipsis options
+ optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ tests.sort()
+ module_file = module.__file__
+ if module_file[-4:] in ('.pyc', '.pyo'):
+ module_file = module_file[:-1]
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+
+ yield DocTestCase(test,
+ optionflags=optionflags,
+ checker=self.checker)
+
+
+ def loadTestsFromFile(self, filename):
+ #print "ipdoctest - from file", filename # dbg
+ if is_extension_module(filename):
+ for t in self.loadTestsFromExtensionModule(filename):
+ yield t
+ else:
+ if self.extension and anyp(filename.endswith, self.extension):
+ name = os.path.basename(filename)
+ dh = open(filename)
+ try:
+ doc = dh.read()
+ finally:
+ dh.close()
+ test = self.parser.get_doctest(
+ doc, globs={'__file__': filename}, name=name,
+ filename=filename, lineno=0)
+ if test.examples:
+ #print 'FileCase:',test.examples # dbg
+ yield DocFileCase(test)
+ else:
+ yield False # no tests to load
+
+
+class IPythonDoctest(ExtensionDoctest):
+    """Nose Plugin that supports IPython-syntax doctests, including in extension modules.
+ """
+ name = 'ipdoctest' # call nosetests with --with-ipdoctest
+ enabled = True
+
+ def makeTest(self, obj, parent):
+ """Look for doctests in the given object, which will be a
+ function, method or class.
+ """
+ #print 'Plugin analyzing:', obj, parent # dbg
+ # always use whitespace and ellipsis options
+ optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ doctests = self.finder.find(obj, module=getmodule(parent))
+ if doctests:
+ for test in doctests:
+ if len(test.examples) == 0:
+ continue
+
+ yield DocTestCase(test, obj=obj,
+ optionflags=optionflags,
+ checker=self.checker)
+
+ def options(self, parser, env=os.environ):
+ #print "Options for nose plugin:", self.name # dbg
+ Plugin.options(self, parser, env)
+ parser.add_option('--ipdoctest-tests', action='store_true',
+ dest='ipdoctest_tests',
+ default=env.get('NOSE_IPDOCTEST_TESTS',True),
+ help="Also look for doctests in test modules. "
+ "Note that classes, methods and functions should "
+ "have either doctests or non-doctest tests, "
+ "not both. [NOSE_IPDOCTEST_TESTS]")
+ parser.add_option('--ipdoctest-extension', action="append",
+ dest="ipdoctest_extension",
+ help="Also look for doctests in files with "
+ "this extension [NOSE_IPDOCTEST_EXTENSION]")
+ # Set the default as a list, if given in env; otherwise
+ # an additional value set on the command line will cause
+ # an error.
+ env_setting = env.get('NOSE_IPDOCTEST_EXTENSION')
+ if env_setting is not None:
+ parser.set_defaults(ipdoctest_extension=tolist(env_setting))
+
+ def configure(self, options, config):
+ #print "Configuring nose plugin:", self.name # dbg
+ Plugin.configure(self, options, config)
+ # Pull standard doctest plugin out of config; we will do doctesting
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+ self.doctest_tests = options.ipdoctest_tests
+ self.extension = tolist(options.ipdoctest_extension)
+
+ self.parser = IPDocTestParser()
+ self.finder = DocTestFinder(parser=self.parser)
+ self.checker = IPDoctestOutputChecker()
+ self.globs = None
+ self.extraglobs = None
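
loadTestsFromExtensionModule above relies on a temporary sys.path entry to
import a compiled module by file path. The core maneuver, isolated into a
stand-alone helper (the function name is illustrative)::

    import os
    import sys

    def import_from_path(filename):
        """Import a module by file path via a temporary sys.path entry."""
        bpath, mod = os.path.split(filename)
        modname = os.path.splitext(mod)[0]
        sys.path.append(bpath)
        try:
            return __import__(modname)
        finally:
            sys.path.pop()   # always undo the path change, even on failure
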
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/iptest.py b/contrib/python/ipython/py2/IPython/testing/plugin/iptest.py
index 25b4634f2f..a75cab993f 100755
--- a/contrib/python/ipython/py2/IPython/testing/plugin/iptest.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/iptest.py
@@ -1,19 +1,19 @@
-#!/usr/bin/env python
-"""Nose-based test runner.
-"""
-from __future__ import print_function
-
-from nose.core import main
-from nose.plugins.builtin import plugins
-from nose.plugins.doctests import Doctest
-
-from . import ipdoctest
-from .ipdoctest import IPDocTestRunner
-
-if __name__ == '__main__':
- print('WARNING: this code is incomplete!')
- print()
-
- pp = [x() for x in plugins] # activate all builtin plugins first
- main(testRunner=IPDocTestRunner(),
- plugins=pp+[ipdoctest.IPythonDoctest(),Doctest()])
+#!/usr/bin/env python
+"""Nose-based test runner.
+"""
+from __future__ import print_function
+
+from nose.core import main
+from nose.plugins.builtin import plugins
+from nose.plugins.doctests import Doctest
+
+from . import ipdoctest
+from .ipdoctest import IPDocTestRunner
+
+if __name__ == '__main__':
+ print('WARNING: this code is incomplete!')
+ print()
+
+ pp = [x() for x in plugins] # activate all builtin plugins first
+ main(testRunner=IPDocTestRunner(),
+ plugins=pp+[ipdoctest.IPythonDoctest(),Doctest()])
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/setup.py b/contrib/python/ipython/py2/IPython/testing/plugin/setup.py
index 785704337b..a3281d30c8 100755
--- a/contrib/python/ipython/py2/IPython/testing/plugin/setup.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/setup.py
@@ -1,18 +1,18 @@
-#!/usr/bin/env python
-"""A Nose plugin to support IPython doctests.
-"""
-
-from setuptools import setup
-
-setup(name='IPython doctest plugin',
- version='0.1',
- author='The IPython Team',
- description = 'Nose plugin to load IPython-extended doctests',
- license = 'LGPL',
- py_modules = ['ipdoctest'],
- entry_points = {
- 'nose.plugins.0.10': ['ipdoctest = ipdoctest:IPythonDoctest',
- 'extdoctest = ipdoctest:ExtensionDoctest',
- ],
- },
- )
+#!/usr/bin/env python
+"""A Nose plugin to support IPython doctests.
+"""
+
+from setuptools import setup
+
+setup(name='IPython doctest plugin',
+ version='0.1',
+ author='The IPython Team',
+ description = 'Nose plugin to load IPython-extended doctests',
+ license = 'LGPL',
+ py_modules = ['ipdoctest'],
+ entry_points = {
+ 'nose.plugins.0.10': ['ipdoctest = ipdoctest:IPythonDoctest',
+ 'extdoctest = ipdoctest:ExtensionDoctest',
+ ],
+ },
+ )
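
Once this package is installed, nose discovers both plugins through the
'nose.plugins.0.10' entry-point group. A quick way to verify the
registration, assuming setuptools' pkg_resources is available::

    import pkg_resources

    for ep in pkg_resources.iter_entry_points('nose.plugins.0.10'):
        print(ep.name, '->', ep.module_name)
    # Expected to include: ipdoctest -> ipdoctest, extdoctest -> ipdoctest
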
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py b/contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py
index 4c517da949..ef7dd157ae 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/show_refs.py
@@ -1,20 +1,20 @@
-"""Simple script to show reference holding behavior.
-
-This is used by a companion test case.
-"""
-from __future__ import print_function
-
-import gc
-
-class C(object):
- def __del__(self):
- pass
- #print 'deleting object...' # dbg
-
-if __name__ == '__main__':
- c = C()
-
- c_refs = gc.get_referrers(c)
- ref_ids = list(map(id,c_refs))
-
- print('c referrers:',list(map(type,c_refs)))
+"""Simple script to show reference holding behavior.
+
+This is used by a companion test case.
+"""
+from __future__ import print_function
+
+import gc
+
+class C(object):
+ def __del__(self):
+ pass
+ #print 'deleting object...' # dbg
+
+if __name__ == '__main__':
+ c = C()
+
+ c_refs = gc.get_referrers(c)
+ ref_ids = list(map(id,c_refs))
+
+ print('c referrers:',list(map(type,c_refs)))
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/simple.py b/contrib/python/ipython/py2/IPython/testing/plugin/simple.py
index bcc43f55e8..a7d33d9a16 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/simple.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/simple.py
@@ -1,34 +1,34 @@
-"""Simple example using doctests.
-
-This file just contains doctests both using plain python and IPython prompts.
-All tests should be loaded by nose.
-"""
-from __future__ import print_function
-
-def pyfunc():
- """Some pure python tests...
-
- >>> pyfunc()
- 'pyfunc'
-
- >>> import os
-
- >>> 2+3
- 5
-
- >>> for i in range(3):
- ... print(i, end=' ')
- ... print(i+1, end=' ')
- ...
- 0 1 1 2 2 3
- """
- return 'pyfunc'
-
-
-def ipyfunc2():
- """Some pure python tests...
-
- >>> 1+1
- 2
- """
- return 'pyfunc2'
+"""Simple example using doctests.
+
+This file just contains doctests both using plain python and IPython prompts.
+All tests should be loaded by nose.
+"""
+from __future__ import print_function
+
+def pyfunc():
+ """Some pure python tests...
+
+ >>> pyfunc()
+ 'pyfunc'
+
+ >>> import os
+
+ >>> 2+3
+ 5
+
+ >>> for i in range(3):
+ ... print(i, end=' ')
+ ... print(i+1, end=' ')
+ ...
+ 0 1 1 2 2 3
+ """
+ return 'pyfunc'
+
+
+def ipyfunc2():
+ """Some pure python tests...
+
+ >>> 1+1
+ 2
+ """
+ return 'pyfunc2'
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/simplevars.py b/contrib/python/ipython/py2/IPython/testing/plugin/simplevars.py
index ee4039a59a..5134c6e928 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/simplevars.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/simplevars.py
@@ -1,3 +1,3 @@
-from __future__ import print_function
-x = 1
-print('x is:',x)
+from __future__ import print_function
+x = 1
+print('x is:',x)
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/test_combo.txt b/contrib/python/ipython/py2/IPython/testing/plugin/test_combo.txt
index 0de694fe7e..6c8759f3e7 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/test_combo.txt
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/test_combo.txt
@@ -1,36 +1,36 @@
-=======================
- Combo testing example
-=======================
-
-This is a simple example that mixes ipython doctests::
-
- In [1]: import code
-
- In [2]: 2**12
- Out[2]: 4096
-
-with command-line example information that does *not* get executed::
-
- $ mpirun -n 4 ipengine --controller-port=10000 --controller-ip=host0
-
-and with literal examples of Python source code::
-
- controller = dict(host='myhost',
- engine_port=None, # default is 10105
- control_port=None,
- )
-
-  # keys are hostnames, values are the number of engines on that host
- engines = dict(node1=2,
- node2=2,
- node3=2,
- node3=2,
- )
-
- # Force failure to detect that this test is being run.
- 1/0
-
-These source code examples are executed but no output is compared at all. An
-error or failure is reported only if an exception is raised.
-
-NOTE: the execution of pure python blocks is not yet working!
+=======================
+ Combo testing example
+=======================
+
+This is a simple example that mixes ipython doctests::
+
+ In [1]: import code
+
+ In [2]: 2**12
+ Out[2]: 4096
+
+with command-line example information that does *not* get executed::
+
+ $ mpirun -n 4 ipengine --controller-port=10000 --controller-ip=host0
+
+and with literal examples of Python source code::
+
+ controller = dict(host='myhost',
+ engine_port=None, # default is 10105
+ control_port=None,
+ )
+
+  # keys are hostnames, values are the number of engines on that host
+ engines = dict(node1=2,
+ node2=2,
+ node3=2,
+ node3=2,
+ )
+
+ # Force failure to detect that this test is being run.
+ 1/0
+
+These source code examples are executed but no output is compared at all. An
+error or failure is reported only if an exception is raised.
+
+NOTE: the execution of pure python blocks is not yet working!
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/test_example.txt b/contrib/python/ipython/py2/IPython/testing/plugin/test_example.txt
index f6258b0615..f8b681eb4f 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/test_example.txt
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/test_example.txt
@@ -1,24 +1,24 @@
-=====================================
- Tests in example form - pure python
-=====================================
-
-This file contains doctest examples embedded as code blocks, using normal
-Python prompts. See the accompanying file for similar examples using IPython
-prompts (you can't mix both types within one file). The following will be run
-as a test::
-
- >>> 1+1
- 2
- >>> print ("hello")
- hello
-
-More than one example works::
-
- >>> s="Hello World"
-
- >>> s.upper()
- 'HELLO WORLD'
-
-but you should note that the *entire* test file is considered to be a single
-test. Individual code blocks that fail are printed separately as ``example
-failures``, but the whole file is still counted and reported as one test.
+=====================================
+ Tests in example form - pure python
+=====================================
+
+This file contains doctest examples embedded as code blocks, using normal
+Python prompts. See the accompanying file for similar examples using IPython
+prompts (you can't mix both types within one file). The following will be run
+as a test::
+
+ >>> 1+1
+ 2
+ >>> print ("hello")
+ hello
+
+More than one example works::
+
+ >>> s="Hello World"
+
+ >>> s.upper()
+ 'HELLO WORLD'
+
+but you should note that the *entire* test file is considered to be a single
+test. Individual code blocks that fail are printed separately as ``example
+failures``, but the whole file is still counted and reported as one test.
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/test_exampleip.txt b/contrib/python/ipython/py2/IPython/testing/plugin/test_exampleip.txt
index cbc00cc976..8afcbfdf7d 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/test_exampleip.txt
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/test_exampleip.txt
@@ -1,30 +1,30 @@
-=================================
- Tests in example form - IPython
-=================================
-
-You can write text files with examples that use IPython prompts (as long as you
-use the nose ipython doctest plugin), but you can not mix and match prompt
-styles in a single file. That is, you either use all ``>>>`` prompts or all
-IPython-style prompts. Your test suite *can* have both types, you just need to
-put each type of example in a separate file. Using IPython prompts, you can paste
-directly from your session::
-
- In [5]: s="Hello World"
-
- In [6]: s.upper()
- Out[6]: 'HELLO WORLD'
-
-Another example::
-
- In [8]: 1+3
- Out[8]: 4
-
-Just like in IPython docstrings, you can use all IPython syntax and features::
-
- In [9]: !echo "hello"
- hello
-
- In [10]: a='hi'
-
- In [11]: !echo $a
- hi
+=================================
+ Tests in example form - IPython
+=================================
+
+You can write text files with examples that use IPython prompts (as long as you
+use the nose ipython doctest plugin), but you can not mix and match prompt
+styles in a single file. That is, you either use all ``>>>`` prompts or all
+IPython-style prompts. Your test suite *can* have both types, you just need to
+put each type of example in a separate file. Using IPython prompts, you can paste
+directly from your session::
+
+ In [5]: s="Hello World"
+
+ In [6]: s.upper()
+ Out[6]: 'HELLO WORLD'
+
+Another example::
+
+ In [8]: 1+3
+ Out[8]: 4
+
+Just like in IPython docstrings, you can use all IPython syntax and features::
+
+ In [9]: !echo "hello"
+ hello
+
+ In [10]: a='hi'
+
+ In [11]: !echo $a
+ hi
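
The no-mixing rule follows directly from IPDocTestParser.parse: if any
classic '>>>' example is found, the whole string is handled as plain doctest
material and IPython prompts are never scanned. A sketch, assuming the import
path this repo uses::

    from IPython.testing.plugin.ipdoctest import IPDocTestParser

    mixed = '\n'.join(['>>> 1+1',
                       '2',
                       '',
                       'In [1]: 1+3',
                       'Out[1]: 4',
                       ''])
    py_hits = list(IPDocTestParser._EXAMPLE_RE_PY.finditer(mixed))
    print(len(py_hits))   # 1 -- so the 'In [1]:' block is treated as prose
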
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/test_ipdoctest.py b/contrib/python/ipython/py2/IPython/testing/plugin/test_ipdoctest.py
index 05dc387d9b..a7add7da79 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/test_ipdoctest.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/test_ipdoctest.py
@@ -1,80 +1,80 @@
-"""Tests for the ipdoctest machinery itself.
-
-Note: in a file named test_X, functions whose only test is their docstring (as
-a doctest) and which have no test functionality of their own, should be called
-'doctest_foo' instead of 'test_foo', otherwise they get double-counted (the
-empty function call is counted as a test, which just inflates test numbers
-artificially).
-"""
-from IPython.utils.py3compat import doctest_refactor_print
-
-@doctest_refactor_print
-def doctest_simple():
- """ipdoctest must handle simple inputs
-
- In [1]: 1
- Out[1]: 1
-
- In [2]: print 1
- 1
- """
-
-@doctest_refactor_print
-def doctest_multiline1():
- """The ipdoctest machinery must handle multiline examples gracefully.
-
- In [2]: for i in range(4):
- ...: print i
- ...:
- 0
- 1
- 2
- 3
- """
-
-@doctest_refactor_print
-def doctest_multiline2():
- """Multiline examples that define functions and print output.
-
- In [7]: def f(x):
- ...: return x+1
- ...:
-
- In [8]: f(1)
- Out[8]: 2
-
- In [9]: def g(x):
- ...: print 'x is:',x
- ...:
-
- In [10]: g(1)
- x is: 1
-
- In [11]: g('hello')
- x is: hello
- """
-
-
-def doctest_multiline3():
- """Multiline examples with blank lines.
-
- In [12]: def h(x):
- ....: if x>1:
- ....: return x**2
- ....: # To leave a blank line in the input, you must mark it
- ....: # with a comment character:
- ....: #
- ....: # otherwise the doctest parser gets confused.
- ....: else:
- ....: return -1
- ....:
-
- In [13]: h(5)
- Out[13]: 25
-
- In [14]: h(1)
- Out[14]: -1
-
- In [15]: h(0)
- Out[15]: -1
- """
+"""Tests for the ipdoctest machinery itself.
+
+Note: in a file named test_X, functions whose only test is their docstring (as
+a doctest) and which have no test functionality of their own, should be called
+'doctest_foo' instead of 'test_foo', otherwise they get double-counted (the
+empty function call is counted as a test, which just inflates test numbers
+artificially).
+"""
+from IPython.utils.py3compat import doctest_refactor_print
+
+@doctest_refactor_print
+def doctest_simple():
+ """ipdoctest must handle simple inputs
+
+ In [1]: 1
+ Out[1]: 1
+
+ In [2]: print 1
+ 1
+ """
+
+@doctest_refactor_print
+def doctest_multiline1():
+ """The ipdoctest machinery must handle multiline examples gracefully.
+
+ In [2]: for i in range(4):
+ ...: print i
+ ...:
+ 0
+ 1
+ 2
+ 3
+ """
+
+@doctest_refactor_print
+def doctest_multiline2():
+ """Multiline examples that define functions and print output.
+
+ In [7]: def f(x):
+ ...: return x+1
+ ...:
+
+ In [8]: f(1)
+ Out[8]: 2
+
+ In [9]: def g(x):
+ ...: print 'x is:',x
+ ...:
+
+ In [10]: g(1)
+ x is: 1
+
+ In [11]: g('hello')
+ x is: hello
+ """
+
+
+def doctest_multiline3():
+ """Multiline examples with blank lines.
+
+ In [12]: def h(x):
+ ....: if x>1:
+ ....: return x**2
+ ....: # To leave a blank line in the input, you must mark it
+ ....: # with a comment character:
+ ....: #
+ ....: # otherwise the doctest parser gets confused.
+ ....: else:
+ ....: return -1
+ ....:
+
+ In [13]: h(5)
+ Out[13]: 25
+
+ In [14]: h(1)
+ Out[14]: -1
+
+ In [15]: h(0)
+ Out[15]: -1
+ """
diff --git a/contrib/python/ipython/py2/IPython/testing/plugin/test_refs.py b/contrib/python/ipython/py2/IPython/testing/plugin/test_refs.py
index 8a2a78b75d..50d0857134 100644
--- a/contrib/python/ipython/py2/IPython/testing/plugin/test_refs.py
+++ b/contrib/python/ipython/py2/IPython/testing/plugin/test_refs.py
@@ -1,46 +1,46 @@
-"""Some simple tests for the plugin while running scripts.
-"""
-# Module imports
-# Std lib
-import inspect
-
-# Our own
-
-#-----------------------------------------------------------------------------
-# Testing functions
-
-def test_trivial():
- """A trivial passing test."""
- pass
-
-def doctest_run():
- """Test running a trivial script.
-
- In [13]: run simplevars.py
- x is: 1
- """
-
-def doctest_runvars():
-    """Test that variables defined in scripts get loaded correctly via %run.
-
- In [13]: run simplevars.py
- x is: 1
-
- In [14]: x
- Out[14]: 1
- """
-
-def doctest_ivars():
-    """Test that variables defined interactively are picked up.
-
- In [5]: zz=1
-
- In [6]: zz
- Out[6]: 1
- """
-
-def doctest_refs():
- """DocTest reference holding issues when running scripts.
-
- In [32]: run show_refs.py
- c referrers: [<... 'dict'>]
- """
+"""Some simple tests for the plugin while running scripts.
+"""
+# Module imports
+# Std lib
+import inspect
+
+# Our own
+
+#-----------------------------------------------------------------------------
+# Testing functions
+
+def test_trivial():
+ """A trivial passing test."""
+ pass
+
+def doctest_run():
+ """Test running a trivial script.
+
+ In [13]: run simplevars.py
+ x is: 1
+ """
+
+def doctest_runvars():
+    """Test that variables defined in scripts get loaded correctly via %run.
+
+ In [13]: run simplevars.py
+ x is: 1
+
+ In [14]: x
+ Out[14]: 1
+ """
+
+def doctest_ivars():
+    """Test that variables defined interactively are picked up.
+
+ In [5]: zz=1
+
+ In [6]: zz
+ Out[6]: 1
+ """
+
+def doctest_refs():
+ """DocTest reference holding issues when running scripts.
+
+ In [32]: run show_refs.py
+ c referrers: [<... 'dict'>]
+ """
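
The expected output in doctest_refs leans on doctest's ELLIPSIS option so a
single pattern covers both the Python 2 and Python 3 spellings of the type
repr. The mechanism in isolation::

    import doctest

    checker = doctest.OutputChecker()
    want = "c referrers: [<... 'dict'>]\n"
    assert checker.check_output(want, "c referrers: [<type 'dict'>]\n",
                                doctest.ELLIPSIS)
    assert checker.check_output(want, "c referrers: [<class 'dict'>]\n",
                                doctest.ELLIPSIS)
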
diff --git a/contrib/python/ipython/py2/IPython/testing/skipdoctest.py b/contrib/python/ipython/py2/IPython/testing/skipdoctest.py
index 8357f609b0..564ca54027 100644
--- a/contrib/python/ipython/py2/IPython/testing/skipdoctest.py
+++ b/contrib/python/ipython/py2/IPython/testing/skipdoctest.py
@@ -1,41 +1,41 @@
-"""Decorators marks that a doctest should be skipped, for both python 2 and 3.
-
-The IPython.testing.decorators module triggers various extra imports, including
-numpy and sympy if they're present. Since this decorator is used in core parts
-of IPython, it's in a separate module so that running IPython doesn't trigger
-those imports."""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2009-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-
-#-----------------------------------------------------------------------------
-# Decorators
-#-----------------------------------------------------------------------------
-
-def skip_doctest(f):
- """Decorator - mark a function or method for skipping its doctest.
-
- This decorator allows you to mark a function whose docstring you wish to
- omit from testing, while preserving the docstring for introspection, help,
- etc."""
- f.skip_doctest = True
- return f
-
-
-def skip_doctest_py3(f):
- """Decorator - skip the doctest under Python 3."""
- f.skip_doctest = (sys.version_info[0] >= 3)
- return f
+"""Decorators marks that a doctest should be skipped, for both python 2 and 3.
+
+The IPython.testing.decorators module triggers various extra imports, including
+numpy and sympy if they're present. Since this decorator is used in core parts
+of IPython, it's in a separate module so that running IPython doesn't trigger
+those imports."""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2009-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+#-----------------------------------------------------------------------------
+# Decorators
+#-----------------------------------------------------------------------------
+
+def skip_doctest(f):
+ """Decorator - mark a function or method for skipping its doctest.
+
+ This decorator allows you to mark a function whose docstring you wish to
+ omit from testing, while preserving the docstring for introspection, help,
+ etc."""
+ f.skip_doctest = True
+ return f
+
+
+def skip_doctest_py3(f):
+ """Decorator - skip the doctest under Python 3."""
+ f.skip_doctest = (sys.version_info[0] >= 3)
+ return f
def skip_doctest_py2(f):
"""Decorator - skip the doctest under Python 3."""
diff --git a/contrib/python/ipython/py2/IPython/testing/tools.py b/contrib/python/ipython/py2/IPython/testing/tools.py
index 98f796fe3c..23bf6a68cb 100644
--- a/contrib/python/ipython/py2/IPython/testing/tools.py
+++ b/contrib/python/ipython/py2/IPython/testing/tools.py
@@ -1,296 +1,296 @@
-"""Generic testing tools.
-
-Authors
--------
-- Fernando Perez <Fernando.Perez@berkeley.edu>
-"""
-
-from __future__ import absolute_import
-
+"""Generic testing tools.
+
+Authors
+-------
+- Fernando Perez <Fernando.Perez@berkeley.edu>
+"""
+
+from __future__ import absolute_import
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-import os
-import re
-import sys
-import tempfile
-
-from contextlib import contextmanager
-from io import StringIO
-from subprocess import Popen, PIPE
+
+import os
+import re
+import sys
+import tempfile
+
+from contextlib import contextmanager
+from io import StringIO
+from subprocess import Popen, PIPE
try:
from unittest.mock import patch
except ImportError:
# Python 2 compatibility
from mock import patch
-
-try:
- # These tools are used by parts of the runtime, so we make the nose
- # dependency optional at this point. Nose is a hard dependency to run the
- # test suite, but NOT to use ipython itself.
- import nose.tools as nt
- has_nose = True
-except ImportError:
- has_nose = False
-
-from traitlets.config.loader import Config
-from IPython.utils.process import get_output_error_code
-from IPython.utils.text import list_strings
-from IPython.utils.io import temp_pyfile, Tee
-from IPython.utils import py3compat
-from IPython.utils.encoding import DEFAULT_ENCODING
-
-from . import decorators as dec
-from . import skipdoctest
-
-
-# The docstring for full_path doctests differently on win32 (different path
-# separator) so just skip the doctest there. The example remains informative.
-doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
-
-@doctest_deco
-def full_path(startPath,files):
- """Make full paths for all the listed files, based on startPath.
-
- Only the base part of startPath is kept, since this routine is typically
- used with a script's ``__file__`` variable as startPath. The base of startPath
- is then prepended to all the listed files, forming the output list.
-
- Parameters
- ----------
- startPath : string
- Initial path to use as the base for the results. This path is split
- using os.path.split() and only its first component is kept.
-
- files : string or list
- One or more files.
-
- Examples
- --------
-
- >>> full_path('/foo/bar.py',['a.txt','b.txt'])
- ['/foo/a.txt', '/foo/b.txt']
-
- >>> full_path('/foo',['a.txt','b.txt'])
- ['/a.txt', '/b.txt']
-
- If a single file is given, the output is still a list::
-
- >>> full_path('/foo','a.txt')
- ['/a.txt']
- """
-
- files = list_strings(files)
- base = os.path.split(startPath)[0]
- return [ os.path.join(base,f) for f in files ]
-
-
-def parse_test_output(txt):
- """Parse the output of a test run and return errors, failures.
-
- Parameters
- ----------
- txt : str
- Text output of a test run, assumed to contain a line of one of the
- following forms::
-
- 'FAILED (errors=1)'
- 'FAILED (failures=1)'
- 'FAILED (errors=1, failures=1)'
-
- Returns
- -------
- nerr, nfail
- number of errors and failures.
- """
-
- err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
- if err_m:
- nerr = int(err_m.group(1))
- nfail = 0
- return nerr, nfail
-
- fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
- if fail_m:
- nerr = 0
- nfail = int(fail_m.group(1))
- return nerr, nfail
-
- both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
- re.MULTILINE)
- if both_m:
- nerr = int(both_m.group(1))
- nfail = int(both_m.group(2))
- return nerr, nfail
-
- # If the input didn't match any of these forms, assume no error/failures
- return 0, 0
-
-
-# So nose doesn't think this is a test
-parse_test_output.__test__ = False
-
-
-def default_argv():
- """Return a valid default argv for creating testing instances of ipython"""
-
- return ['--quick', # so no config file is loaded
- # Other defaults to minimize side effects on stdout
- '--colors=NoColor', '--no-term-title','--no-banner',
- '--autocall=0']
-
-
-def default_config():
- """Return a config object with good defaults for testing."""
- config = Config()
- config.TerminalInteractiveShell.colors = 'NoColor'
- config.TerminalInteractiveShell.term_title = False
- config.TerminalInteractiveShell.autocall = 0
- f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False)
- config.HistoryManager.hist_file = f.name
- f.close()
- config.HistoryManager.db_cache_size = 10000
- return config
-
-
-def get_ipython_cmd(as_string=False):
- """
- Return appropriate IPython command line name. By default, this will return
- a list that can be used with subprocess.Popen, for example, but passing
- `as_string=True` allows for returning the IPython command as a string.
-
- Parameters
- ----------
- as_string: bool
- Flag to return the command as a string.
- """
- ipython_cmd = [sys.executable, "-m", "IPython"]
-
- if as_string:
- ipython_cmd = " ".join(ipython_cmd)
-
- return ipython_cmd
-
-def ipexec(fname, options=None, commands=()):
- """Utility to call 'ipython filename'.
-
- Starts IPython with a minimal and safe configuration to make startup as fast
- as possible.
-
- Note that this starts IPython in a subprocess!
-
- Parameters
- ----------
- fname : str
- Name of file to be executed (should have .py or .ipy extension).
-
- options : optional, list
- Extra command-line flags to be passed to IPython.
-
- commands : optional, list
- Commands to send on stdin.
-
- Returns
- -------
- (stdout, stderr) of ipython subprocess.
- """
- if options is None: options = []
-
+
+try:
+ # These tools are used by parts of the runtime, so we make the nose
+ # dependency optional at this point. Nose is a hard dependency to run the
+ # test suite, but NOT to use ipython itself.
+ import nose.tools as nt
+ has_nose = True
+except ImportError:
+ has_nose = False
+
+from traitlets.config.loader import Config
+from IPython.utils.process import get_output_error_code
+from IPython.utils.text import list_strings
+from IPython.utils.io import temp_pyfile, Tee
+from IPython.utils import py3compat
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+from . import decorators as dec
+from . import skipdoctest
+
+
+# The docstring for full_path doctests differently on win32 (different path
+# separator) so just skip the doctest there. The example remains informative.
+doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
+
+@doctest_deco
+def full_path(startPath,files):
+ """Make full paths for all the listed files, based on startPath.
+
+ Only the base part of startPath is kept, since this routine is typically
+ used with a script's ``__file__`` variable as startPath. The base of startPath
+ is then prepended to all the listed files, forming the output list.
+
+ Parameters
+ ----------
+ startPath : string
+ Initial path to use as the base for the results. This path is split
+ using os.path.split() and only its first component is kept.
+
+ files : string or list
+ One or more files.
+
+ Examples
+ --------
+
+ >>> full_path('/foo/bar.py',['a.txt','b.txt'])
+ ['/foo/a.txt', '/foo/b.txt']
+
+ >>> full_path('/foo',['a.txt','b.txt'])
+ ['/a.txt', '/b.txt']
+
+ If a single file is given, the output is still a list::
+
+ >>> full_path('/foo','a.txt')
+ ['/a.txt']
+ """
+
+ files = list_strings(files)
+ base = os.path.split(startPath)[0]
+ return [ os.path.join(base,f) for f in files ]
+
+
+def parse_test_output(txt):
+ """Parse the output of a test run and return errors, failures.
+
+ Parameters
+ ----------
+ txt : str
+ Text output of a test run, assumed to contain a line of one of the
+ following forms::
+
+ 'FAILED (errors=1)'
+ 'FAILED (failures=1)'
+ 'FAILED (errors=1, failures=1)'
+
+ Returns
+ -------
+ nerr, nfail
+ number of errors and failures.
+ """
+
+ err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
+ if err_m:
+ nerr = int(err_m.group(1))
+ nfail = 0
+ return nerr, nfail
+
+ fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
+ if fail_m:
+ nerr = 0
+ nfail = int(fail_m.group(1))
+ return nerr, nfail
+
+ both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
+ re.MULTILINE)
+ if both_m:
+ nerr = int(both_m.group(1))
+ nfail = int(both_m.group(2))
+ return nerr, nfail
+
+ # If the input didn't match any of these forms, assume no error/failures
+ return 0, 0
+
+
+# So nose doesn't think this is a test
+parse_test_output.__test__ = False
+
+
+def default_argv():
+ """Return a valid default argv for creating testing instances of ipython"""
+
+ return ['--quick', # so no config file is loaded
+ # Other defaults to minimize side effects on stdout
+ '--colors=NoColor', '--no-term-title','--no-banner',
+ '--autocall=0']
+
+
+def default_config():
+ """Return a config object with good defaults for testing."""
+ config = Config()
+ config.TerminalInteractiveShell.colors = 'NoColor'
+ config.TerminalInteractiveShell.term_title = False
+ config.TerminalInteractiveShell.autocall = 0
+ f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False)
+ config.HistoryManager.hist_file = f.name
+ f.close()
+ config.HistoryManager.db_cache_size = 10000
+ return config
+
+
+def get_ipython_cmd(as_string=False):
+ """
+ Return appropriate IPython command line name. By default, this will return
+ a list that can be used with subprocess.Popen, for example, but passing
+ `as_string=True` allows for returning the IPython command as a string.
+
+ Parameters
+ ----------
+ as_string: bool
+ Flag to return the command as a string.
+ """
+ ipython_cmd = [sys.executable, "-m", "IPython"]
+
+ if as_string:
+ ipython_cmd = " ".join(ipython_cmd)
+
+ return ipython_cmd
+
+def ipexec(fname, options=None, commands=()):
+ """Utility to call 'ipython filename'.
+
+ Starts IPython with a minimal and safe configuration to make startup as fast
+ as possible.
+
+ Note that this starts IPython in a subprocess!
+
+ Parameters
+ ----------
+ fname : str
+ Name of file to be executed (should have .py or .ipy extension).
+
+ options : optional, list
+ Extra command-line flags to be passed to IPython.
+
+ commands : optional, list
+ Commands to send on stdin.
+
+ Returns
+ -------
+ (stdout, stderr) of ipython subprocess.
+ """
+ if options is None: options = []
+
cmdargs = default_argv() + options
-
- test_dir = os.path.dirname(__file__)
-
- ipython_cmd = get_ipython_cmd()
- # Absolute path for filename
- full_fname = os.path.join(test_dir, fname)
- full_cmd = ipython_cmd + cmdargs + [full_fname]
- env = os.environ.copy()
- # FIXME: ignore all warnings in ipexec while we have shims
- # should we keep suppressing warnings here, even after removing shims?
- env['PYTHONWARNINGS'] = 'ignore'
- # env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr
- for k, v in env.items():
- # Debug a bizarre failure we've seen on Windows:
- # TypeError: environment can only contain strings
- if not isinstance(v, str):
- print(k, v)
- p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
- out, err = p.communicate(input=py3compat.str_to_bytes('\n'.join(commands)) or None)
- out, err = py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
- # `import readline` causes 'ESC[?1034h' to be output sometimes,
- # so strip that out before doing comparisons
- if out:
- out = re.sub(r'\x1b\[[^h]+h', '', out)
- return out, err
-
-
-def ipexec_validate(fname, expected_out, expected_err='',
- options=None, commands=()):
- """Utility to call 'ipython filename' and validate output/error.
-
- This function raises an AssertionError if the validation fails.
-
- Note that this starts IPython in a subprocess!
-
- Parameters
- ----------
- fname : str
- Name of the file to be executed (should have .py or .ipy extension).
-
- expected_out : str
- Expected stdout of the process.
-
- expected_err : optional, str
- Expected stderr of the process.
-
- options : optional, list
- Extra command-line flags to be passed to IPython.
-
- Returns
- -------
- None
- """
-
- import nose.tools as nt
-
- out, err = ipexec(fname, options, commands)
- #print 'OUT', out # dbg
- #print 'ERR', err # dbg
- # If there are any errors, we must check those before stdout, as they may be
- # more informative than simply having an empty stdout.
- if err:
- if expected_err:
- nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines()))
- else:
- raise ValueError('Running file %r produced error: %r' %
- (fname, err))
- # If no errors or output on stderr was expected, match stdout
- nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines()))
-
-
-class TempFileMixin(object):
- """Utility class to create temporary Python/IPython files.
-
- Meant as a mixin class for test cases."""
-
- def mktmp(self, src, ext='.py'):
- """Make a valid python temp file."""
- fname, f = temp_pyfile(src, ext)
- self.tmpfile = f
- self.fname = fname
-
- def tearDown(self):
- if hasattr(self, 'tmpfile'):
- # If the tmpfile wasn't made because of skipped tests, like in
- # win32, there's nothing to cleanup.
- self.tmpfile.close()
- try:
- os.unlink(self.fname)
- except:
- # On Windows, even though we close the file, we still can't
- # delete it. I have no clue why
- pass
-
+
+ test_dir = os.path.dirname(__file__)
+
+ ipython_cmd = get_ipython_cmd()
+ # Absolute path for filename
+ full_fname = os.path.join(test_dir, fname)
+ full_cmd = ipython_cmd + cmdargs + [full_fname]
+ env = os.environ.copy()
+ # FIXME: ignore all warnings in ipexec while we have shims
+ # should we keep suppressing warnings here, even after removing shims?
+ env['PYTHONWARNINGS'] = 'ignore'
+ # env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr
+ for k, v in env.items():
+ # Debug a bizarre failure we've seen on Windows:
+ # TypeError: environment can only contain strings
+ if not isinstance(v, str):
+ print(k, v)
+ p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
+ out, err = p.communicate(input=py3compat.str_to_bytes('\n'.join(commands)) or None)
+ out, err = py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
+ # `import readline` causes 'ESC[?1034h' to be output sometimes,
+ # so strip that out before doing comparisons
+ if out:
+ out = re.sub(r'\x1b\[[^h]+h', '', out)
+ return out, err
+
+
+def ipexec_validate(fname, expected_out, expected_err='',
+ options=None, commands=()):
+ """Utility to call 'ipython filename' and validate output/error.
+
+ This function raises an AssertionError if the validation fails.
+
+ Note that this starts IPython in a subprocess!
+
+ Parameters
+ ----------
+ fname : str
+ Name of the file to be executed (should have .py or .ipy extension).
+
+ expected_out : str
+ Expected stdout of the process.
+
+ expected_err : optional, str
+ Expected stderr of the process.
+
+ options : optional, list
+ Extra command-line flags to be passed to IPython.
+
+ Returns
+ -------
+ None
+ """
+
+ import nose.tools as nt
+
+ out, err = ipexec(fname, options, commands)
+ #print 'OUT', out # dbg
+ #print 'ERR', err # dbg
+ # If there are any errors, we must check those before stdout, as they may be
+ # more informative than simply having an empty stdout.
+ if err:
+ if expected_err:
+ nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines()))
+ else:
+ raise ValueError('Running file %r produced error: %r' %
+ (fname, err))
+ # If no errors or output on stderr was expected, match stdout
+ nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines()))
+
+
+class TempFileMixin(object):
+ """Utility class to create temporary Python/IPython files.
+
+ Meant as a mixin class for test cases."""
+
+ def mktmp(self, src, ext='.py'):
+ """Make a valid python temp file."""
+ fname, f = temp_pyfile(src, ext)
+ self.tmpfile = f
+ self.fname = fname
+
+ def tearDown(self):
+ if hasattr(self, 'tmpfile'):
+ # If the tmpfile wasn't made because of skipped tests, like in
+ # win32, there's nothing to cleanup.
+ self.tmpfile.close()
+ try:
+ os.unlink(self.fname)
+ except:
+ # On Windows, even though we close the file, we still can't
+ # delete it. I have no clue why
+ pass
+
def __enter__(self):
return self
@@ -298,149 +298,149 @@ class TempFileMixin(object):
self.tearDown()
-pair_fail_msg = ("Testing {0}\n\n"
- "In:\n"
- " {1!r}\n"
- "Expected:\n"
- " {2!r}\n"
- "Got:\n"
- " {3!r}\n")
-def check_pairs(func, pairs):
- """Utility function for the common case of checking a function with a
- sequence of input/output pairs.
-
- Parameters
- ----------
- func : callable
- The function to be tested. Should accept a single argument.
- pairs : iterable
- A list of (input, expected_output) tuples.
-
- Returns
- -------
- None. Raises an AssertionError if any output does not match the expected
- value.
- """
- name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
- for inp, expected in pairs:
- out = func(inp)
- assert out == expected, pair_fail_msg.format(name, inp, expected, out)
-
-
-if py3compat.PY3:
- MyStringIO = StringIO
-else:
- # In Python 2, stdout/stderr can have either bytes or unicode written to them,
- # so we need a class that can handle both.
- class MyStringIO(StringIO):
- def write(self, s):
- s = py3compat.cast_unicode(s, encoding=DEFAULT_ENCODING)
- super(MyStringIO, self).write(s)
-
-_re_type = type(re.compile(r''))
-
-notprinted_msg = """Did not find {0!r} in printed output (on {1}):
--------
-{2!s}
--------
-"""
-
-class AssertPrints(object):
- """Context manager for testing that code prints certain text.
-
- Examples
- --------
- >>> with AssertPrints("abc", suppress=False):
- ... print("abcd")
- ... print("def")
- ...
- abcd
- def
- """
- def __init__(self, s, channel='stdout', suppress=True):
- self.s = s
- if isinstance(self.s, (py3compat.string_types, _re_type)):
- self.s = [self.s]
- self.channel = channel
- self.suppress = suppress
-
- def __enter__(self):
- self.orig_stream = getattr(sys, self.channel)
- self.buffer = MyStringIO()
- self.tee = Tee(self.buffer, channel=self.channel)
- setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
-
- def __exit__(self, etype, value, traceback):
- try:
- if value is not None:
- # If an error was raised, don't check anything else
- return False
- self.tee.flush()
- setattr(sys, self.channel, self.orig_stream)
- printed = self.buffer.getvalue()
- for s in self.s:
- if isinstance(s, _re_type):
- assert s.search(printed), notprinted_msg.format(s.pattern, self.channel, printed)
- else:
- assert s in printed, notprinted_msg.format(s, self.channel, printed)
- return False
- finally:
- self.tee.close()
-
-printed_msg = """Found {0!r} in printed output (on {1}):
--------
-{2!s}
--------
-"""
-
-class AssertNotPrints(AssertPrints):
- """Context manager for checking that certain output *isn't* produced.
-
- Counterpart of AssertPrints"""
- def __exit__(self, etype, value, traceback):
- try:
- if value is not None:
- # If an error was raised, don't check anything else
- self.tee.close()
- return False
- self.tee.flush()
- setattr(sys, self.channel, self.orig_stream)
- printed = self.buffer.getvalue()
- for s in self.s:
- if isinstance(s, _re_type):
- assert not s.search(printed),printed_msg.format(
- s.pattern, self.channel, printed)
- else:
- assert s not in printed, printed_msg.format(
- s, self.channel, printed)
- return False
- finally:
- self.tee.close()
-
-@contextmanager
-def mute_warn():
- from IPython.utils import warn
- save_warn = warn.warn
- warn.warn = lambda *a, **kw: None
- try:
- yield
- finally:
- warn.warn = save_warn
-
-@contextmanager
-def make_tempfile(name):
- """ Create an empty, named, temporary file for the duration of the context.
- """
- f = open(name, 'w')
- f.close()
- try:
- yield
- finally:
- os.unlink(name)
-
+pair_fail_msg = ("Testing {0}\n\n"
+ "In:\n"
+ " {1!r}\n"
+ "Expected:\n"
+ " {2!r}\n"
+ "Got:\n"
+ " {3!r}\n")
+def check_pairs(func, pairs):
+ """Utility function for the common case of checking a function with a
+ sequence of input/output pairs.
+
+ Parameters
+ ----------
+ func : callable
+ The function to be tested. Should accept a single argument.
+ pairs : iterable
+ A list of (input, expected_output) tuples.
+
+ Returns
+ -------
+ None. Raises an AssertionError if any output does not match the expected
+ value.
+ """
+ name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
+ for inp, expected in pairs:
+ out = func(inp)
+ assert out == expected, pair_fail_msg.format(name, inp, expected, out)
+
+
+if py3compat.PY3:
+ MyStringIO = StringIO
+else:
+ # In Python 2, stdout/stderr can have either bytes or unicode written to them,
+ # so we need a class that can handle both.
+ class MyStringIO(StringIO):
+ def write(self, s):
+ s = py3compat.cast_unicode(s, encoding=DEFAULT_ENCODING)
+ super(MyStringIO, self).write(s)
+
+_re_type = type(re.compile(r''))
+
+notprinted_msg = """Did not find {0!r} in printed output (on {1}):
+-------
+{2!s}
+-------
+"""
+
+class AssertPrints(object):
+ """Context manager for testing that code prints certain text.
+
+ Examples
+ --------
+ >>> with AssertPrints("abc", suppress=False):
+ ... print("abcd")
+ ... print("def")
+ ...
+ abcd
+ def
+ """
+ def __init__(self, s, channel='stdout', suppress=True):
+ self.s = s
+ if isinstance(self.s, (py3compat.string_types, _re_type)):
+ self.s = [self.s]
+ self.channel = channel
+ self.suppress = suppress
+
+ def __enter__(self):
+ self.orig_stream = getattr(sys, self.channel)
+ self.buffer = MyStringIO()
+ self.tee = Tee(self.buffer, channel=self.channel)
+ setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
+
+ def __exit__(self, etype, value, traceback):
+ try:
+ if value is not None:
+ # If an error was raised, don't check anything else
+ return False
+ self.tee.flush()
+ setattr(sys, self.channel, self.orig_stream)
+ printed = self.buffer.getvalue()
+ for s in self.s:
+ if isinstance(s, _re_type):
+ assert s.search(printed), notprinted_msg.format(s.pattern, self.channel, printed)
+ else:
+ assert s in printed, notprinted_msg.format(s, self.channel, printed)
+ return False
+ finally:
+ self.tee.close()
+
+printed_msg = """Found {0!r} in printed output (on {1}):
+-------
+{2!s}
+-------
+"""
+
+class AssertNotPrints(AssertPrints):
+ """Context manager for checking that certain output *isn't* produced.
+
+ Counterpart of AssertPrints"""
+ def __exit__(self, etype, value, traceback):
+ try:
+ if value is not None:
+ # If an error was raised, don't check anything else
+ self.tee.close()
+ return False
+ self.tee.flush()
+ setattr(sys, self.channel, self.orig_stream)
+ printed = self.buffer.getvalue()
+ for s in self.s:
+ if isinstance(s, _re_type):
+ assert not s.search(printed),printed_msg.format(
+ s.pattern, self.channel, printed)
+ else:
+ assert s not in printed, printed_msg.format(
+ s, self.channel, printed)
+ return False
+ finally:
+ self.tee.close()
+
+@contextmanager
+def mute_warn():
+ from IPython.utils import warn
+ save_warn = warn.warn
+ warn.warn = lambda *a, **kw: None
+ try:
+ yield
+ finally:
+ warn.warn = save_warn
+
+@contextmanager
+def make_tempfile(name):
+ """ Create an empty, named, temporary file for the duration of the context.
+ """
+ f = open(name, 'w')
+ f.close()
+ try:
+ yield
+ finally:
+ os.unlink(name)
+
def fake_input(inputs):
"""Temporarily replace the input() function to return the given values
-
+
Use as a context manager:
with fake_input(['result1', 'result2']):
@@ -460,24 +460,24 @@ def fake_input(inputs):
'input' if py3compat.PY3 else 'raw_input')
return patch(input_name, mock_input)
-def help_output_test(subcommand=''):
- """test that `ipython [subcommand] -h` works"""
- cmd = get_ipython_cmd() + [subcommand, '-h']
- out, err, rc = get_output_error_code(cmd)
- nt.assert_equal(rc, 0, err)
- nt.assert_not_in("Traceback", err)
- nt.assert_in("Options", out)
- nt.assert_in("--help-all", out)
- return out, err
-
-
-def help_all_output_test(subcommand=''):
- """test that `ipython [subcommand] --help-all` works"""
- cmd = get_ipython_cmd() + [subcommand, '--help-all']
- out, err, rc = get_output_error_code(cmd)
- nt.assert_equal(rc, 0, err)
- nt.assert_not_in("Traceback", err)
- nt.assert_in("Options", out)
+def help_output_test(subcommand=''):
+ """test that `ipython [subcommand] -h` works"""
+ cmd = get_ipython_cmd() + [subcommand, '-h']
+ out, err, rc = get_output_error_code(cmd)
+ nt.assert_equal(rc, 0, err)
+ nt.assert_not_in("Traceback", err)
+ nt.assert_in("Options", out)
+ nt.assert_in("--help-all", out)
+ return out, err
+
+
+def help_all_output_test(subcommand=''):
+ """test that `ipython [subcommand] --help-all` works"""
+ cmd = get_ipython_cmd() + [subcommand, '--help-all']
+ out, err, rc = get_output_error_code(cmd)
+ nt.assert_equal(rc, 0, err)
+ nt.assert_not_in("Traceback", err)
+ nt.assert_in("Options", out)
nt.assert_in("Class", out)
- return out, err
-
+ return out, err
+
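A short hypothetical tour of the helpers above (doubled() and the inputs are
made up): check_pairs drives a function through input/output pairs,
parse_test_output extracts counts from a test runner's summary line, and
AssertPrints asserts on captured stdout.

    from IPython.testing.tools import (AssertPrints, check_pairs,
                                       parse_test_output)

    def doubled(x):
        return 2 * x

    check_pairs(doubled, [(1, 2), (3, 6)])  # AssertionError on any mismatch

    # (nerr, nfail) parsed from a unittest-style summary line
    assert parse_test_output('FAILED (errors=1, failures=2)') == (1, 2)

    with AssertPrints('hello'):  # fails unless 'hello' appears on stdout
        print('hello world')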
diff --git a/contrib/python/ipython/py2/IPython/utils/PyColorize.py b/contrib/python/ipython/py2/IPython/utils/PyColorize.py
index 13c03c3398..124eb2d4e3 100644
--- a/contrib/python/ipython/py2/IPython/utils/PyColorize.py
+++ b/contrib/python/ipython/py2/IPython/utils/PyColorize.py
@@ -1,127 +1,127 @@
-# -*- coding: utf-8 -*-
-"""
-Class and program to colorize python source code for ANSI terminals.
-
-Based on an HTML code highlighter by Jurgen Hermann found at:
-http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
-
-Modifications by Fernando Perez (fperez@colorado.edu).
-
-Information on the original HTML highlighter follows:
-
-MoinMoin - Python Source Parser
-
-Title: Colorize Python source using the built-in tokenizer
-
-Submitter: Jurgen Hermann
-Last Updated: 2001/04/06
-
-Version no: 1.2
-
-Description:
-
-This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
-Python source code to HTML markup, rendering comments, keywords,
-operators, numeric and string literals in different colors.
-
-It shows how to use the built-in keyword, token and tokenize modules to
-scan Python source code and re-emit it with no changes to its original
-formatting (which is the hard part).
-"""
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-__all__ = ['ANSICodeColors','Parser']
-
-_scheme_default = 'Linux'
-
-
-# Imports
-import keyword
-import os
-import sys
-import token
-import tokenize
-
-try:
- generate_tokens = tokenize.generate_tokens
-except AttributeError:
- # Python 3. Note that we use the undocumented _tokenize because it expects
- # strings, not bytes. See also Python issue #9969.
- generate_tokens = tokenize._tokenize
-
-from IPython.utils.coloransi import TermColors, InputTermColors ,ColorScheme, ColorSchemeTable
-from IPython.utils.py3compat import PY3
-
+# -*- coding: utf-8 -*-
+"""
+Class and program to colorize python source code for ANSI terminals.
+
+Based on an HTML code highlighter by Jurgen Hermann found at:
+http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
+
+Modifications by Fernando Perez (fperez@colorado.edu).
+
+Information on the original HTML highlighter follows:
+
+MoinMoin - Python Source Parser
+
+Title: Colorize Python source using the built-in tokenizer
+
+Submitter: Jurgen Hermann
+Last Updated: 2001/04/06
+
+Version no: 1.2
+
+Description:
+
+This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
+Python source code to HTML markup, rendering comments, keywords,
+operators, numeric and string literals in different colors.
+
+It shows how to use the built-in keyword, token and tokenize modules to
+scan Python source code and re-emit it with no changes to its original
+formatting (which is the hard part).
+"""
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+__all__ = ['ANSICodeColors','Parser']
+
+_scheme_default = 'Linux'
+
+
+# Imports
+import keyword
+import os
+import sys
+import token
+import tokenize
+
+try:
+ generate_tokens = tokenize.generate_tokens
+except AttributeError:
+ # Python 3. Note that we use the undocumented _tokenize because it expects
+ # strings, not bytes. See also Python issue #9969.
+ generate_tokens = tokenize._tokenize
+
+from IPython.utils.coloransi import TermColors, InputTermColors ,ColorScheme, ColorSchemeTable
+from IPython.utils.py3compat import PY3
+
from .colorable import Colorable
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#############################################################################
-### Python Source Parser (does Highlighting)
-#############################################################################
-
-_KEYWORD = token.NT_OFFSET + 1
-_TEXT = token.NT_OFFSET + 2
-
-#****************************************************************************
-# Builtin color schemes
-
-Colors = TermColors # just a shorthand
-
-# Build a few color schemes
-NoColor = ColorScheme(
- 'NoColor',{
- 'header' : Colors.NoColor,
- token.NUMBER : Colors.NoColor,
- token.OP : Colors.NoColor,
- token.STRING : Colors.NoColor,
- tokenize.COMMENT : Colors.NoColor,
- token.NAME : Colors.NoColor,
- token.ERRORTOKEN : Colors.NoColor,
-
- _KEYWORD : Colors.NoColor,
- _TEXT : Colors.NoColor,
-
- 'in_prompt' : InputTermColors.NoColor, # Input prompt
- 'in_number' : InputTermColors.NoColor, # Input prompt number
- 'in_prompt2' : InputTermColors.NoColor, # Continuation prompt
- 'in_normal' : InputTermColors.NoColor, # color off (usu. Colors.Normal)
-
- 'out_prompt' : Colors.NoColor, # Output prompt
- 'out_number' : Colors.NoColor, # Output prompt number
-
- 'normal' : Colors.NoColor # color off (usu. Colors.Normal)
- } )
-
-LinuxColors = ColorScheme(
- 'Linux',{
- 'header' : Colors.LightRed,
- token.NUMBER : Colors.LightCyan,
- token.OP : Colors.Yellow,
- token.STRING : Colors.LightBlue,
- tokenize.COMMENT : Colors.LightRed,
- token.NAME : Colors.Normal,
- token.ERRORTOKEN : Colors.Red,
-
- _KEYWORD : Colors.LightGreen,
- _TEXT : Colors.Yellow,
-
- 'in_prompt' : InputTermColors.Green,
- 'in_number' : InputTermColors.LightGreen,
- 'in_prompt2' : InputTermColors.Green,
- 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
-
- 'out_prompt' : Colors.Red,
- 'out_number' : Colors.LightRed,
-
- 'normal' : Colors.Normal # color off (usu. Colors.Normal)
- } )
-
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#############################################################################
+### Python Source Parser (does Highlighting)
+#############################################################################
+
+_KEYWORD = token.NT_OFFSET + 1
+_TEXT = token.NT_OFFSET + 2
+
+#****************************************************************************
+# Builtin color schemes
+
+Colors = TermColors # just a shorthand
+
+# Build a few color schemes
+NoColor = ColorScheme(
+ 'NoColor',{
+ 'header' : Colors.NoColor,
+ token.NUMBER : Colors.NoColor,
+ token.OP : Colors.NoColor,
+ token.STRING : Colors.NoColor,
+ tokenize.COMMENT : Colors.NoColor,
+ token.NAME : Colors.NoColor,
+ token.ERRORTOKEN : Colors.NoColor,
+
+ _KEYWORD : Colors.NoColor,
+ _TEXT : Colors.NoColor,
+
+ 'in_prompt' : InputTermColors.NoColor, # Input prompt
+ 'in_number' : InputTermColors.NoColor, # Input prompt number
+ 'in_prompt2' : InputTermColors.NoColor, # Continuation prompt
+ 'in_normal' : InputTermColors.NoColor, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.NoColor, # Output prompt
+ 'out_number' : Colors.NoColor, # Output prompt number
+
+ 'normal' : Colors.NoColor # color off (usu. Colors.Normal)
+ } )
+
+LinuxColors = ColorScheme(
+ 'Linux',{
+ 'header' : Colors.LightRed,
+ token.NUMBER : Colors.LightCyan,
+ token.OP : Colors.Yellow,
+ token.STRING : Colors.LightBlue,
+ tokenize.COMMENT : Colors.LightRed,
+ token.NAME : Colors.Normal,
+ token.ERRORTOKEN : Colors.Red,
+
+ _KEYWORD : Colors.LightGreen,
+ _TEXT : Colors.Yellow,
+
+ 'in_prompt' : InputTermColors.Green,
+ 'in_number' : InputTermColors.LightGreen,
+ 'in_prompt2' : InputTermColors.Green,
+ 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.Red,
+ 'out_number' : Colors.LightRed,
+
+ 'normal' : Colors.Normal # color off (usu. Colors.Normal)
+ } )
+
NeutralColors = ColorScheme(
'Neutral',{
'header' : Colors.Red,
@@ -156,227 +156,227 @@ NeutralColors = ColorScheme(
if os.name == 'nt':
NeutralColors = LinuxColors.copy(name='Neutral')
-LightBGColors = ColorScheme(
- 'LightBG',{
- 'header' : Colors.Red,
- token.NUMBER : Colors.Cyan,
- token.OP : Colors.Blue,
- token.STRING : Colors.Blue,
- tokenize.COMMENT : Colors.Red,
- token.NAME : Colors.Normal,
- token.ERRORTOKEN : Colors.Red,
-
-
- _KEYWORD : Colors.Green,
- _TEXT : Colors.Blue,
-
- 'in_prompt' : InputTermColors.Blue,
- 'in_number' : InputTermColors.LightBlue,
- 'in_prompt2' : InputTermColors.Blue,
- 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
-
- 'out_prompt' : Colors.Red,
- 'out_number' : Colors.LightRed,
-
- 'normal' : Colors.Normal # color off (usu. Colors.Normal)
- } )
-
-# Build table of color schemes (needed by the parser)
+LightBGColors = ColorScheme(
+ 'LightBG',{
+ 'header' : Colors.Red,
+ token.NUMBER : Colors.Cyan,
+ token.OP : Colors.Blue,
+ token.STRING : Colors.Blue,
+ tokenize.COMMENT : Colors.Red,
+ token.NAME : Colors.Normal,
+ token.ERRORTOKEN : Colors.Red,
+
+
+ _KEYWORD : Colors.Green,
+ _TEXT : Colors.Blue,
+
+ 'in_prompt' : InputTermColors.Blue,
+ 'in_number' : InputTermColors.LightBlue,
+ 'in_prompt2' : InputTermColors.Blue,
+ 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
+
+ 'out_prompt' : Colors.Red,
+ 'out_number' : Colors.LightRed,
+
+ 'normal' : Colors.Normal # color off (usu. Colors.Normal)
+ } )
+
+# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors],
- _scheme_default)
-
+ _scheme_default)
+
class Parser(Colorable):
- """ Format colored Python source.
- """
-
+ """ Format colored Python source.
+ """
+
def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None):
- """ Create a parser with a specified color table and output channel.
-
- Call format() to process code.
- """
+ """ Create a parser with a specified color table and output channel.
+
+ Call format() to process code.
+ """
super(Parser, self).__init__(parent=parent)
- self.color_table = color_table or ANSICodeColors
- self.out = out
-
- def format(self, raw, out = None, scheme = ''):
- return self.format2(raw, out, scheme)[0]
-
- def format2(self, raw, out = None, scheme = ''):
- """ Parse and send the colored source.
-
- If out and scheme are not specified, the defaults (given to
- constructor) are used.
-
- out should be a file-type object. Optionally, out can be given as the
- string 'str' and the parser will automatically return the output in a
- string."""
-
- string_output = 0
- if out == 'str' or self.out == 'str' or \
- isinstance(self.out,StringIO):
- # XXX - I don't really like this state handling logic, but at this
- # point I don't want to make major changes, so adding the
- # isinstance() check is the simplest I can do to ensure correct
- # behavior.
- out_old = self.out
- self.out = StringIO()
- string_output = 1
- elif out is not None:
- self.out = out
-
- # Fast return of the unmodified input for NoColor scheme
- if scheme == 'NoColor':
- error = False
- self.out.write(raw)
- if string_output:
- return raw,error
- else:
- return None,error
-
- # local shorthands
- colors = self.color_table[scheme].colors
- self.colors = colors # put in object so __call__ sees it
-
- # Remove trailing whitespace and normalize tabs
- self.raw = raw.expandtabs().rstrip()
-
- # store line offsets in self.lines
- self.lines = [0, 0]
- pos = 0
- raw_find = self.raw.find
- lines_append = self.lines.append
- while 1:
- pos = raw_find('\n', pos) + 1
- if not pos: break
- lines_append(pos)
- lines_append(len(self.raw))
-
- # parse the source and write it
- self.pos = 0
- text = StringIO(self.raw)
-
- error = False
- try:
- for atoken in generate_tokens(text.readline):
- self(*atoken)
- except tokenize.TokenError as ex:
- msg = ex.args[0]
- line = ex.args[1][0]
- self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
- (colors[token.ERRORTOKEN],
- msg, self.raw[self.lines[line]:],
- colors.normal)
- )
- error = True
- self.out.write(colors.normal+'\n')
- if string_output:
- output = self.out.getvalue()
- self.out = out_old
- return (output, error)
- return (None, error)
-
- def __call__(self, toktype, toktext, start_pos, end_pos, line):
- """ Token handler, with syntax highlighting."""
- (srow,scol) = start_pos
- (erow,ecol) = end_pos
- colors = self.colors
- owrite = self.out.write
-
- # line separator, so this works across platforms
- linesep = os.linesep
-
- # calculate new positions
- oldpos = self.pos
- newpos = self.lines[srow] + scol
- self.pos = newpos + len(toktext)
-
- # send the original whitespace, if needed
- if newpos > oldpos:
- owrite(self.raw[oldpos:newpos])
-
- # skip indenting tokens
- if toktype in [token.INDENT, token.DEDENT]:
- self.pos = newpos
- return
-
- # map token type to a color group
- if token.LPAR <= toktype <= token.OP:
- toktype = token.OP
- elif toktype == token.NAME and keyword.iskeyword(toktext):
- toktype = _KEYWORD
- color = colors.get(toktype, colors[_TEXT])
-
- #print '<%s>' % toktext, # dbg
-
- # Triple quoted strings must be handled carefully so that backtracking
- # in pagers works correctly. We need color terminators on _each_ line.
- if linesep in toktext:
- toktext = toktext.replace(linesep, '%s%s%s' %
- (colors.normal,linesep,color))
-
- # send text
- owrite('%s%s%s' % (color,toktext,colors.normal))
-
-def main(argv=None):
- """Run as a command-line script: colorize a python file or stdin using ANSI
- color escapes and print to stdout.
-
- Inputs:
-
- - argv(None): a list of strings like sys.argv[1:] giving the command-line
- arguments. If None, use sys.argv[1:].
- """
-
- usage_msg = """%prog [options] [filename]
-
-Colorize a python file or stdin using ANSI color escapes and print to stdout.
-If no filename is given, or if filename is -, read standard input."""
-
- import optparse
- parser = optparse.OptionParser(usage=usage_msg)
- newopt = parser.add_option
- newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
- choices=['Linux','LightBG','NoColor'],default=_scheme_default,
- help="give the color scheme to use. Currently only 'Linux'\
- (default), 'LightBG' and 'NoColor' are implemented (give without\
- quotes)")
-
- opts,args = parser.parse_args(argv)
-
- if len(args) > 1:
- parser.error("you must give at most one filename.")
-
- if len(args) == 0:
- fname = '-' # no filename given; setup to read from stdin
- else:
- fname = args[0]
-
- if fname == '-':
- stream = sys.stdin
- else:
- try:
- stream = open(fname)
- except IOError as msg:
- print(msg, file=sys.stderr)
- sys.exit(1)
-
- parser = Parser()
-
- # we need nested try blocks because pre-2.5 python doesn't support unified
- # try-except-finally
- try:
- try:
- # write colorized version to stdout
- parser.format(stream.read(),scheme=opts.scheme_name)
- except IOError as msg:
- # if user reads through a pager and quits, don't print traceback
- if msg.args != (32,'Broken pipe'):
- raise
- finally:
- if stream is not sys.stdin:
- stream.close() # in case a non-handled exception happened above
-
-if __name__ == "__main__":
- main()
+ self.color_table = color_table or ANSICodeColors
+ self.out = out
+
+ def format(self, raw, out = None, scheme = ''):
+ return self.format2(raw, out, scheme)[0]
+
+ def format2(self, raw, out = None, scheme = ''):
+ """ Parse and send the colored source.
+
+ If out and scheme are not specified, the defaults (given to
+ constructor) are used.
+
+ out should be a file-type object. Optionally, out can be given as the
+ string 'str' and the parser will automatically return the output in a
+ string."""
+
+ string_output = 0
+ if out == 'str' or self.out == 'str' or \
+ isinstance(self.out,StringIO):
+ # XXX - I don't really like this state handling logic, but at this
+ # point I don't want to make major changes, so adding the
+ # isinstance() check is the simplest I can do to ensure correct
+ # behavior.
+ out_old = self.out
+ self.out = StringIO()
+ string_output = 1
+ elif out is not None:
+ self.out = out
+
+ # Fast return of the unmodified input for NoColor scheme
+ if scheme == 'NoColor':
+ error = False
+ self.out.write(raw)
+ if string_output:
+ return raw,error
+ else:
+ return None,error
+
+ # local shorthands
+ colors = self.color_table[scheme].colors
+ self.colors = colors # put in object so __call__ sees it
+
+ # Remove trailing whitespace and normalize tabs
+ self.raw = raw.expandtabs().rstrip()
+
+ # store line offsets in self.lines
+ self.lines = [0, 0]
+ pos = 0
+ raw_find = self.raw.find
+ lines_append = self.lines.append
+ while 1:
+ pos = raw_find('\n', pos) + 1
+ if not pos: break
+ lines_append(pos)
+ lines_append(len(self.raw))
+
+ # parse the source and write it
+ self.pos = 0
+ text = StringIO(self.raw)
+
+ error = False
+ try:
+ for atoken in generate_tokens(text.readline):
+ self(*atoken)
+ except tokenize.TokenError as ex:
+ msg = ex.args[0]
+ line = ex.args[1][0]
+ self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
+ (colors[token.ERRORTOKEN],
+ msg, self.raw[self.lines[line]:],
+ colors.normal)
+ )
+ error = True
+ self.out.write(colors.normal+'\n')
+ if string_output:
+ output = self.out.getvalue()
+ self.out = out_old
+ return (output, error)
+ return (None, error)
+
+ def __call__(self, toktype, toktext, start_pos, end_pos, line):
+ """ Token handler, with syntax highlighting."""
+ (srow,scol) = start_pos
+ (erow,ecol) = end_pos
+ colors = self.colors
+ owrite = self.out.write
+
+ # line separator, so this works across platforms
+ linesep = os.linesep
+
+ # calculate new positions
+ oldpos = self.pos
+ newpos = self.lines[srow] + scol
+ self.pos = newpos + len(toktext)
+
+ # send the original whitespace, if needed
+ if newpos > oldpos:
+ owrite(self.raw[oldpos:newpos])
+
+ # skip indenting tokens
+ if toktype in [token.INDENT, token.DEDENT]:
+ self.pos = newpos
+ return
+
+ # map token type to a color group
+ if token.LPAR <= toktype <= token.OP:
+ toktype = token.OP
+ elif toktype == token.NAME and keyword.iskeyword(toktext):
+ toktype = _KEYWORD
+ color = colors.get(toktype, colors[_TEXT])
+
+ #print '<%s>' % toktext, # dbg
+
+ # Triple quoted strings must be handled carefully so that backtracking
+ # in pagers works correctly. We need color terminators on _each_ line.
+ if linesep in toktext:
+ toktext = toktext.replace(linesep, '%s%s%s' %
+ (colors.normal,linesep,color))
+
+ # send text
+ owrite('%s%s%s' % (color,toktext,colors.normal))
+
+def main(argv=None):
+ """Run as a command-line script: colorize a python file or stdin using ANSI
+ color escapes and print to stdout.
+
+ Inputs:
+
+ - argv(None): a list of strings like sys.argv[1:] giving the command-line
+ arguments. If None, use sys.argv[1:].
+ """
+
+ usage_msg = """%prog [options] [filename]
+
+Colorize a python file or stdin using ANSI color escapes and print to stdout.
+If no filename is given, or if filename is -, read standard input."""
+
+ import optparse
+ parser = optparse.OptionParser(usage=usage_msg)
+ newopt = parser.add_option
+ newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
+ choices=['Linux','LightBG','NoColor'],default=_scheme_default,
+ help="give the color scheme to use. Currently only 'Linux'\
+ (default), 'LightBG' and 'NoColor' are implemented (give without\
+ quotes)")
+
+ opts,args = parser.parse_args(argv)
+
+ if len(args) > 1:
+ parser.error("you must give at most one filename.")
+
+ if len(args) == 0:
+ fname = '-' # no filename given; setup to read from stdin
+ else:
+ fname = args[0]
+
+ if fname == '-':
+ stream = sys.stdin
+ else:
+ try:
+ stream = open(fname)
+ except IOError as msg:
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+ parser = Parser()
+
+ # we need nested try blocks because pre-2.5 python doesn't support unified
+ # try-except-finally
+ try:
+ try:
+ # write colorized version to stdout
+ parser.format(stream.read(),scheme=opts.scheme_name)
+ except IOError as msg:
+ # if user reads through a pager and quits, don't print traceback
+ if msg.args != (32,'Broken pipe'):
+ raise
+ finally:
+ if stream is not sys.stdin:
+ stream.close() # in case a non-handled exception happened above
+
+if __name__ == "__main__":
+ main()
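A minimal sketch of the Parser API defined above: calling format2() with
out='str' returns the colorized text instead of writing it to a stream. The
source snippet is arbitrary; the scheme name comes from the tables above.

    from IPython.utils.PyColorize import Parser

    src = "def f(x):\n    return x + 1\n"
    colored, had_error = Parser().format2(src, out='str', scheme='Linux')
    print(colored)  # ANSI-escaped source; had_error is True on tokenize errors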
diff --git a/contrib/python/ipython/py2/IPython/utils/_process_cli.py b/contrib/python/ipython/py2/IPython/utils/_process_cli.py
index a65decf3b6..a7b7b90b68 100644
--- a/contrib/python/ipython/py2/IPython/utils/_process_cli.py
+++ b/contrib/python/ipython/py2/IPython/utils/_process_cli.py
@@ -1,78 +1,78 @@
-"""cli-specific implementation of process utilities.
-
-cli - Common Language Infrastructure for IronPython. Code
- can run on any operating system. Check os.name for os-
- specific settings.
-
-This file is only meant to be imported by process.py, not by end-users.
-
-This file is largely untested. Turning it into a full drop-in process
-interface for IronPython will probably require help filling in the
-details.
-"""
-
-# Import cli libraries:
-import clr
-import System
-
-# Import Python libraries:
-import os
-
-# Import IPython libraries:
-from IPython.utils import py3compat
-from ._process_common import arg_split
-
-def _find_cmd(cmd):
- """Find the full path to a command using which."""
- paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep)
- for path in paths:
- filename = os.path.join(path, cmd)
- if System.IO.File.Exists(filename):
- return py3compat.bytes_to_str(filename)
- raise OSError("command %r not found" % cmd)
-
-def system(cmd):
- """
- system(cmd) should work in a cli environment on Mac OSX, Linux,
- and Windows
- """
- psi = System.Diagnostics.ProcessStartInfo(cmd)
- psi.RedirectStandardOutput = True
- psi.RedirectStandardError = True
- psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
- psi.UseShellExecute = False
- # Start up process:
- reg = System.Diagnostics.Process.Start(psi)
-
-def getoutput(cmd):
- """
- getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
- and Windows
- """
- psi = System.Diagnostics.ProcessStartInfo(cmd)
- psi.RedirectStandardOutput = True
- psi.RedirectStandardError = True
- psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
- psi.UseShellExecute = False
- # Start up process:
- reg = System.Diagnostics.Process.Start(psi)
- myOutput = reg.StandardOutput
- output = myOutput.ReadToEnd()
- myError = reg.StandardError
- error = myError.ReadToEnd()
- return output
-
-def check_pid(pid):
- """
- Check if a process with the given PID (pid) exists
- """
- try:
- System.Diagnostics.Process.GetProcessById(pid)
- # process with given pid is running
- return True
- except System.InvalidOperationException:
- # process wasn't started by this object (but is running)
- return True
- except System.ArgumentException:
- # process with given pid isn't running
- return False
+"""cli-specific implementation of process utilities.
+
+cli - Common Language Infrastructure for IronPython. Code
+ can run on any operating system. Check os.name for os-
+ specific settings.
+
+This file is only meant to be imported by process.py, not by end-users.
+
+This file is largely untested. Turning it into a full drop-in process
+interface for IronPython will probably require help filling in the
+details.
+"""
+
+# Import cli libraries:
+import clr
+import System
+
+# Import Python libraries:
+import os
+
+# Import IPython libraries:
+from IPython.utils import py3compat
+from ._process_common import arg_split
+
+def _find_cmd(cmd):
+ """Find the full path to a command using which."""
+ paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep)
+ for path in paths:
+ filename = os.path.join(path, cmd)
+ if System.IO.File.Exists(filename):
+ return py3compat.bytes_to_str(filename)
+ raise OSError("command %r not found" % cmd)
+
+def system(cmd):
+ """
+ system(cmd) should work in a cli environment on Mac OSX, Linux,
+ and Windows
+ """
+ psi = System.Diagnostics.ProcessStartInfo(cmd)
+ psi.RedirectStandardOutput = True
+ psi.RedirectStandardError = True
+ psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
+ psi.UseShellExecute = False
+ # Start up process:
+ reg = System.Diagnostics.Process.Start(psi)
+
+def getoutput(cmd):
+ """
+ getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
+ and Windows
+ """
+ psi = System.Diagnostics.ProcessStartInfo(cmd)
+ psi.RedirectStandardOutput = True
+ psi.RedirectStandardError = True
+ psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
+ psi.UseShellExecute = False
+ # Start up process:
+ reg = System.Diagnostics.Process.Start(psi)
+ myOutput = reg.StandardOutput
+ output = myOutput.ReadToEnd()
+ myError = reg.StandardError
+ error = myError.ReadToEnd()
+ return output
+
+def check_pid(pid):
+ """
+ Check if a process with the given PID (pid) exists
+ """
+ try:
+ System.Diagnostics.Process.GetProcessById(pid)
+ # process with given pid is running
+ return True
+ except System.InvalidOperationException:
+ # process wasn't started by this object (but is running)
+ return True
+ except System.ArgumentException:
+ # process with given pid isn't running
+ return False
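An IronPython-only sketch (the module above imports the clr and System
namespaces, so it will not even import under CPython): check_pid() polls the
.NET process table for a PID.

    import os

    from IPython.utils._process_cli import check_pid  # IronPython only

    print(check_pid(os.getpid()))  # True: the current process exists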
diff --git a/contrib/python/ipython/py2/IPython/utils/_process_common.py b/contrib/python/ipython/py2/IPython/utils/_process_common.py
index 6851e41869..9ede30d3f8 100644
--- a/contrib/python/ipython/py2/IPython/utils/_process_common.py
+++ b/contrib/python/ipython/py2/IPython/utils/_process_common.py
@@ -1,75 +1,75 @@
-"""Common utilities for the various process_* implementations.
-
-This file is only meant to be imported by the platform-specific implementations
-of subprocess utilities, and it contains tools that are common to all of them.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-import subprocess
-import shlex
-import sys
+"""Common utilities for the various process_* implementations.
+
+This file is only meant to be imported by the platform-specific implementations
+of subprocess utilities, and it contains tools that are common to all of them.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+import subprocess
+import shlex
+import sys
import os
-
-from IPython.utils import py3compat
-
-#-----------------------------------------------------------------------------
-# Function definitions
-#-----------------------------------------------------------------------------
-
-def read_no_interrupt(p):
- """Read from a pipe ignoring EINTR errors.
-
- This is necessary because when reading from pipes with GUI event loops
- running in the background, interrupts are often raised that stop the
- command from completing."""
- import errno
-
- try:
- return p.read()
- except IOError as err:
- if err.errno != errno.EINTR:
- raise
-
-
-def process_handler(cmd, callback, stderr=subprocess.PIPE):
- """Open a command in a shell subprocess and execute a callback.
-
- This function provides common scaffolding for creating subprocess.Popen()
- calls. It creates a Popen object and then calls the callback with it.
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed by the system, using :class:`subprocess.Popen`.
- If a string is passed, it will be run in the system shell. If a list is
- passed, it will be used directly as arguments.
-
- callback : callable
- A one-argument function that will be called with the Popen object.
-
- stderr : file descriptor number, optional
- By default this is set to ``subprocess.PIPE``, but you can also pass the
- value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
- the same file descriptor as its stdout. This is useful to read stdout
- and stderr combined in the order they are generated.
-
- Returns
- -------
- The return value of the provided callback is returned.
- """
- sys.stdout.flush()
- sys.stderr.flush()
- # On win32, close_fds can't be true when using pipes for stdin/out/err
- close_fds = sys.platform != 'win32'
+
+from IPython.utils import py3compat
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+def read_no_interrupt(p):
+ """Read from a pipe ignoring EINTR errors.
+
+ This is necessary because when reading from pipes with GUI event loops
+ running in the background, interrupts are often raised that stop the
+ command from completing."""
+ import errno
+
+ try:
+ return p.read()
+ except IOError as err:
+ if err.errno != errno.EINTR:
+ raise
+
+
+def process_handler(cmd, callback, stderr=subprocess.PIPE):
+ """Open a command in a shell subprocess and execute a callback.
+
+ This function provides common scaffolding for creating subprocess.Popen()
+ calls. It creates a Popen object and then calls the callback with it.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed by the system, using :class:`subprocess.Popen`.
+ If a string is passed, it will be run in the system shell. If a list is
+ passed, it will be used directly as arguments.
+
+ callback : callable
+ A one-argument function that will be called with the Popen object.
+
+ stderr : file descriptor number, optional
+ By default this is set to ``subprocess.PIPE``, but you can also pass the
+ value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
+ the same file descriptor as its stdout. This is useful to read stdout
+ and stderr combined in the order they are generated.
+
+ Returns
+ -------
+ The return value of the provided callback is returned.
+ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # On win32, close_fds can't be true when using pipes for stdin/out/err
+ close_fds = sys.platform != 'win32'
# Determine if cmd should be run with system shell.
shell = isinstance(cmd, py3compat.string_types)
# On POSIX systems run shell commands with user-preferred shell.
@@ -78,146 +78,146 @@ def process_handler(cmd, callback, stderr=subprocess.PIPE):
executable = os.environ['SHELL']
p = subprocess.Popen(cmd, shell=shell,
executable=executable,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=stderr,
- close_fds=close_fds)
-
- try:
- out = callback(p)
- except KeyboardInterrupt:
- print('^C')
- sys.stdout.flush()
- sys.stderr.flush()
- out = None
- finally:
- # Make really sure that we don't leave processes behind, in case the
- # call above raises an exception
- # We start by assuming the subprocess finished (to avoid NameErrors
- # later depending on the path taken)
- if p.returncode is None:
- try:
- p.terminate()
- p.poll()
- except OSError:
- pass
- # One last try on our way out
- if p.returncode is None:
- try:
- p.kill()
- except OSError:
- pass
-
- return out
-
-
-def getoutput(cmd):
- """Run a command and return its stdout/stderr as a string.
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed in the system shell.
-
- Returns
- -------
- output : str
- A string containing the combination of stdout and stderr from the
- subprocess, in whatever order the subprocess originally wrote to its
- file descriptors (so the order of the information in this string is the
- correct order as would be seen if running the command in a terminal).
- """
- out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
- if out is None:
- return ''
- return py3compat.bytes_to_str(out)
-
-
-def getoutputerror(cmd):
- """Return (standard output, standard error) of executing cmd in a shell.
-
- Accepts the same arguments as os.system().
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed in the system shell.
-
- Returns
- -------
- stdout : str
- stderr : str
- """
- return get_output_error_code(cmd)[:2]
-
-def get_output_error_code(cmd):
- """Return (standard output, standard error, return code) of executing cmd
- in a shell.
-
- Accepts the same arguments as os.system().
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed in the system shell.
-
- Returns
- -------
- stdout : str
- stderr : str
- returncode : int
- """
-
- out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
- if out_err is None:
- return '', '', p.returncode
- out, err = out_err
- return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err), p.returncode
-
-def arg_split(s, posix=False, strict=True):
- """Split a command line's arguments in a shell-like manner.
-
- This is a modified version of the standard library's shlex.split()
- function, but with a default of posix=False for splitting, so that quotes
- in inputs are respected.
-
- If strict=False, then any errors shlex.split would raise will result in the
- unparsed remainder being the last element of the list, rather than raising.
- This is because we sometimes use arg_split to parse things other than
- command-line args.
- """
-
- # Unfortunately, python's shlex module is buggy with unicode input:
- # http://bugs.python.org/issue1170
- # At least encoding the input when it's unicode seems to help, but there
- # may be more problems lurking. Apparently this is fixed in python3.
- is_unicode = False
- if (not py3compat.PY3) and isinstance(s, unicode):
- is_unicode = True
- s = s.encode('utf-8')
- lex = shlex.shlex(s, posix=posix)
- lex.whitespace_split = True
- # Extract tokens, ensuring that things like leaving open quotes
- # do not cause this to raise. This is important, because we
- # sometimes pass Python source through this (e.g. %timeit f(" ")),
- # and it shouldn't raise an exception.
- # It may be a bad idea to parse things that are not command-line args
- # through this function, but we do, so let's be safe about it.
- lex.commenters = ''  # fix for GH-1269
- tokens = []
- while True:
- try:
- tokens.append(next(lex))
- except StopIteration:
- break
- except ValueError:
- if strict:
- raise
- # couldn't parse, get remaining blob as last token
- tokens.append(lex.token)
- break
-
- if is_unicode:
- # Convert the tokens back to unicode.
- tokens = [x.decode('utf-8') for x in tokens]
- return tokens
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ close_fds=close_fds)
+
+ try:
+ out = callback(p)
+ except KeyboardInterrupt:
+ print('^C')
+ sys.stdout.flush()
+ sys.stderr.flush()
+ out = None
+ finally:
+ # Make really sure that we don't leave processes behind, in case the
+ # call above raises an exception
+ # We start by assuming the subprocess finished (to avoid NameErrors
+ # later depending on the path taken)
+ if p.returncode is None:
+ try:
+ p.terminate()
+ p.poll()
+ except OSError:
+ pass
+ # One last try on our way out
+ if p.returncode is None:
+ try:
+ p.kill()
+ except OSError:
+ pass
+
+ return out
+
+
+def getoutput(cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
+ if out is None:
+ return ''
+ return py3compat.bytes_to_str(out)
+
+
+def getoutputerror(cmd):
+ """Return (standard output, standard error) of executing cmd in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ stderr : str
+ """
+ return get_output_error_code(cmd)[:2]
+
+def get_output_error_code(cmd):
+ """Return (standard output, standard error, return code) of executing cmd
+ in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ stderr : str
+ returncode : int
+ """
+
+ out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
+ if out_err is None:
+ return '', '', p.returncode
+ out, err = out_err
+ return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err), p.returncode
+
+def arg_split(s, posix=False, strict=True):
+ """Split a command line's arguments in a shell-like manner.
+
+ This is a modified version of the standard library's shlex.split()
+ function, but with a default of posix=False for splitting, so that quotes
+ in inputs are respected.
+
+ If strict=False, then any errors shlex.split would raise will result in the
+ unparsed remainder being the last element of the list, rather than raising.
+ This is because we sometimes use arg_split to parse things other than
+ command-line args.
+ """
+
+ # Unfortunately, python's shlex module is buggy with unicode input:
+ # http://bugs.python.org/issue1170
+ # At least encoding the input when it's unicode seems to help, but there
+ # may be more problems lurking. Apparently this is fixed in python3.
+ is_unicode = False
+ if (not py3compat.PY3) and isinstance(s, unicode):
+ is_unicode = True
+ s = s.encode('utf-8')
+ lex = shlex.shlex(s, posix=posix)
+ lex.whitespace_split = True
+ # Extract tokens, ensuring that things like leaving open quotes
+ # do not cause this to raise. This is important, because we
+ # sometimes pass Python source through this (e.g. %timeit f(" ")),
+ # and it shouldn't raise an exception.
+ # It may be a bad idea to parse things that are not command-line args
+ # through this function, but we do, so let's be safe about it.
+ lex.commenters = ''  # fix for GH-1269
+ tokens = []
+ while True:
+ try:
+ tokens.append(next(lex))
+ except StopIteration:
+ break
+ except ValueError:
+ if strict:
+ raise
+ # couldn't parse, get remaining blob as last token
+ tokens.append(lex.token)
+ break
+
+ if is_unicode:
+ # Convert the tokens back to unicode.
+ tokens = [x.decode('utf-8') for x in tokens]
+ return tokens
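
As a minimal sketch of how the helpers in this file compose (assuming the vendored IPython py2 package is importable on the current path; the commands passed in are arbitrary examples):

    from IPython.utils._process_common import (
        arg_split, getoutput, get_output_error_code)

    # posix=False (the module default) keeps quotes intact instead of
    # stripping them the way shlex.split would in POSIX mode.
    print(arg_split('grep "hello world" *.txt'))

    # Run a command through process_handler; stdout and stderr come back
    # interleaved as a single string, in the order they were written.
    print(getoutput('echo hi'))

    # Same machinery, but the exit status is recovered as well.
    out, err, rc = get_output_error_code('ls /nonexistent')
    print(rc)
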
diff --git a/contrib/python/ipython/py2/IPython/utils/_process_posix.py b/contrib/python/ipython/py2/IPython/utils/_process_posix.py
index 059e80c991..ac3a9a0507 100644
--- a/contrib/python/ipython/py2/IPython/utils/_process_posix.py
+++ b/contrib/python/ipython/py2/IPython/utils/_process_posix.py
@@ -1,225 +1,225 @@
-"""Posix-specific implementation of process utilities.
-
-This file is only meant to be imported by process.py, not by end-users.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib
-import errno
-import os
-import subprocess as sp
-import sys
-
-import pexpect
-
-# Our own
-from ._process_common import getoutput, arg_split
-from IPython.utils import py3compat
-from IPython.utils.encoding import DEFAULT_ENCODING
-
-#-----------------------------------------------------------------------------
-# Function definitions
-#-----------------------------------------------------------------------------
-
-def _find_cmd(cmd):
- """Find the full path to a command using which."""
-
- path = sp.Popen(['/usr/bin/env', 'which', cmd],
- stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]
- return py3compat.bytes_to_str(path)
-
-
-class ProcessHandler(object):
- """Execute subprocesses under the control of pexpect.
- """
- # Timeout in seconds to wait on each reading of the subprocess' output.
- # This should not be set too low, to avoid excessive CPU usage on our side,
- # since we read in a loop whose period is controlled by this timeout.
- read_timeout = 0.05
-
- # Timeout to give a process if we receive SIGINT, between sending the
- # SIGINT to the process and forcefully terminating it.
- terminate_timeout = 0.2
-
- # File object where stdout and stderr of the subprocess will be written
- logfile = None
-
- # Shell to call for subprocesses to execute
- _sh = None
-
- @property
- def sh(self):
- if self._sh is None:
- self._sh = pexpect.which('sh')
- if self._sh is None:
- raise OSError('"sh" shell not found')
-
- return self._sh
-
- def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):
- """Arguments are used for pexpect calls."""
- self.read_timeout = (ProcessHandler.read_timeout if read_timeout is
- None else read_timeout)
- self.terminate_timeout = (ProcessHandler.terminate_timeout if
- terminate_timeout is None else
- terminate_timeout)
- self.logfile = sys.stdout if logfile is None else logfile
-
- def getoutput(self, cmd):
- """Run a command and return its stdout/stderr as a string.
-
- Parameters
- ----------
- cmd : str
- A command to be executed in the system shell.
-
- Returns
- -------
- output : str
- A string containing the combination of stdout and stderr from the
- subprocess, in whatever order the subprocess originally wrote to its
- file descriptors (so the order of the information in this string is the
- correct order as would be seen if running the command in a terminal).
- """
- try:
- return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
- except KeyboardInterrupt:
- print('^C', file=sys.stderr, end='')
-
- def getoutput_pexpect(self, cmd):
- """Run a command and return its stdout/stderr as a string.
-
- Parameters
- ----------
- cmd : str
- A command to be executed in the system shell.
-
- Returns
- -------
- output : str
- A string containing the combination of stdout and stderr from the
- subprocess, in whatever order the subprocess originally wrote to its
- file descriptors (so the order of the information in this string is the
- correct order as would be seen if running the command in a terminal).
- """
- try:
- return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
- except KeyboardInterrupt:
- print('^C', file=sys.stderr, end='')
-
- def system(self, cmd):
- """Execute a command in a subshell.
-
- Parameters
- ----------
- cmd : str
- A command to be executed in the system shell.
-
- Returns
- -------
- int : child's exitstatus
- """
- # Get likely encoding for the output.
- enc = DEFAULT_ENCODING
-
- # Patterns to match on the output, for pexpect. We read input and
- # allow either a short timeout or EOF
- patterns = [pexpect.TIMEOUT, pexpect.EOF]
- # the index of the EOF pattern in the list.
- # even though we know it's 1, this call means we don't have to worry if
- # we change the above list, and forget to change this value:
- EOF_index = patterns.index(pexpect.EOF)
- # The size of the output stored so far in the process output buffer.
- # Since pexpect only appends to this buffer, each time we print we
- # record how far we've printed, so that next time we only print *new*
- # content from the buffer.
- out_size = 0
- try:
- # Since we're not really searching the buffer for text patterns, we
- # can set pexpect's search window to be tiny and it won't matter.
- # We only search for the 'patterns' timeout or EOF, which aren't in
- # the text itself.
- #child = pexpect.spawn(pcmd, searchwindowsize=1)
- if hasattr(pexpect, 'spawnb'):
- child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
- else:
- child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
- flush = sys.stdout.flush
- while True:
- # res_idx is the index of the pattern that caused the match, so we
- # know whether we've finished (if we matched EOF) or not
- res_idx = child.expect_list(patterns, self.read_timeout)
- print(child.before[out_size:].decode(enc, 'replace'), end='')
- flush()
- if res_idx==EOF_index:
- break
- # Update the pointer to what we've already printed
- out_size = len(child.before)
- except KeyboardInterrupt:
- # We need to send ^C to the process. The ascii code for '^C' is 3
- # (the character is known as ETX for 'End of Text', see
- # curses.ascii.ETX).
- child.sendline(chr(3))
- # Read and print any more output the program might produce on its
- # way out.
- try:
- out_size = len(child.before)
- child.expect_list(patterns, self.terminate_timeout)
- print(child.before[out_size:].decode(enc, 'replace'), end='')
- sys.stdout.flush()
- except KeyboardInterrupt:
- # Impatient users tend to type it multiple times
- pass
- finally:
- # Ensure the subprocess really is terminated
- child.terminate(force=True)
- # add isalive check, to ensure exitstatus is set:
- child.isalive()
-
- # We follow the subprocess pattern, returning either the exit status
- # as a positive number, or the terminating signal as a negative
- # number.
- # On Linux, sh returns 128+n for signals that terminate child processes;
- # on BSD (OS X), the signal code is set instead
- if child.exitstatus is None:
- # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None
- if child.signalstatus is None:
- # this condition may never occur,
- # but let's be certain we always return an integer.
- return 0
- return -child.signalstatus
- if child.exitstatus > 128:
- return -(child.exitstatus - 128)
- return child.exitstatus
-
-
- # Expose system() through a functional interface for outside use. Note that we
- # use getoutput() from the _common utils, which is built on top of popen().
- # Using pexpect to capture subprocess output is avoided because programs
- # think they are talking to a tty and emit highly formatted output (ls is a
- # good example), which makes the result hard to parse.
-system = ProcessHandler().system
-
-def check_pid(pid):
- try:
- os.kill(pid, 0)
- except OSError as err:
- if err.errno == errno.ESRCH:
- return False
- elif err.errno == errno.EPERM:
- # Don't have permission to signal the process - probably means it exists
- return True
- raise
- else:
- return True
+"""Posix-specific implementation of process utilities.
+
+This file is only meant to be imported by process.py, not by end-users.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# Stdlib
+import errno
+import os
+import subprocess as sp
+import sys
+
+import pexpect
+
+# Our own
+from ._process_common import getoutput, arg_split
+from IPython.utils import py3compat
+from IPython.utils.encoding import DEFAULT_ENCODING
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+def _find_cmd(cmd):
+ """Find the full path to a command using which."""
+
+ path = sp.Popen(['/usr/bin/env', 'which', cmd],
+ stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]
+ return py3compat.bytes_to_str(path)
+
+
+class ProcessHandler(object):
+ """Execute subprocesses under the control of pexpect.
+ """
+ # Timeout in seconds to wait on each reading of the subprocess' output.
+ # This should not be set too low, to avoid excessive CPU usage on our side,
+ # since we read in a loop whose period is controlled by this timeout.
+ read_timeout = 0.05
+
+ # Timeout to give a process if we receive SIGINT, between sending the
+ # SIGINT to the process and forcefully terminating it.
+ terminate_timeout = 0.2
+
+ # File object where stdout and stderr of the subprocess will be written
+ logfile = None
+
+ # Shell to call for subprocesses to execute
+ _sh = None
+
+ @property
+ def sh(self):
+ if self._sh is None:
+ self._sh = pexpect.which('sh')
+ if self._sh is None:
+ raise OSError('"sh" shell not found')
+
+ return self._sh
+
+ def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):
+ """Arguments are used for pexpect calls."""
+ self.read_timeout = (ProcessHandler.read_timeout if read_timeout is
+ None else read_timeout)
+ self.terminate_timeout = (ProcessHandler.terminate_timeout if
+ terminate_timeout is None else
+ terminate_timeout)
+ self.logfile = sys.stdout if logfile is None else logfile
+
+ def getoutput(self, cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ try:
+ return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
+ except KeyboardInterrupt:
+ print('^C', file=sys.stderr, end='')
+
+ def getoutput_pexpect(self, cmd):
+ """Run a command and return its stdout/stderr as a string.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ output : str
+ A string containing the combination of stdout and stderr from the
+ subprocess, in whatever order the subprocess originally wrote to its
+ file descriptors (so the order of the information in this string is the
+ correct order as would be seen if running the command in a terminal).
+ """
+ try:
+ return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
+ except KeyboardInterrupt:
+ print('^C', file=sys.stderr, end='')
+
+ def system(self, cmd):
+ """Execute a command in a subshell.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ int : child's exitstatus
+ """
+ # Get likely encoding for the output.
+ enc = DEFAULT_ENCODING
+
+ # Patterns to match on the output, for pexpect. We read input and
+ # allow either a short timeout or EOF
+ patterns = [pexpect.TIMEOUT, pexpect.EOF]
+ # the index of the EOF pattern in the list.
+ # even though we know it's 1, this call means we don't have to worry if
+ # we change the above list, and forget to change this value:
+ EOF_index = patterns.index(pexpect.EOF)
+ # The size of the output stored so far in the process output buffer.
+ # Since pexpect only appends to this buffer, each time we print we
+ # record how far we've printed, so that next time we only print *new*
+ # content from the buffer.
+ out_size = 0
+ try:
+ # Since we're not really searching the buffer for text patterns, we
+ # can set pexpect's search window to be tiny and it won't matter.
+ # We only search for the 'patterns' timeout or EOF, which aren't in
+ # the text itself.
+ #child = pexpect.spawn(pcmd, searchwindowsize=1)
+ if hasattr(pexpect, 'spawnb'):
+ child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
+ else:
+ child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
+ flush = sys.stdout.flush
+ while True:
+ # res_idx is the index of the pattern that caused the match, so we
+ # know whether we've finished (if we matched EOF) or not
+ res_idx = child.expect_list(patterns, self.read_timeout)
+ print(child.before[out_size:].decode(enc, 'replace'), end='')
+ flush()
+ if res_idx==EOF_index:
+ break
+ # Update the pointer to what we've already printed
+ out_size = len(child.before)
+ except KeyboardInterrupt:
+ # We need to send ^C to the process. The ascii code for '^C' is 3
+ # (the character is known as ETX for 'End of Text', see
+ # curses.ascii.ETX).
+ child.sendline(chr(3))
+ # Read and print any more output the program might produce on its
+ # way out.
+ try:
+ out_size = len(child.before)
+ child.expect_list(patterns, self.terminate_timeout)
+ print(child.before[out_size:].decode(enc, 'replace'), end='')
+ sys.stdout.flush()
+ except KeyboardInterrupt:
+ # Impatient users tend to type it multiple times
+ pass
+ finally:
+ # Ensure the subprocess really is terminated
+ child.terminate(force=True)
+ # add isalive check, to ensure exitstatus is set:
+ child.isalive()
+
+ # We follow the subprocess pattern, returning either the exit status
+ # as a positive number, or the terminating signal as a negative
+ # number.
+ # On Linux, sh returns 128+n for signals that terminate child processes;
+ # on BSD (OS X), the signal code is set instead
+ if child.exitstatus is None:
+ # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None
+ if child.signalstatus is None:
+ # this condition may never occur,
+ # but let's be certain we always return an integer.
+ return 0
+ return -child.signalstatus
+ if child.exitstatus > 128:
+ return -(child.exitstatus - 128)
+ return child.exitstatus
+
+
+ # Expose system() through a functional interface for outside use. Note that we
+ # use getoutput() from the _common utils, which is built on top of popen().
+ # Using pexpect to capture subprocess output is avoided because programs
+ # think they are talking to a tty and emit highly formatted output (ls is a
+ # good example), which makes the result hard to parse.
+system = ProcessHandler().system
+
+def check_pid(pid):
+ try:
+ os.kill(pid, 0)
+ except OSError as err:
+ if err.errno == errno.ESRCH:
+ return False
+ elif err.errno == errno.EPERM:
+ # Don't have permission to signal the process - probably means it exists
+ return True
+ raise
+ else:
+ return True
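
A condensed, standalone sketch of the polling loop that ProcessHandler.system implements above (assuming pexpect is installed; the sh -c invocation and the TIMEOUT/EOF pattern list mirror the class, while stream_command itself is a hypothetical helper name):

    import sys
    import pexpect

    def stream_command(cmd, read_timeout=0.05):
        # Run the command under sh -c, as ProcessHandler does.
        child = pexpect.spawn('sh', args=['-c', cmd])
        patterns = [pexpect.TIMEOUT, pexpect.EOF]
        eof_index = patterns.index(pexpect.EOF)
        printed = 0  # how much of child.before has already been echoed
        while True:
            res_idx = child.expect_list(patterns, read_timeout)
            # pexpect only appends to its buffer, so echo just the new part.
            sys.stdout.write(child.before[printed:].decode('utf-8', 'replace'))
            sys.stdout.flush()
            if res_idx == eof_index:
                break
            printed = len(child.before)
        child.close()
        return child.exitstatus

    stream_command('echo one; sleep 1; echo two')

Because TIMEOUT is part of the pattern list, expect_list returns its index instead of raising, which is what lets output be streamed incrementally while the command runs.
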
diff --git a/contrib/python/ipython/py2/IPython/utils/_process_win32.py b/contrib/python/ipython/py2/IPython/utils/_process_win32.py
index 6d7d0f4197..3ac59b2c29 100644
--- a/contrib/python/ipython/py2/IPython/utils/_process_win32.py
+++ b/contrib/python/ipython/py2/IPython/utils/_process_win32.py
@@ -1,192 +1,192 @@
-"""Windows-specific implementation of process utilities.
-
-This file is only meant to be imported by process.py, not by end-users.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# stdlib
-import os
-import sys
-import ctypes
-
-from ctypes import c_int, POINTER
-from ctypes.wintypes import LPCWSTR, HLOCAL
-from subprocess import STDOUT
-
-# our own imports
-from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
-from . import py3compat
-from .encoding import DEFAULT_ENCODING
-
-#-----------------------------------------------------------------------------
-# Function definitions
-#-----------------------------------------------------------------------------
-
-class AvoidUNCPath(object):
- """A context manager to protect command execution from UNC paths.
-
- In the Win32 API, commands can't be invoked with the cwd being a UNC path.
- This context manager temporarily changes directory to the 'C:' drive on
- entering, and restores the original working directory on exit.
-
- The context manager returns the starting working directory *if* it made a
- change and None otherwise, so that users can apply the necessary adjustment
- to their system calls in the event of a change.
-
- Examples
- --------
- ::
- cmd = 'dir'
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- os.system(cmd)
- """
- def __enter__(self):
- self.path = py3compat.getcwd()
- self.is_unc_path = self.path.startswith(r"\\")
- if self.is_unc_path:
- # change to c drive (as cmd.exe cannot handle UNC addresses)
- os.chdir("C:")
- return self.path
- else:
- # We return None to signal that there was no change in the working
- # directory
- return None
-
- def __exit__(self, exc_type, exc_value, traceback):
- if self.is_unc_path:
- os.chdir(self.path)
-
-
-def _find_cmd(cmd):
- """Find the full path to a .bat or .exe using the win32api module."""
- try:
- from win32api import SearchPath
- except ImportError:
- raise ImportError('you need to have pywin32 installed for this to work')
- else:
- PATH = os.environ['PATH']
- extensions = ['.exe', '.com', '.bat', '.py']
- path = None
- for ext in extensions:
- try:
- path = SearchPath(PATH, cmd, ext)[0]
- except:
- pass
- if path is None:
- raise OSError("command %r not found" % cmd)
- else:
- return path
-
-
-def _system_body(p):
- """Callback for _system."""
- enc = DEFAULT_ENCODING
- for line in read_no_interrupt(p.stdout).splitlines():
- line = line.decode(enc, 'replace')
- print(line, file=sys.stdout)
- for line in read_no_interrupt(p.stderr).splitlines():
- line = line.decode(enc, 'replace')
- print(line, file=sys.stderr)
-
- # Wait for the process to finish so that returncode is set
- return p.wait()
-
-
-def system(cmd):
- """Win32 version of os.system() that works with network shares.
-
- Note that this implementation returns None, as meant for use in IPython.
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed in the system shell.
-
- Returns
- -------
- None : we explicitly do NOT return the subprocess status code, as this
- utility is meant to be used extensively in IPython, where any return value
- would trigger :func:`sys.displayhook` calls.
- """
- # The controller provides interactivity with both
- # stdin and stdout
- #import _process_win32_controller
- #_process_win32_controller.system(cmd)
-
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- return process_handler(cmd, _system_body)
-
-def getoutput(cmd):
- """Return standard output of executing cmd in a shell.
-
- Accepts the same arguments as os.system().
-
- Parameters
- ----------
- cmd : str or list
- A command to be executed in the system shell.
-
- Returns
- -------
- stdout : str
- """
-
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
-
- if out is None:
- out = b''
- return py3compat.bytes_to_str(out)
-
-try:
- CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
- CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
- CommandLineToArgvW.restype = POINTER(LPCWSTR)
- LocalFree = ctypes.windll.kernel32.LocalFree
- LocalFree.restype = HLOCAL
- LocalFree.argtypes = [HLOCAL]
-
- def arg_split(commandline, posix=False, strict=True):
- """Split a command line's arguments in a shell-like manner.
-
- This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
- to do the argv splitting. The posix parameter is ignored.
-
- If strict=False, process_common.arg_split(...strict=False) is used instead.
- """
- #CommandLineToArgvW returns path to executable if called with empty string.
- if commandline.strip() == "":
- return []
- if not strict:
- # not really a command-line arg; fall back on _process_common
- return py_arg_split(commandline, posix=posix, strict=strict)
- argvn = c_int()
- result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
- result_array_type = LPCWSTR * argvn.value
- result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
- retval = LocalFree(result_pointer)
- return result
-except AttributeError:
- arg_split = py_arg_split
-
-def check_pid(pid):
- # OpenProcess returns 0 if no such process (of ours) exists
- # positive int otherwise
- return bool(ctypes.windll.kernel32.OpenProcess(1, 0, pid))
+"""Windows-specific implementation of process utilities.
+
+This file is only meant to be imported by process.py, not by end-users.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# stdlib
+import os
+import sys
+import ctypes
+
+from ctypes import c_int, POINTER
+from ctypes.wintypes import LPCWSTR, HLOCAL
+from subprocess import STDOUT
+
+# our own imports
+from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
+from . import py3compat
+from .encoding import DEFAULT_ENCODING
+
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+class AvoidUNCPath(object):
+ """A context manager to protect command execution from UNC paths.
+
+ In the Win32 API, commands can't be invoked with the cwd being a UNC path.
+ This context manager temporarily changes directory to the 'C:' drive on
+ entering, and restores the original working directory on exit.
+
+ The context manager returns the starting working directory *if* it made a
+ change and None otherwise, so that users can apply the necessary adjustment
+ to their system calls in the event of a change.
+
+ Examples
+ --------
+ ::
+ cmd = 'dir'
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ os.system(cmd)
+ """
+ def __enter__(self):
+ self.path = py3compat.getcwd()
+ self.is_unc_path = self.path.startswith(r"\\")
+ if self.is_unc_path:
+ # change to c drive (as cmd.exe cannot handle UNC addresses)
+ os.chdir("C:")
+ return self.path
+ else:
+ # We return None to signal that there was no change in the working
+ # directory
+ return None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.is_unc_path:
+ os.chdir(self.path)
+
+
+def _find_cmd(cmd):
+ """Find the full path to a .bat or .exe using the win32api module."""
+ try:
+ from win32api import SearchPath
+ except ImportError:
+ raise ImportError('you need to have pywin32 installed for this to work')
+ else:
+ PATH = os.environ['PATH']
+ extensions = ['.exe', '.com', '.bat', '.py']
+ path = None
+ for ext in extensions:
+ try:
+ path = SearchPath(PATH, cmd, ext)[0]
+ except:
+ pass
+ if path is None:
+ raise OSError("command %r not found" % cmd)
+ else:
+ return path
+
+
+def _system_body(p):
+ """Callback for _system."""
+ enc = DEFAULT_ENCODING
+ for line in read_no_interrupt(p.stdout).splitlines():
+ line = line.decode(enc, 'replace')
+ print(line, file=sys.stdout)
+ for line in read_no_interrupt(p.stderr).splitlines():
+ line = line.decode(enc, 'replace')
+ print(line, file=sys.stderr)
+
+ # Wait for the process to finish so that returncode is set
+ return p.wait()
+
+
+def system(cmd):
+ """Win32 version of os.system() that works with network shares.
+
+ Note that this implementation returns None, as meant for use in IPython.
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ None : we explicitly do NOT return the subprocess status code, as this
+ utility is meant to be used extensively in IPython, where any return value
+ would trigger :func:`sys.displayhook` calls.
+ """
+ # The controller provides interactivity with both
+ # stdin and stdout
+ #import _process_win32_controller
+ #_process_win32_controller.system(cmd)
+
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ return process_handler(cmd, _system_body)
+
+def getoutput(cmd):
+ """Return standard output of executing cmd in a shell.
+
+ Accepts the same arguments as os.system().
+
+ Parameters
+ ----------
+ cmd : str or list
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ stdout : str
+ """
+
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
+
+ if out is None:
+ out = b''
+ return py3compat.bytes_to_str(out)
+
+try:
+ CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+ CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
+ CommandLineToArgvW.restype = POINTER(LPCWSTR)
+ LocalFree = ctypes.windll.kernel32.LocalFree
+ LocalFree.restype = HLOCAL
+ LocalFree.argtypes = [HLOCAL]
+
+ def arg_split(commandline, posix=False, strict=True):
+ """Split a command line's arguments in a shell-like manner.
+
+ This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
+ to do the argv splitting. The posix parameter is ignored.
+
+ If strict=False, process_common.arg_split(...strict=False) is used instead.
+ """
+ #CommandLineToArgvW returns path to executable if called with empty string.
+ if commandline.strip() == "":
+ return []
+ if not strict:
+ # not really a command-line arg; fall back on _process_common
+ return py_arg_split(commandline, posix=posix, strict=strict)
+ argvn = c_int()
+ result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
+ result_array_type = LPCWSTR * argvn.value
+ result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
+ retval = LocalFree(result_pointer)
+ return result
+except AttributeError:
+ arg_split = py_arg_split
+
+def check_pid(pid):
+ # OpenProcess returns 0 if no such process (of ours) exists
+ # positive int otherwise
+ return bool(ctypes.windll.kernel32.OpenProcess(1, 0, pid))
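
A self-contained sketch of the CommandLineToArgvW splitting used above (Windows-only; assumes shell32 and kernel32 are reachable through ctypes, exactly as in this file; win_arg_split is a hypothetical name for the illustration):

    import ctypes
    from ctypes import c_int, POINTER
    from ctypes.wintypes import LPCWSTR, HLOCAL

    CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
    CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
    CommandLineToArgvW.restype = POINTER(LPCWSTR)
    LocalFree = ctypes.windll.kernel32.LocalFree
    LocalFree.argtypes = [HLOCAL]
    LocalFree.restype = HLOCAL

    def win_arg_split(commandline):
        # An empty string would make the API return the current executable's path.
        if not commandline.strip():
            return []
        argc = c_int()
        argv = CommandLineToArgvW(commandline.lstrip(), ctypes.byref(argc))
        try:
            # Indexing a POINTER(LPCWSTR) yields Python unicode strings.
            return [argv[i] for i in range(argc.value)]
        finally:
            LocalFree(argv)  # the argv block is OS-allocated and must be freed

    print(win_arg_split(u'python -c "print 1"'))
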
diff --git a/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py b/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py
index 607e411916..555eec23b3 100644
--- a/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py
+++ b/contrib/python/ipython/py2/IPython/utils/_process_win32_controller.py
@@ -1,577 +1,577 @@
-"""Windows-specific implementation of process utilities with direct WinAPI.
-
-This file is meant to be used by process.py
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-from __future__ import print_function
-
-# stdlib
-import os, sys, threading
-import ctypes, msvcrt
-
-# local imports
-from . import py3compat
-
-# Win32 API types needed for the API calls
-from ctypes import POINTER
-from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \
- ULONG, LPCWSTR
-LPDWORD = POINTER(DWORD)
-LPHANDLE = POINTER(HANDLE)
-ULONG_PTR = POINTER(ULONG)
-class SECURITY_ATTRIBUTES(ctypes.Structure):
- _fields_ = [("nLength", DWORD),
- ("lpSecurityDescriptor", LPVOID),
- ("bInheritHandle", BOOL)]
-LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES)
-class STARTUPINFO(ctypes.Structure):
- _fields_ = [("cb", DWORD),
- ("lpReserved", LPCWSTR),
- ("lpDesktop", LPCWSTR),
- ("lpTitle", LPCWSTR),
- ("dwX", DWORD),
- ("dwY", DWORD),
- ("dwXSize", DWORD),
- ("dwYSize", DWORD),
- ("dwXCountChars", DWORD),
- ("dwYCountChars", DWORD),
- ("dwFillAttribute", DWORD),
- ("dwFlags", DWORD),
- ("wShowWindow", WORD),
- ("cbReserved2", WORD),
- ("lpReserved2", LPVOID),
- ("hStdInput", HANDLE),
- ("hStdOutput", HANDLE),
- ("hStdError", HANDLE)]
-LPSTARTUPINFO = POINTER(STARTUPINFO)
-class PROCESS_INFORMATION(ctypes.Structure):
- _fields_ = [("hProcess", HANDLE),
- ("hThread", HANDLE),
- ("dwProcessId", DWORD),
- ("dwThreadId", DWORD)]
-LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
-
-# Win32 API constants needed
-ERROR_HANDLE_EOF = 38
-ERROR_BROKEN_PIPE = 109
-ERROR_NO_DATA = 232
-HANDLE_FLAG_INHERIT = 0x0001
-STARTF_USESTDHANDLES = 0x0100
-CREATE_SUSPENDED = 0x0004
-CREATE_NEW_CONSOLE = 0x0010
-CREATE_NO_WINDOW = 0x08000000
-STILL_ACTIVE = 259
-WAIT_TIMEOUT = 0x0102
-WAIT_FAILED = 0xFFFFFFFF
-INFINITE = 0xFFFFFFFF
-DUPLICATE_SAME_ACCESS = 0x00000002
-ENABLE_ECHO_INPUT = 0x0004
-ENABLE_LINE_INPUT = 0x0002
-ENABLE_PROCESSED_INPUT = 0x0001
-
-# Win32 API functions needed
-GetLastError = ctypes.windll.kernel32.GetLastError
-GetLastError.argtypes = []
-GetLastError.restype = DWORD
-
-CreateFile = ctypes.windll.kernel32.CreateFileW
-CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE]
-CreateFile.restype = HANDLE
-
-CreatePipe = ctypes.windll.kernel32.CreatePipe
-CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE),
- LPSECURITY_ATTRIBUTES, DWORD]
-CreatePipe.restype = BOOL
-
-CreateProcess = ctypes.windll.kernel32.CreateProcessW
-CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES,
- LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO,
- LPPROCESS_INFORMATION]
-CreateProcess.restype = BOOL
-
-GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
-GetExitCodeProcess.argtypes = [HANDLE, LPDWORD]
-GetExitCodeProcess.restype = BOOL
-
-GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
-GetCurrentProcess.argtypes = []
-GetCurrentProcess.restype = HANDLE
-
-ResumeThread = ctypes.windll.kernel32.ResumeThread
-ResumeThread.argtypes = [HANDLE]
-ResumeThread.restype = DWORD
-
-ReadFile = ctypes.windll.kernel32.ReadFile
-ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
-ReadFile.restype = BOOL
-
-WriteFile = ctypes.windll.kernel32.WriteFile
-WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
-WriteFile.restype = BOOL
-
-GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
-GetConsoleMode.argtypes = [HANDLE, LPDWORD]
-GetConsoleMode.restype = BOOL
-
-SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
-SetConsoleMode.argtypes = [HANDLE, DWORD]
-SetConsoleMode.restype = BOOL
-
-FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer
-FlushConsoleInputBuffer.argtypes = [HANDLE]
-FlushConsoleInputBuffer.restype = BOOL
-
-WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
-WaitForSingleObject.argtypes = [HANDLE, DWORD]
-WaitForSingleObject.restype = DWORD
-
-DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle
-DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE,
- DWORD, BOOL, DWORD]
-DuplicateHandle.restype = BOOL
-
-SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
-SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD]
-SetHandleInformation.restype = BOOL
-
-CloseHandle = ctypes.windll.kernel32.CloseHandle
-CloseHandle.argtypes = [HANDLE]
-CloseHandle.restype = BOOL
-
-CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
-CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)]
-CommandLineToArgvW.restype = POINTER(LPCWSTR)
-
-LocalFree = ctypes.windll.kernel32.LocalFree
-LocalFree.argtypes = [HLOCAL]
-LocalFree.restype = HLOCAL
-
-class AvoidUNCPath(object):
- """A context manager to protect command execution from UNC paths.
-
- In the Win32 API, commands can't be invoked with the cwd being a UNC path.
- This context manager temporarily changes directory to the 'C:' drive on
- entering, and restores the original working directory on exit.
-
- The context manager returns the starting working directory *if* it made a
- change and None otherwise, so that users can apply the necessary adjustment
- to their system calls in the event of a change.
-
- Examples
- --------
- ::
- cmd = 'dir'
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- os.system(cmd)
- """
- def __enter__(self):
- self.path = py3compat.getcwd()
- self.is_unc_path = self.path.startswith(r"\\")
- if self.is_unc_path:
- # change to c drive (as cmd.exe cannot handle UNC addresses)
- os.chdir("C:")
- return self.path
- else:
- # We return None to signal that there was no change in the working
- # directory
- return None
-
- def __exit__(self, exc_type, exc_value, traceback):
- if self.is_unc_path:
- os.chdir(self.path)
-
-
-class Win32ShellCommandController(object):
- """Runs a shell command in a 'with' context.
-
- This implementation is Win32-specific.
-
- Example:
- # Runs the command interactively with default console stdin/stdout
- with ShellCommandController('python -i') as scc:
- scc.run()
-
- # Runs the command using the provided functions for stdin/stdout
- def my_stdout_func(s):
- # print or save the string 's'
- write_to_stdout(s)
- def my_stdin_func():
- # If input is available, return it as a string.
- if input_available():
- return get_input()
- # If no input available, return None after a short delay to
- # keep from blocking.
- else:
- time.sleep(0.01)
- return None
-
- with ShellCommandController('python -i') as scc:
- scc.run(my_stdout_func, my_stdin_func)
- """
-
- def __init__(self, cmd, mergeout = True):
- """Initializes the shell command controller.
-
- The cmd is the program to execute, and mergeout is
- whether to blend stdout and stderr into one output
- in stdout. Merging them together in this fashion more
- reliably keeps stdout and stderr in the correct order,
- especially for interactive shell usage.
- """
- self.cmd = cmd
- self.mergeout = mergeout
-
- def __enter__(self):
- cmd = self.cmd
- mergeout = self.mergeout
-
- self.hstdout, self.hstdin, self.hstderr = None, None, None
- self.piProcInfo = None
- try:
- p_hstdout, c_hstdout, p_hstderr, \
- c_hstderr, p_hstdin, c_hstdin = [None]*6
-
- # SECURITY_ATTRIBUTES with inherit handle set to True
- saAttr = SECURITY_ATTRIBUTES()
- saAttr.nLength = ctypes.sizeof(saAttr)
- saAttr.bInheritHandle = True
- saAttr.lpSecurityDescriptor = None
-
- def create_pipe(uninherit):
- """Creates a Windows pipe, which consists of two handles.
-
- The 'uninherit' parameter controls which handle is not
- inherited by the child process.
- """
- handles = HANDLE(), HANDLE()
- if not CreatePipe(ctypes.byref(handles[0]),
- ctypes.byref(handles[1]), ctypes.byref(saAttr), 0):
- raise ctypes.WinError()
- if not SetHandleInformation(handles[uninherit],
- HANDLE_FLAG_INHERIT, 0):
- raise ctypes.WinError()
- return handles[0].value, handles[1].value
-
- p_hstdout, c_hstdout = create_pipe(uninherit=0)
- # 'mergeout' signals that stdout and stderr should be merged.
- # We do that by using one pipe for both of them.
- if mergeout:
- c_hstderr = HANDLE()
- if not DuplicateHandle(GetCurrentProcess(), c_hstdout,
- GetCurrentProcess(), ctypes.byref(c_hstderr),
- 0, True, DUPLICATE_SAME_ACCESS):
- raise ctypes.WinError()
- else:
- p_hstderr, c_hstderr = create_pipe(uninherit=0)
- c_hstdin, p_hstdin = create_pipe(uninherit=1)
-
- # Create the process object
- piProcInfo = PROCESS_INFORMATION()
- siStartInfo = STARTUPINFO()
- siStartInfo.cb = ctypes.sizeof(siStartInfo)
- siStartInfo.hStdInput = c_hstdin
- siStartInfo.hStdOutput = c_hstdout
- siStartInfo.hStdError = c_hstderr
- siStartInfo.dwFlags = STARTF_USESTDHANDLES
- dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE
-
- if not CreateProcess(None,
- u"cmd.exe /c " + cmd,
- None, None, True, dwCreationFlags,
- None, None, ctypes.byref(siStartInfo),
- ctypes.byref(piProcInfo)):
- raise ctypes.WinError()
-
- # Close this process's versions of the child handles
- CloseHandle(c_hstdin)
- c_hstdin = None
- CloseHandle(c_hstdout)
- c_hstdout = None
- if c_hstderr is not None:
- CloseHandle(c_hstderr)
- c_hstderr = None
-
- # Transfer ownership of the parent handles to the object
- self.hstdin = p_hstdin
- p_hstdin = None
- self.hstdout = p_hstdout
- p_hstdout = None
- if not mergeout:
- self.hstderr = p_hstderr
- p_hstderr = None
- self.piProcInfo = piProcInfo
-
- finally:
- if p_hstdin:
- CloseHandle(p_hstdin)
- if c_hstdin:
- CloseHandle(c_hstdin)
- if p_hstdout:
- CloseHandle(p_hstdout)
- if c_hstdout:
- CloseHandle(c_hstdout)
- if p_hstderr:
- CloseHandle(p_hstderr)
- if c_hstderr:
- CloseHandle(c_hstderr)
-
- return self
-
- def _stdin_thread(self, handle, hprocess, func, stdout_func):
- exitCode = DWORD()
- bytesWritten = DWORD(0)
- while True:
- #print("stdin thread loop start")
- # Get the input string (may be bytes or unicode)
- data = func()
-
- # None signals to poll whether the process has exited
- if data is None:
- #print("checking for process completion")
- if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)):
- raise ctypes.WinError()
- if exitCode.value != STILL_ACTIVE:
- return
- # TESTING: Does zero-sized writefile help?
- if not WriteFile(handle, "", 0,
- ctypes.byref(bytesWritten), None):
- raise ctypes.WinError()
- continue
- #print("\nGot str %s\n" % repr(data), file=sys.stderr)
-
- # Encode the string to the console encoding
- if isinstance(data, unicode): #FIXME: Python3
- data = data.encode('utf_8')
-
- # What we have now must be a string of bytes
- if not isinstance(data, str): #FIXME: Python3
- raise RuntimeError("internal stdin function string error")
-
- # An empty string signals EOF
- if len(data) == 0:
- return
-
- # In a Windows console, sometimes the input is echoed,
- # but sometimes not. How do we determine when to do this?
- stdout_func(data)
- # WriteFile may not accept all the data at once.
- # Loop until everything is processed
- while len(data) != 0:
- #print("Calling writefile")
- if not WriteFile(handle, data, len(data),
- ctypes.byref(bytesWritten), None):
- # This occurs at exit
- if GetLastError() == ERROR_NO_DATA:
- return
- raise ctypes.WinError()
- #print("Called writefile")
- data = data[bytesWritten.value:]
-
- def _stdout_thread(self, handle, func):
- # Allocate the output buffer
- data = ctypes.create_string_buffer(4096)
- while True:
- bytesRead = DWORD(0)
- if not ReadFile(handle, data, 4096,
- ctypes.byref(bytesRead), None):
- le = GetLastError()
- if le == ERROR_BROKEN_PIPE:
- return
- else:
- raise ctypes.WinError()
- # FIXME: Python3
- s = data.value[0:bytesRead.value]
- #print("\nv: %s" % repr(s), file=sys.stderr)
- func(s.decode('utf_8', 'replace'))
-
- def run(self, stdout_func = None, stdin_func = None, stderr_func = None):
- """Runs the process, using the provided functions for I/O.
-
- The function stdin_func should return strings whenever a
- character or characters become available.
- The functions stdout_func and stderr_func are called whenever
- something is printed to stdout or stderr, respectively.
- These functions are called from different threads (but not
- concurrently, because of the GIL).
- """
- if stdout_func is None and stdin_func is None and stderr_func is None:
- return self._run_stdio()
-
- if stderr_func is not None and self.mergeout:
- raise RuntimeError("Shell command was initiated with "
- "merged stdin/stdout, but a separate stderr_func "
- "was provided to the run() method")
-
- # Create a thread for each input/output handle
- stdin_thread = None
- threads = []
- if stdin_func:
- stdin_thread = threading.Thread(target=self._stdin_thread,
- args=(self.hstdin, self.piProcInfo.hProcess,
- stdin_func, stdout_func))
- threads.append(threading.Thread(target=self._stdout_thread,
- args=(self.hstdout, stdout_func)))
- if not self.mergeout:
- if stderr_func is None:
- stderr_func = stdout_func
- threads.append(threading.Thread(target=self._stdout_thread,
- args=(self.hstderr, stderr_func)))
- # Start the I/O threads and the process
- if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF:
- raise ctypes.WinError()
- if stdin_thread is not None:
- stdin_thread.start()
- for thread in threads:
- thread.start()
- # Wait for the process to complete
- if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \
- WAIT_FAILED:
- raise ctypes.WinError()
- # Wait for the I/O threads to complete
- for thread in threads:
- thread.join()
-
- # Wait for the stdin thread to complete
- if stdin_thread is not None:
- stdin_thread.join()
-
- def _stdin_raw_nonblock(self):
- """Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
- # WARNING: This is experimental, and produces inconsistent results.
- # It's possible for the handle not to be appropriate for use
- # with WaitForSingleObject, among other things.
- handle = msvcrt.get_osfhandle(sys.stdin.fileno())
- result = WaitForSingleObject(handle, 100)
- if result == WAIT_FAILED:
- raise ctypes.WinError()
- elif result == WAIT_TIMEOUT:
- print(".", end='')
- return None
- else:
- data = ctypes.create_string_buffer(256)
- bytesRead = DWORD(0)
- print('?', end='')
-
- if not ReadFile(handle, data, 256,
- ctypes.byref(bytesRead), None):
- raise ctypes.WinError()
- # This ensures the non-blocking works with an actual console
- # Not checking the error, so the processing will still work with
- # other handle types
- FlushConsoleInputBuffer(handle)
-
- data = data.value
- data = data.replace('\r\n', '\n')
- data = data.replace('\r', '\n')
- print(repr(data) + " ", end='')
- return data
-
- def _stdin_raw_block(self):
- """Use a blocking stdin read"""
- # The big problem with the blocking read is that it doesn't
- # exit when it's supposed to in all contexts. An extra
- # key-press may be required to trigger the exit.
- try:
- data = sys.stdin.read(1)
- data = data.replace('\r', '\n')
- return data
- except WindowsError as we:
- if we.winerror == ERROR_NO_DATA:
- # This error occurs when the pipe is closed
- return None
- else:
- # Otherwise let the error propagate
- raise we
-
- def _stdout_raw(self, s):
- """Writes the string to stdout"""
- print(s, end='', file=sys.stdout)
- sys.stdout.flush()
-
- def _stderr_raw(self, s):
- """Writes the string to stdout"""
- print(s, end='', file=sys.stderr)
- sys.stderr.flush()
-
- def _run_stdio(self):
- """Runs the process using the system standard I/O.
-
- IMPORTANT: stdin needs to be asynchronous, so the Python
- sys.stdin object is not used. Instead,
- msvcrt.kbhit/getwch are used asynchronously.
- """
- # Disable Line and Echo mode
- #lpMode = DWORD()
- #handle = msvcrt.get_osfhandle(sys.stdin.fileno())
- #if GetConsoleMode(handle, ctypes.byref(lpMode)):
- # set_console_mode = True
- # if not SetConsoleMode(handle, lpMode.value &
- # ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)):
- # raise ctypes.WinError()
-
- if self.mergeout:
- return self.run(stdout_func = self._stdout_raw,
- stdin_func = self._stdin_raw_block)
- else:
- return self.run(stdout_func = self._stdout_raw,
- stdin_func = self._stdin_raw_block,
- stderr_func = self._stderr_raw)
-
- # Restore the previous console mode
- #if set_console_mode:
- # if not SetConsoleMode(handle, lpMode.value):
- # raise ctypes.WinError()
-
- def __exit__(self, exc_type, exc_value, traceback):
- if self.hstdin:
- CloseHandle(self.hstdin)
- self.hstdin = None
- if self.hstdout:
- CloseHandle(self.hstdout)
- self.hstdout = None
- if self.hstderr:
- CloseHandle(self.hstderr)
- self.hstderr = None
- if self.piProcInfo is not None:
- CloseHandle(self.piProcInfo.hProcess)
- CloseHandle(self.piProcInfo.hThread)
- self.piProcInfo = None
-
-
-def system(cmd):
- """Win32 version of os.system() that works with network shares.
-
- Note that this implementation returns None, as meant for use in IPython.
-
- Parameters
- ----------
- cmd : str
- A command to be executed in the system shell.
-
- Returns
- -------
- None : we explicitly do NOT return the subprocess status code, as this
- utility is meant to be used extensively in IPython, where any return value
- would trigger :func:`sys.displayhook` calls.
- """
- with AvoidUNCPath() as path:
- if path is not None:
- cmd = '"pushd %s &&"%s' % (path, cmd)
- with Win32ShellCommandController(cmd) as scc:
- scc.run()
-
-
-if __name__ == "__main__":
- print("Test starting!")
- #system("cmd")
- system("python -i")
- print("Test finished!")
+"""Windows-specific implementation of process utilities with direct WinAPI.
+
+This file is meant to be used by process.py
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+from __future__ import print_function
+
+# stdlib
+import os, sys, threading
+import ctypes, msvcrt
+
+# local imports
+from . import py3compat
+
+# Win32 API types needed for the API calls
+from ctypes import POINTER
+from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \
+ ULONG, LPCWSTR
+LPDWORD = POINTER(DWORD)
+LPHANDLE = POINTER(HANDLE)
+ULONG_PTR = POINTER(ULONG)
+class SECURITY_ATTRIBUTES(ctypes.Structure):
+ _fields_ = [("nLength", DWORD),
+ ("lpSecurityDescriptor", LPVOID),
+ ("bInheritHandle", BOOL)]
+LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES)
+class STARTUPINFO(ctypes.Structure):
+ _fields_ = [("cb", DWORD),
+ ("lpReserved", LPCWSTR),
+ ("lpDesktop", LPCWSTR),
+ ("lpTitle", LPCWSTR),
+ ("dwX", DWORD),
+ ("dwY", DWORD),
+ ("dwXSize", DWORD),
+ ("dwYSize", DWORD),
+ ("dwXCountChars", DWORD),
+ ("dwYCountChars", DWORD),
+ ("dwFillAttribute", DWORD),
+ ("dwFlags", DWORD),
+ ("wShowWindow", WORD),
+ ("cbReserved2", WORD),
+ ("lpReserved2", LPVOID),
+ ("hStdInput", HANDLE),
+ ("hStdOutput", HANDLE),
+ ("hStdError", HANDLE)]
+LPSTARTUPINFO = POINTER(STARTUPINFO)
+class PROCESS_INFORMATION(ctypes.Structure):
+ _fields_ = [("hProcess", HANDLE),
+ ("hThread", HANDLE),
+ ("dwProcessId", DWORD),
+ ("dwThreadId", DWORD)]
+LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
+
+# Win32 API constants needed
+ERROR_HANDLE_EOF = 38
+ERROR_BROKEN_PIPE = 109
+ERROR_NO_DATA = 232
+HANDLE_FLAG_INHERIT = 0x0001
+STARTF_USESTDHANDLES = 0x0100
+CREATE_SUSPENDED = 0x0004
+CREATE_NEW_CONSOLE = 0x0010
+CREATE_NO_WINDOW = 0x08000000
+STILL_ACTIVE = 259
+WAIT_TIMEOUT = 0x0102
+WAIT_FAILED = 0xFFFFFFFF
+INFINITE = 0xFFFFFFFF
+DUPLICATE_SAME_ACCESS = 0x00000002
+ENABLE_ECHO_INPUT = 0x0004
+ENABLE_LINE_INPUT = 0x0002
+ENABLE_PROCESSED_INPUT = 0x0001
+
+# Win32 API functions needed
+GetLastError = ctypes.windll.kernel32.GetLastError
+GetLastError.argtypes = []
+GetLastError.restype = DWORD
+
+CreateFile = ctypes.windll.kernel32.CreateFileW
+CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE]
+CreateFile.restype = HANDLE
+
+CreatePipe = ctypes.windll.kernel32.CreatePipe
+CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE),
+ LPSECURITY_ATTRIBUTES, DWORD]
+CreatePipe.restype = BOOL
+
+CreateProcess = ctypes.windll.kernel32.CreateProcessW
+CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES,
+ LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO,
+ LPPROCESS_INFORMATION]
+CreateProcess.restype = BOOL
+
+GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
+GetExitCodeProcess.argtypes = [HANDLE, LPDWORD]
+GetExitCodeProcess.restype = BOOL
+
+GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
+GetCurrentProcess.argtypes = []
+GetCurrentProcess.restype = HANDLE
+
+ResumeThread = ctypes.windll.kernel32.ResumeThread
+ResumeThread.argtypes = [HANDLE]
+ResumeThread.restype = DWORD
+
+ReadFile = ctypes.windll.kernel32.ReadFile
+ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
+ReadFile.restype = BOOL
+
+WriteFile = ctypes.windll.kernel32.WriteFile
+WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
+WriteFile.restype = BOOL
+
+GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
+GetConsoleMode.argtypes = [HANDLE, LPDWORD]
+GetConsoleMode.restype = BOOL
+
+SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
+SetConsoleMode.argtypes = [HANDLE, DWORD]
+SetConsoleMode.restype = BOOL
+
+FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer
+FlushConsoleInputBuffer.argtypes = [HANDLE]
+FlushConsoleInputBuffer.restype = BOOL
+
+WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
+WaitForSingleObject.argtypes = [HANDLE, DWORD]
+WaitForSingleObject.restype = DWORD
+
+DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle
+DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE,
+ DWORD, BOOL, DWORD]
+DuplicateHandle.restype = BOOL
+
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD]
+SetHandleInformation.restype = BOOL
+
+CloseHandle = ctypes.windll.kernel32.CloseHandle
+CloseHandle.argtypes = [HANDLE]
+CloseHandle.restype = BOOL
+
+CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)]
+CommandLineToArgvW.restype = POINTER(LPCWSTR)
+
+LocalFree = ctypes.windll.kernel32.LocalFree
+LocalFree.argtypes = [HLOCAL]
+LocalFree.restype = HLOCAL
+
+class AvoidUNCPath(object):
+ """A context manager to protect command execution from UNC paths.
+
+ In the Win32 API, commands can't be invoked with the cwd being a UNC path.
+ This context manager temporarily changes directory to the 'C:' drive on
+ entering, and restores the original working directory on exit.
+
+ The context manager returns the starting working directory *if* it made a
+ change and None otherwise, so that users can apply the necessary adjustment
+ to their system calls in the event of a change.
+
+ Examples
+ --------
+ ::
+ cmd = 'dir'
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ os.system(cmd)
+ """
+ def __enter__(self):
+ self.path = py3compat.getcwd()
+ self.is_unc_path = self.path.startswith(r"\\")
+ if self.is_unc_path:
+ # change to c drive (as cmd.exe cannot handle UNC addresses)
+ os.chdir("C:")
+ return self.path
+ else:
+ # We return None to signal that there was no change in the working
+ # directory
+ return None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.is_unc_path:
+ os.chdir(self.path)
+
+
+class Win32ShellCommandController(object):
+ """Runs a shell command in a 'with' context.
+
+ This implementation is Win32-specific.
+
+ Example:
+ # Runs the command interactively with default console stdin/stdout
+    with Win32ShellCommandController('python -i') as scc:
+ scc.run()
+
+ # Runs the command using the provided functions for stdin/stdout
+ def my_stdout_func(s):
+ # print or save the string 's'
+ write_to_stdout(s)
+ def my_stdin_func():
+ # If input is available, return it as a string.
+ if input_available():
+ return get_input()
+ # If no input available, return None after a short delay to
+ # keep from blocking.
+ else:
+ time.sleep(0.01)
+ return None
+
+    with Win32ShellCommandController('python -i') as scc:
+ scc.run(my_stdout_func, my_stdin_func)
+ """
+
+ def __init__(self, cmd, mergeout = True):
+ """Initializes the shell command controller.
+
+        The cmd is the program to execute, and mergeout controls
+        whether stdout and stderr are blended into a single output
+        stream on stdout. Merging them in this fashion more reliably
+        keeps stdout and stderr in the correct order, especially for
+        interactive shell usage.
+ """
+ self.cmd = cmd
+ self.mergeout = mergeout
+
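+    # A minimal usage sketch (editor's illustration, not from the original
+    # docs): keeping stderr separate requires opting out of merging at
+    # construction time, e.g.
+    #
+    #   with Win32ShellCommandController('dir', mergeout=False) as scc:
+    #       scc.run(stdout_func=out_fn, stdin_func=in_fn, stderr_func=err_fn)
+    #
+    # where out_fn/in_fn/err_fn are hypothetical callbacks following the
+    # contract described in run() below.
+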
+ def __enter__(self):
+ cmd = self.cmd
+ mergeout = self.mergeout
+
+ self.hstdout, self.hstdin, self.hstderr = None, None, None
+ self.piProcInfo = None
+ try:
+ p_hstdout, c_hstdout, p_hstderr, \
+ c_hstderr, p_hstdin, c_hstdin = [None]*6
+
+ # SECURITY_ATTRIBUTES with inherit handle set to True
+ saAttr = SECURITY_ATTRIBUTES()
+ saAttr.nLength = ctypes.sizeof(saAttr)
+ saAttr.bInheritHandle = True
+ saAttr.lpSecurityDescriptor = None
+
+ def create_pipe(uninherit):
+ """Creates a Windows pipe, which consists of two handles.
+
+ The 'uninherit' parameter controls which handle is not
+ inherited by the child process.
+ """
+ handles = HANDLE(), HANDLE()
+ if not CreatePipe(ctypes.byref(handles[0]),
+ ctypes.byref(handles[1]), ctypes.byref(saAttr), 0):
+ raise ctypes.WinError()
+ if not SetHandleInformation(handles[uninherit],
+ HANDLE_FLAG_INHERIT, 0):
+ raise ctypes.WinError()
+ return handles[0].value, handles[1].value
+
+ p_hstdout, c_hstdout = create_pipe(uninherit=0)
+ # 'mergeout' signals that stdout and stderr should be merged.
+ # We do that by using one pipe for both of them.
+ if mergeout:
+ c_hstderr = HANDLE()
+ if not DuplicateHandle(GetCurrentProcess(), c_hstdout,
+ GetCurrentProcess(), ctypes.byref(c_hstderr),
+ 0, True, DUPLICATE_SAME_ACCESS):
+ raise ctypes.WinError()
+ else:
+ p_hstderr, c_hstderr = create_pipe(uninherit=0)
+ c_hstdin, p_hstdin = create_pipe(uninherit=1)
+
+ # Create the process object
+ piProcInfo = PROCESS_INFORMATION()
+ siStartInfo = STARTUPINFO()
+ siStartInfo.cb = ctypes.sizeof(siStartInfo)
+ siStartInfo.hStdInput = c_hstdin
+ siStartInfo.hStdOutput = c_hstdout
+ siStartInfo.hStdError = c_hstderr
+ siStartInfo.dwFlags = STARTF_USESTDHANDLES
+ dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE
+
+ if not CreateProcess(None,
+ u"cmd.exe /c " + cmd,
+ None, None, True, dwCreationFlags,
+ None, None, ctypes.byref(siStartInfo),
+ ctypes.byref(piProcInfo)):
+ raise ctypes.WinError()
+
+ # Close this process's versions of the child handles
+ CloseHandle(c_hstdin)
+ c_hstdin = None
+ CloseHandle(c_hstdout)
+ c_hstdout = None
+ if c_hstderr is not None:
+ CloseHandle(c_hstderr)
+ c_hstderr = None
+
+ # Transfer ownership of the parent handles to the object
+ self.hstdin = p_hstdin
+ p_hstdin = None
+ self.hstdout = p_hstdout
+ p_hstdout = None
+ if not mergeout:
+ self.hstderr = p_hstderr
+ p_hstderr = None
+ self.piProcInfo = piProcInfo
+
+ finally:
+ if p_hstdin:
+ CloseHandle(p_hstdin)
+ if c_hstdin:
+ CloseHandle(c_hstdin)
+ if p_hstdout:
+ CloseHandle(p_hstdout)
+ if c_hstdout:
+ CloseHandle(c_hstdout)
+ if p_hstderr:
+ CloseHandle(p_hstderr)
+ if c_hstderr:
+ CloseHandle(c_hstderr)
+
+ return self
+
+ def _stdin_thread(self, handle, hprocess, func, stdout_func):
+ exitCode = DWORD()
+ bytesWritten = DWORD(0)
+ while True:
+ #print("stdin thread loop start")
+ # Get the input string (may be bytes or unicode)
+ data = func()
+
+ # None signals to poll whether the process has exited
+ if data is None:
+ #print("checking for process completion")
+ if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)):
+ raise ctypes.WinError()
+ if exitCode.value != STILL_ACTIVE:
+ return
+ # TESTING: Does zero-sized writefile help?
+ if not WriteFile(handle, "", 0,
+ ctypes.byref(bytesWritten), None):
+ raise ctypes.WinError()
+ continue
+ #print("\nGot str %s\n" % repr(data), file=sys.stderr)
+
+ # Encode the string to the console encoding
+ if isinstance(data, unicode): #FIXME: Python3
+ data = data.encode('utf_8')
+
+ # What we have now must be a string of bytes
+ if not isinstance(data, str): #FIXME: Python3
+ raise RuntimeError("internal stdin function string error")
+
+ # An empty string signals EOF
+ if len(data) == 0:
+ return
+
+ # In a windows console, sometimes the input is echoed,
+ # but sometimes not. How do we determine when to do this?
+ stdout_func(data)
+ # WriteFile may not accept all the data at once.
+ # Loop until everything is processed
+ while len(data) != 0:
+ #print("Calling writefile")
+ if not WriteFile(handle, data, len(data),
+ ctypes.byref(bytesWritten), None):
+ # This occurs at exit
+ if GetLastError() == ERROR_NO_DATA:
+ return
+ raise ctypes.WinError()
+ #print("Called writefile")
+ data = data[bytesWritten.value:]
+
+ def _stdout_thread(self, handle, func):
+ # Allocate the output buffer
+ data = ctypes.create_string_buffer(4096)
+ while True:
+ bytesRead = DWORD(0)
+ if not ReadFile(handle, data, 4096,
+ ctypes.byref(bytesRead), None):
+ le = GetLastError()
+ if le == ERROR_BROKEN_PIPE:
+ return
+ else:
+ raise ctypes.WinError()
+ # FIXME: Python3
+ s = data.value[0:bytesRead.value]
+ #print("\nv: %s" % repr(s), file=sys.stderr)
+ func(s.decode('utf_8', 'replace'))
+
+ def run(self, stdout_func = None, stdin_func = None, stderr_func = None):
+ """Runs the process, using the provided functions for I/O.
+
+ The function stdin_func should return strings whenever a
+ character or characters become available.
+ The functions stdout_func and stderr_func are called whenever
+ something is printed to stdout or stderr, respectively.
+ These functions are called from different threads (but not
+ concurrently, because of the GIL).
+ """
+ if stdout_func is None and stdin_func is None and stderr_func is None:
+ return self._run_stdio()
+
+ if stderr_func is not None and self.mergeout:
+ raise RuntimeError("Shell command was initiated with "
+ "merged stdin/stdout, but a separate stderr_func "
+ "was provided to the run() method")
+
+ # Create a thread for each input/output handle
+ stdin_thread = None
+ threads = []
+ if stdin_func:
+ stdin_thread = threading.Thread(target=self._stdin_thread,
+ args=(self.hstdin, self.piProcInfo.hProcess,
+ stdin_func, stdout_func))
+ threads.append(threading.Thread(target=self._stdout_thread,
+ args=(self.hstdout, stdout_func)))
+ if not self.mergeout:
+ if stderr_func is None:
+ stderr_func = stdout_func
+ threads.append(threading.Thread(target=self._stdout_thread,
+ args=(self.hstderr, stderr_func)))
+ # Start the I/O threads and the process
+ if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF:
+ raise ctypes.WinError()
+ if stdin_thread is not None:
+ stdin_thread.start()
+ for thread in threads:
+ thread.start()
+ # Wait for the process to complete
+ if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \
+ WAIT_FAILED:
+ raise ctypes.WinError()
+ # Wait for the I/O threads to complete
+ for thread in threads:
+ thread.join()
+
+ # Wait for the stdin thread to complete
+ if stdin_thread is not None:
+ stdin_thread.join()
+
+ def _stdin_raw_nonblock(self):
+ """Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
+ # WARNING: This is experimental, and produces inconsistent results.
+ # It's possible for the handle not to be appropriate for use
+ # with WaitForSingleObject, among other things.
+ handle = msvcrt.get_osfhandle(sys.stdin.fileno())
+ result = WaitForSingleObject(handle, 100)
+ if result == WAIT_FAILED:
+ raise ctypes.WinError()
+ elif result == WAIT_TIMEOUT:
+ print(".", end='')
+ return None
+ else:
+ data = ctypes.create_string_buffer(256)
+ bytesRead = DWORD(0)
+ print('?', end='')
+
+ if not ReadFile(handle, data, 256,
+ ctypes.byref(bytesRead), None):
+ raise ctypes.WinError()
+ # This ensures the non-blocking works with an actual console
+ # Not checking the error, so the processing will still work with
+ # other handle types
+ FlushConsoleInputBuffer(handle)
+
+ data = data.value
+ data = data.replace('\r\n', '\n')
+ data = data.replace('\r', '\n')
+ print(repr(data) + " ", end='')
+ return data
+
+ def _stdin_raw_block(self):
+ """Use a blocking stdin read"""
+ # The big problem with the blocking read is that it doesn't
+ # exit when it's supposed to in all contexts. An extra
+ # key-press may be required to trigger the exit.
+ try:
+ data = sys.stdin.read(1)
+ data = data.replace('\r', '\n')
+ return data
+ except WindowsError as we:
+ if we.winerror == ERROR_NO_DATA:
+ # This error occurs when the pipe is closed
+ return None
+ else:
+ # Otherwise let the error propagate
+ raise we
+
+ def _stdout_raw(self, s):
+ """Writes the string to stdout"""
+ print(s, end='', file=sys.stdout)
+ sys.stdout.flush()
+
+ def _stderr_raw(self, s):
+ """Writes the string to stdout"""
+ print(s, end='', file=sys.stderr)
+ sys.stderr.flush()
+
+ def _run_stdio(self):
+ """Runs the process using the system standard I/O.
+
+ IMPORTANT: stdin needs to be asynchronous, so the Python
+ sys.stdin object is not used. Instead,
+ msvcrt.kbhit/getwch are used asynchronously.
+ """
+ # Disable Line and Echo mode
+ #lpMode = DWORD()
+ #handle = msvcrt.get_osfhandle(sys.stdin.fileno())
+ #if GetConsoleMode(handle, ctypes.byref(lpMode)):
+ # set_console_mode = True
+ # if not SetConsoleMode(handle, lpMode.value &
+ # ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)):
+ # raise ctypes.WinError()
+
+ if self.mergeout:
+ return self.run(stdout_func = self._stdout_raw,
+ stdin_func = self._stdin_raw_block)
+ else:
+ return self.run(stdout_func = self._stdout_raw,
+ stdin_func = self._stdin_raw_block,
+ stderr_func = self._stderr_raw)
+
+ # Restore the previous console mode
+ #if set_console_mode:
+ # if not SetConsoleMode(handle, lpMode.value):
+ # raise ctypes.WinError()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.hstdin:
+ CloseHandle(self.hstdin)
+ self.hstdin = None
+ if self.hstdout:
+ CloseHandle(self.hstdout)
+ self.hstdout = None
+ if self.hstderr:
+ CloseHandle(self.hstderr)
+ self.hstderr = None
+ if self.piProcInfo is not None:
+ CloseHandle(self.piProcInfo.hProcess)
+ CloseHandle(self.piProcInfo.hThread)
+ self.piProcInfo = None
+
+
+def system(cmd):
+ """Win32 version of os.system() that works with network shares.
+
+    Note that this implementation returns None, as it is meant for use in IPython.
+
+ Parameters
+ ----------
+ cmd : str
+ A command to be executed in the system shell.
+
+ Returns
+ -------
+ None : we explicitly do NOT return the subprocess status code, as this
+ utility is meant to be used extensively in IPython, where any return value
+ would trigger :func:`sys.displayhook` calls.
+ """
+ with AvoidUNCPath() as path:
+ if path is not None:
+ cmd = '"pushd %s &&"%s' % (path, cmd)
+ with Win32ShellCommandController(cmd) as scc:
+ scc.run()
+
+
+if __name__ == "__main__":
+ print("Test starting!")
+ #system("cmd")
+ system("python -i")
+ print("Test finished!")
diff --git a/contrib/python/ipython/py2/IPython/utils/_signatures.py b/contrib/python/ipython/py2/IPython/utils/_signatures.py
index 9f403618ce..20f52b98ed 100644
--- a/contrib/python/ipython/py2/IPython/utils/_signatures.py
+++ b/contrib/python/ipython/py2/IPython/utils/_signatures.py
@@ -1,818 +1,818 @@
-"""Function signature objects for callables.
-
-Backport of Python 3.3's function signature tools from the inspect module,
-modified to be compatible with Python 2.7 and 3.2+.
-"""
-
-#-----------------------------------------------------------------------------
-# Python 3.3 stdlib inspect.py is public domain
-#
-# Backports Copyright (C) 2013 Aaron Iles
-# Used under Apache License Version 2.0
+"""Function signature objects for callables.
+
+Backport of Python 3.3's function signature tools from the inspect module,
+modified to be compatible with Python 2.7 and 3.2+.
+"""
+
+#-----------------------------------------------------------------------------
+# Python 3.3 stdlib inspect.py is public domain
#
-# Further Changes are Copyright (C) 2013 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-import itertools
-import functools
-import re
-import types
+# Backports Copyright (C) 2013 Aaron Iles
+# Used under Apache License Version 2.0
+#
+# Further Changes are Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import itertools
+import functools
+import re
+import types
import inspect
-
-
-# patch for single-file
-# we don't support 2.6, so we can just import OrderedDict
-from collections import OrderedDict
-
-__version__ = '0.3'
-# end patch
-
-__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
-
-
-_WrapperDescriptor = type(type.__call__)
-_MethodWrapper = type(all.__call__)
-
-_NonUserDefinedCallables = (_WrapperDescriptor,
- _MethodWrapper,
- types.BuiltinFunctionType)
-
-
-def formatannotation(annotation, base_module=None):
- if isinstance(annotation, type):
- if annotation.__module__ in ('builtins', '__builtin__', base_module):
- return annotation.__name__
- return annotation.__module__+'.'+annotation.__name__
- return repr(annotation)
-
-
-def _get_user_defined_method(cls, method_name, *nested):
- try:
- if cls is type:
- return
- meth = getattr(cls, method_name)
- for name in nested:
- meth = getattr(meth, name, meth)
- except AttributeError:
- return
- else:
- if not isinstance(meth, _NonUserDefinedCallables):
-            # Once '__signature__' is added to 'C'-level
-            # callables, this check won't be necessary
- return meth
-
-
-def signature(obj):
- '''Get a signature object for the passed callable.'''
-
- if not callable(obj):
- raise TypeError('{0!r} is not a callable object'.format(obj))
-
+
+
+# patch for single-file
+# we don't support 2.6, so we can just import OrderedDict
+from collections import OrderedDict
+
+__version__ = '0.3'
+# end patch
+
+__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
+
+
+_WrapperDescriptor = type(type.__call__)
+_MethodWrapper = type(all.__call__)
+
+_NonUserDefinedCallables = (_WrapperDescriptor,
+ _MethodWrapper,
+ types.BuiltinFunctionType)
+
+
+def formatannotation(annotation, base_module=None):
+ if isinstance(annotation, type):
+ if annotation.__module__ in ('builtins', '__builtin__', base_module):
+ return annotation.__name__
+ return annotation.__module__+'.'+annotation.__name__
+ return repr(annotation)
+
+
+def _get_user_defined_method(cls, method_name, *nested):
+ try:
+ if cls is type:
+ return
+ meth = getattr(cls, method_name)
+ for name in nested:
+ meth = getattr(meth, name, meth)
+ except AttributeError:
+ return
+ else:
+ if not isinstance(meth, _NonUserDefinedCallables):
+            # Once '__signature__' is added to 'C'-level
+            # callables, this check won't be necessary
+ return meth
+
+
+def signature(obj):
+ '''Get a signature object for the passed callable.'''
+
+ if not callable(obj):
+ raise TypeError('{0!r} is not a callable object'.format(obj))
+
if inspect.ismethod(obj):
- if obj.__self__ is None:
- # Unbound method - treat it as a function (no distinction in Py 3)
- obj = obj.__func__
- else:
- # Bound method: trim off the first parameter (typically self or cls)
- sig = signature(obj.__func__)
- return sig.replace(parameters=tuple(sig.parameters.values())[1:])
-
- try:
- sig = obj.__signature__
- except AttributeError:
- pass
- else:
- if sig is not None:
- return sig
-
- try:
- # Was this function wrapped by a decorator?
- wrapped = obj.__wrapped__
- except AttributeError:
- pass
- else:
- return signature(wrapped)
-
+ if obj.__self__ is None:
+ # Unbound method - treat it as a function (no distinction in Py 3)
+ obj = obj.__func__
+ else:
+ # Bound method: trim off the first parameter (typically self or cls)
+ sig = signature(obj.__func__)
+ return sig.replace(parameters=tuple(sig.parameters.values())[1:])
+
+ try:
+ sig = obj.__signature__
+ except AttributeError:
+ pass
+ else:
+ if sig is not None:
+ return sig
+
+ try:
+ # Was this function wrapped by a decorator?
+ wrapped = obj.__wrapped__
+ except AttributeError:
+ pass
+ else:
+ return signature(wrapped)
+
if inspect.isfunction(obj):
- return Signature.from_function(obj)
-
- if isinstance(obj, functools.partial):
- sig = signature(obj.func)
-
- new_params = OrderedDict(sig.parameters.items())
-
- partial_args = obj.args or ()
- partial_keywords = obj.keywords or {}
- try:
- ba = sig.bind_partial(*partial_args, **partial_keywords)
- except TypeError as ex:
- msg = 'partial object {0!r} has incorrect arguments'.format(obj)
- raise ValueError(msg)
-
- for arg_name, arg_value in ba.arguments.items():
- param = new_params[arg_name]
- if arg_name in partial_keywords:
- # We set a new default value, because the following code
- # is correct:
- #
- # >>> def foo(a): print(a)
- # >>> print(partial(partial(foo, a=10), a=20)())
- # 20
- # >>> print(partial(partial(foo, a=10), a=20)(a=30))
- # 30
- #
- # So, with 'partial' objects, passing a keyword argument is
- # like setting a new default value for the corresponding
- # parameter
- #
- # We also mark this parameter with '_partial_kwarg'
- # flag. Later, in '_bind', the 'default' value of this
- # parameter will be added to 'kwargs', to simulate
- # the 'functools.partial' real call.
- new_params[arg_name] = param.replace(default=arg_value,
- _partial_kwarg=True)
-
- elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
- not param._partial_kwarg):
- new_params.pop(arg_name)
-
- return sig.replace(parameters=new_params.values())
-
- sig = None
- if isinstance(obj, type):
- # obj is a class or a metaclass
-
- # First, let's see if it has an overloaded __call__ defined
- # in its metaclass
- call = _get_user_defined_method(type(obj), '__call__')
- if call is not None:
- sig = signature(call)
- else:
- # Now we check if the 'obj' class has a '__new__' method
- new = _get_user_defined_method(obj, '__new__')
- if new is not None:
- sig = signature(new)
- else:
- # Finally, we should have at least __init__ implemented
- init = _get_user_defined_method(obj, '__init__')
- if init is not None:
- sig = signature(init)
- elif not isinstance(obj, _NonUserDefinedCallables):
- # An object with __call__
- # We also check that the 'obj' is not an instance of
- # _WrapperDescriptor or _MethodWrapper to avoid
- # infinite recursion (and even potential segfault)
- call = _get_user_defined_method(type(obj), '__call__', 'im_func')
- if call is not None:
- sig = signature(call)
-
- if sig is not None:
- return sig
-
- if isinstance(obj, types.BuiltinFunctionType):
- # Raise a nicer error message for builtins
- msg = 'no signature found for builtin function {0!r}'.format(obj)
- raise ValueError(msg)
-
- raise ValueError('callable {0!r} is not supported by signature'.format(obj))
-
-
-class _void(object):
- '''A private marker - used in Parameter & Signature'''
-
-
-class _empty(object):
- pass
-
-
-class _ParameterKind(int):
- def __new__(self, *args, **kwargs):
- obj = int.__new__(self, *args)
- obj._name = kwargs['name']
- return obj
-
- def __str__(self):
- return self._name
-
- def __repr__(self):
- return '<_ParameterKind: {0!r}>'.format(self._name)
-
-
-_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
-_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
-_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
-_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
-_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
-
-
-class Parameter(object):
- '''Represents a parameter in a function signature.
-
- Has the following public attributes:
-
- * name : str
- The name of the parameter as a string.
- * default : object
- The default value for the parameter if specified. If the
- parameter has no default value, this attribute is not set.
- * annotation
- The annotation for the parameter if specified. If the
- parameter has no annotation, this attribute is not set.
- * kind : str
- Describes how argument values are bound to the parameter.
- Possible values: `Parameter.POSITIONAL_ONLY`,
- `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
- `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
- '''
-
- __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
-
- POSITIONAL_ONLY = _POSITIONAL_ONLY
- POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
- VAR_POSITIONAL = _VAR_POSITIONAL
- KEYWORD_ONLY = _KEYWORD_ONLY
- VAR_KEYWORD = _VAR_KEYWORD
-
- empty = _empty
-
- def __init__(self, name, kind, default=_empty, annotation=_empty,
- _partial_kwarg=False):
-
- if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
- _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
- raise ValueError("invalid value for 'Parameter.kind' attribute")
- self._kind = kind
-
- if default is not _empty:
- if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
- msg = '{0} parameters cannot have default values'.format(kind)
- raise ValueError(msg)
- self._default = default
- self._annotation = annotation
-
- if name is None:
- if kind != _POSITIONAL_ONLY:
- raise ValueError("None is not a valid name for a "
- "non-positional-only parameter")
- self._name = name
- else:
- name = str(name)
- if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
- msg = '{0!r} is not a valid parameter name'.format(name)
- raise ValueError(msg)
- self._name = name
-
- self._partial_kwarg = _partial_kwarg
-
- @property
- def name(self):
- return self._name
-
- @property
- def default(self):
- return self._default
-
- @property
- def annotation(self):
- return self._annotation
-
- @property
- def kind(self):
- return self._kind
-
- def replace(self, name=_void, kind=_void, annotation=_void,
- default=_void, _partial_kwarg=_void):
- '''Creates a customized copy of the Parameter.'''
-
- if name is _void:
- name = self._name
-
- if kind is _void:
- kind = self._kind
-
- if annotation is _void:
- annotation = self._annotation
-
- if default is _void:
- default = self._default
-
- if _partial_kwarg is _void:
- _partial_kwarg = self._partial_kwarg
-
- return type(self)(name, kind, default=default, annotation=annotation,
- _partial_kwarg=_partial_kwarg)
-
- def __str__(self):
- kind = self.kind
-
- formatted = self._name
- if kind == _POSITIONAL_ONLY:
- if formatted is None:
- formatted = ''
- formatted = '<{0}>'.format(formatted)
-
- # Add annotation and default value
- if self._annotation is not _empty:
- formatted = '{0}:{1}'.format(formatted,
- formatannotation(self._annotation))
-
- if self._default is not _empty:
- formatted = '{0}={1}'.format(formatted, repr(self._default))
-
- if kind == _VAR_POSITIONAL:
- formatted = '*' + formatted
- elif kind == _VAR_KEYWORD:
- formatted = '**' + formatted
-
- return formatted
-
- def __repr__(self):
- return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
- id(self), self.name)
-
- def __hash__(self):
- msg = "unhashable type: '{0}'".format(self.__class__.__name__)
- raise TypeError(msg)
-
- def __eq__(self, other):
- return (issubclass(other.__class__, Parameter) and
- self._name == other._name and
- self._kind == other._kind and
- self._default == other._default and
- self._annotation == other._annotation)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class BoundArguments(object):
- '''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
- to the function's parameters.
-
- Has the following public attributes:
-
- arguments : :class:`collections.OrderedDict`
- An ordered mutable mapping of parameters' names to arguments' values.
- Does not contain arguments' default values.
- signature : :class:`Signature`
- The Signature object that created this instance.
-    args : tuple
-        Tuple of positional argument values.
-    kwargs : dict
-        Dict of keyword argument values.
- '''
-
- def __init__(self, signature, arguments):
- self.arguments = arguments
- self._signature = signature
-
- @property
- def signature(self):
- return self._signature
-
- @property
- def args(self):
- args = []
- for param_name, param in self._signature.parameters.items():
- if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
- param._partial_kwarg):
- # Keyword arguments mapped by 'functools.partial'
- # (Parameter._partial_kwarg is True) are mapped
- # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
- # KEYWORD_ONLY
- break
-
- try:
- arg = self.arguments[param_name]
- except KeyError:
- # We're done here. Other arguments
- # will be mapped in 'BoundArguments.kwargs'
- break
- else:
- if param.kind == _VAR_POSITIONAL:
- # *args
- args.extend(arg)
- else:
- # plain argument
- args.append(arg)
-
- return tuple(args)
-
- @property
- def kwargs(self):
- kwargs = {}
- kwargs_started = False
- for param_name, param in self._signature.parameters.items():
- if not kwargs_started:
- if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
- param._partial_kwarg):
- kwargs_started = True
- else:
- if param_name not in self.arguments:
- kwargs_started = True
- continue
-
- if not kwargs_started:
- continue
-
- try:
- arg = self.arguments[param_name]
- except KeyError:
- pass
- else:
- if param.kind == _VAR_KEYWORD:
- # **kwargs
- kwargs.update(arg)
- else:
- # plain keyword argument
- kwargs[param_name] = arg
-
- return kwargs
-
- def __hash__(self):
- msg = "unhashable type: '{0}'".format(self.__class__.__name__)
- raise TypeError(msg)
-
- def __eq__(self, other):
- return (issubclass(other.__class__, BoundArguments) and
- self.signature == other.signature and
- self.arguments == other.arguments)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class Signature(object):
- '''A Signature object represents the overall signature of a function.
- It stores a Parameter object for each parameter accepted by the
- function, as well as information specific to the function itself.
-
- A Signature object has the following public attributes:
-
- parameters : :class:`collections.OrderedDict`
- An ordered mapping of parameters' names to the corresponding
- Parameter objects (keyword-only arguments are in the same order
- as listed in `code.co_varnames`).
- return_annotation
- The annotation for the return type of the function if specified.
- If the function has no annotation for its return type, this
- attribute is not set.
- '''
-
- __slots__ = ('_return_annotation', '_parameters')
-
- _parameter_cls = Parameter
- _bound_arguments_cls = BoundArguments
-
- empty = _empty
-
- def __init__(self, parameters=None, return_annotation=_empty,
- __validate_parameters__=True):
- '''Constructs Signature from the given list of Parameter
- objects and 'return_annotation'. All arguments are optional.
- '''
-
- if parameters is None:
- params = OrderedDict()
- else:
- if __validate_parameters__:
- params = OrderedDict()
- top_kind = _POSITIONAL_ONLY
-
- for idx, param in enumerate(parameters):
- kind = param.kind
- if kind < top_kind:
- msg = 'wrong parameter order: {0} before {1}'
- msg = msg.format(top_kind, param.kind)
- raise ValueError(msg)
- else:
- top_kind = kind
-
- name = param.name
- if name is None:
- name = str(idx)
- param = param.replace(name=name)
-
- if name in params:
- msg = 'duplicate parameter name: {0!r}'.format(name)
- raise ValueError(msg)
- params[name] = param
- else:
- params = OrderedDict(((param.name, param)
- for param in parameters))
-
- self._parameters = params
- self._return_annotation = return_annotation
-
- @classmethod
- def from_function(cls, func):
-        '''Constructs Signature for the given Python function'''
-
+ return Signature.from_function(obj)
+
+ if isinstance(obj, functools.partial):
+ sig = signature(obj.func)
+
+ new_params = OrderedDict(sig.parameters.items())
+
+ partial_args = obj.args or ()
+ partial_keywords = obj.keywords or {}
+ try:
+ ba = sig.bind_partial(*partial_args, **partial_keywords)
+ except TypeError as ex:
+ msg = 'partial object {0!r} has incorrect arguments'.format(obj)
+ raise ValueError(msg)
+
+ for arg_name, arg_value in ba.arguments.items():
+ param = new_params[arg_name]
+ if arg_name in partial_keywords:
+ # We set a new default value, because the following code
+ # is correct:
+ #
+ # >>> def foo(a): print(a)
+ # >>> print(partial(partial(foo, a=10), a=20)())
+ # 20
+ # >>> print(partial(partial(foo, a=10), a=20)(a=30))
+ # 30
+ #
+ # So, with 'partial' objects, passing a keyword argument is
+ # like setting a new default value for the corresponding
+ # parameter
+ #
+ # We also mark this parameter with '_partial_kwarg'
+ # flag. Later, in '_bind', the 'default' value of this
+ # parameter will be added to 'kwargs', to simulate
+ # the 'functools.partial' real call.
+ new_params[arg_name] = param.replace(default=arg_value,
+ _partial_kwarg=True)
+
+ elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
+ not param._partial_kwarg):
+ new_params.pop(arg_name)
+
+ return sig.replace(parameters=new_params.values())
+
+ sig = None
+ if isinstance(obj, type):
+ # obj is a class or a metaclass
+
+ # First, let's see if it has an overloaded __call__ defined
+ # in its metaclass
+ call = _get_user_defined_method(type(obj), '__call__')
+ if call is not None:
+ sig = signature(call)
+ else:
+ # Now we check if the 'obj' class has a '__new__' method
+ new = _get_user_defined_method(obj, '__new__')
+ if new is not None:
+ sig = signature(new)
+ else:
+ # Finally, we should have at least __init__ implemented
+ init = _get_user_defined_method(obj, '__init__')
+ if init is not None:
+ sig = signature(init)
+ elif not isinstance(obj, _NonUserDefinedCallables):
+ # An object with __call__
+ # We also check that the 'obj' is not an instance of
+ # _WrapperDescriptor or _MethodWrapper to avoid
+ # infinite recursion (and even potential segfault)
+ call = _get_user_defined_method(type(obj), '__call__', 'im_func')
+ if call is not None:
+ sig = signature(call)
+
+ if sig is not None:
+ return sig
+
+ if isinstance(obj, types.BuiltinFunctionType):
+ # Raise a nicer error message for builtins
+ msg = 'no signature found for builtin function {0!r}'.format(obj)
+ raise ValueError(msg)
+
+ raise ValueError('callable {0!r} is not supported by signature'.format(obj))
+
+
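+# An illustrative sketch (editor's example, not part of the backport): for
+#
+#     def f(a, b=1, *args, **kwargs):
+#         pass
+#
+# signature(f).parameters is an ordered mapping over ('a', 'b', 'args',
+# 'kwargs'), and str(signature(f)) renders as '(a, b=1, *args, **kwargs)'.
+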
+class _void(object):
+ '''A private marker - used in Parameter & Signature'''
+
+
+class _empty(object):
+ pass
+
+
+class _ParameterKind(int):
+ def __new__(self, *args, **kwargs):
+ obj = int.__new__(self, *args)
+ obj._name = kwargs['name']
+ return obj
+
+ def __str__(self):
+ return self._name
+
+ def __repr__(self):
+ return '<_ParameterKind: {0!r}>'.format(self._name)
+
+
+_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
+_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
+_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
+_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
+_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
+
+
+class Parameter(object):
+ '''Represents a parameter in a function signature.
+
+ Has the following public attributes:
+
+ * name : str
+ The name of the parameter as a string.
+ * default : object
+ The default value for the parameter if specified. If the
+ parameter has no default value, this attribute is not set.
+ * annotation
+ The annotation for the parameter if specified. If the
+ parameter has no annotation, this attribute is not set.
+ * kind : str
+ Describes how argument values are bound to the parameter.
+ Possible values: `Parameter.POSITIONAL_ONLY`,
+ `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
+ `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
+ '''
+
+ __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
+
+ POSITIONAL_ONLY = _POSITIONAL_ONLY
+ POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
+ VAR_POSITIONAL = _VAR_POSITIONAL
+ KEYWORD_ONLY = _KEYWORD_ONLY
+ VAR_KEYWORD = _VAR_KEYWORD
+
+ empty = _empty
+
+ def __init__(self, name, kind, default=_empty, annotation=_empty,
+ _partial_kwarg=False):
+
+ if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
+ _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
+ raise ValueError("invalid value for 'Parameter.kind' attribute")
+ self._kind = kind
+
+ if default is not _empty:
+ if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
+ msg = '{0} parameters cannot have default values'.format(kind)
+ raise ValueError(msg)
+ self._default = default
+ self._annotation = annotation
+
+ if name is None:
+ if kind != _POSITIONAL_ONLY:
+ raise ValueError("None is not a valid name for a "
+ "non-positional-only parameter")
+ self._name = name
+ else:
+ name = str(name)
+ if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
+ msg = '{0!r} is not a valid parameter name'.format(name)
+ raise ValueError(msg)
+ self._name = name
+
+ self._partial_kwarg = _partial_kwarg
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def default(self):
+ return self._default
+
+ @property
+ def annotation(self):
+ return self._annotation
+
+ @property
+ def kind(self):
+ return self._kind
+
+ def replace(self, name=_void, kind=_void, annotation=_void,
+ default=_void, _partial_kwarg=_void):
+ '''Creates a customized copy of the Parameter.'''
+
+ if name is _void:
+ name = self._name
+
+ if kind is _void:
+ kind = self._kind
+
+ if annotation is _void:
+ annotation = self._annotation
+
+ if default is _void:
+ default = self._default
+
+ if _partial_kwarg is _void:
+ _partial_kwarg = self._partial_kwarg
+
+ return type(self)(name, kind, default=default, annotation=annotation,
+ _partial_kwarg=_partial_kwarg)
+
+ def __str__(self):
+ kind = self.kind
+
+ formatted = self._name
+ if kind == _POSITIONAL_ONLY:
+ if formatted is None:
+ formatted = ''
+ formatted = '<{0}>'.format(formatted)
+
+ # Add annotation and default value
+ if self._annotation is not _empty:
+ formatted = '{0}:{1}'.format(formatted,
+ formatannotation(self._annotation))
+
+ if self._default is not _empty:
+ formatted = '{0}={1}'.format(formatted, repr(self._default))
+
+ if kind == _VAR_POSITIONAL:
+ formatted = '*' + formatted
+ elif kind == _VAR_KEYWORD:
+ formatted = '**' + formatted
+
+ return formatted
+
+ def __repr__(self):
+ return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
+ id(self), self.name)
+
+ def __hash__(self):
+ msg = "unhashable type: '{0}'".format(self.__class__.__name__)
+ raise TypeError(msg)
+
+ def __eq__(self, other):
+ return (issubclass(other.__class__, Parameter) and
+ self._name == other._name and
+ self._kind == other._kind and
+ self._default == other._default and
+ self._annotation == other._annotation)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
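+# Illustrative (editor's example): Parameter('x',
+# Parameter.POSITIONAL_OR_KEYWORD, default=0).replace(name='y') returns a
+# new Parameter that str()-renders as 'y=0'; replace() never mutates the
+# original object.
+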
+class BoundArguments(object):
+ '''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
+ to the function's parameters.
+
+ Has the following public attributes:
+
+ arguments : :class:`collections.OrderedDict`
+ An ordered mutable mapping of parameters' names to arguments' values.
+ Does not contain arguments' default values.
+ signature : :class:`Signature`
+ The Signature object that created this instance.
+    args : tuple
+        Tuple of positional argument values.
+    kwargs : dict
+        Dict of keyword argument values.
+ '''
+
+ def __init__(self, signature, arguments):
+ self.arguments = arguments
+ self._signature = signature
+
+ @property
+ def signature(self):
+ return self._signature
+
+ @property
+ def args(self):
+ args = []
+ for param_name, param in self._signature.parameters.items():
+ if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
+ param._partial_kwarg):
+ # Keyword arguments mapped by 'functools.partial'
+ # (Parameter._partial_kwarg is True) are mapped
+ # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
+ # KEYWORD_ONLY
+ break
+
+ try:
+ arg = self.arguments[param_name]
+ except KeyError:
+ # We're done here. Other arguments
+ # will be mapped in 'BoundArguments.kwargs'
+ break
+ else:
+ if param.kind == _VAR_POSITIONAL:
+ # *args
+ args.extend(arg)
+ else:
+ # plain argument
+ args.append(arg)
+
+ return tuple(args)
+
+ @property
+ def kwargs(self):
+ kwargs = {}
+ kwargs_started = False
+ for param_name, param in self._signature.parameters.items():
+ if not kwargs_started:
+ if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
+ param._partial_kwarg):
+ kwargs_started = True
+ else:
+ if param_name not in self.arguments:
+ kwargs_started = True
+ continue
+
+ if not kwargs_started:
+ continue
+
+ try:
+ arg = self.arguments[param_name]
+ except KeyError:
+ pass
+ else:
+ if param.kind == _VAR_KEYWORD:
+ # **kwargs
+ kwargs.update(arg)
+ else:
+ # plain keyword argument
+ kwargs[param_name] = arg
+
+ return kwargs
+
+ def __hash__(self):
+ msg = "unhashable type: '{0}'".format(self.__class__.__name__)
+ raise TypeError(msg)
+
+ def __eq__(self, other):
+ return (issubclass(other.__class__, BoundArguments) and
+ self.signature == other.signature and
+ self.arguments == other.arguments)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
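+# Illustrative (editor's example): for a signature built over
+# def g(a, *args, **kw), sig.bind(1, 2, 3, x=4) yields a BoundArguments
+# whose .args is (1, 2, 3) and whose .kwargs is {'x': 4}.
+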
+class Signature(object):
+ '''A Signature object represents the overall signature of a function.
+ It stores a Parameter object for each parameter accepted by the
+ function, as well as information specific to the function itself.
+
+ A Signature object has the following public attributes:
+
+ parameters : :class:`collections.OrderedDict`
+ An ordered mapping of parameters' names to the corresponding
+ Parameter objects (keyword-only arguments are in the same order
+ as listed in `code.co_varnames`).
+ return_annotation
+ The annotation for the return type of the function if specified.
+ If the function has no annotation for its return type, this
+ attribute is not set.
+ '''
+
+ __slots__ = ('_return_annotation', '_parameters')
+
+ _parameter_cls = Parameter
+ _bound_arguments_cls = BoundArguments
+
+ empty = _empty
+
+ def __init__(self, parameters=None, return_annotation=_empty,
+ __validate_parameters__=True):
+ '''Constructs Signature from the given list of Parameter
+ objects and 'return_annotation'. All arguments are optional.
+ '''
+
+ if parameters is None:
+ params = OrderedDict()
+ else:
+ if __validate_parameters__:
+ params = OrderedDict()
+ top_kind = _POSITIONAL_ONLY
+
+ for idx, param in enumerate(parameters):
+ kind = param.kind
+ if kind < top_kind:
+ msg = 'wrong parameter order: {0} before {1}'
+ msg = msg.format(top_kind, param.kind)
+ raise ValueError(msg)
+ else:
+ top_kind = kind
+
+ name = param.name
+ if name is None:
+ name = str(idx)
+ param = param.replace(name=name)
+
+ if name in params:
+ msg = 'duplicate parameter name: {0!r}'.format(name)
+ raise ValueError(msg)
+ params[name] = param
+ else:
+ params = OrderedDict(((param.name, param)
+ for param in parameters))
+
+ self._parameters = params
+ self._return_annotation = return_annotation
+
+ @classmethod
+ def from_function(cls, func):
+        '''Constructs Signature for the given Python function'''
+
if not inspect.isfunction(func):
- raise TypeError('{0!r} is not a Python function'.format(func))
-
- Parameter = cls._parameter_cls
-
- # Parameter information.
- func_code = func.__code__
- pos_count = func_code.co_argcount
- arg_names = func_code.co_varnames
- positional = tuple(arg_names[:pos_count])
- keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
- keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
- annotations = getattr(func, '__annotations__', {})
- defaults = func.__defaults__
- kwdefaults = getattr(func, '__kwdefaults__', None)
-
- if defaults:
- pos_default_count = len(defaults)
- else:
- pos_default_count = 0
-
- parameters = []
-
- # Non-keyword-only parameters w/o defaults.
- non_default_count = pos_count - pos_default_count
- for name in positional[:non_default_count]:
- annotation = annotations.get(name, _empty)
- parameters.append(Parameter(name, annotation=annotation,
- kind=_POSITIONAL_OR_KEYWORD))
-
- # ... w/ defaults.
- for offset, name in enumerate(positional[non_default_count:]):
- annotation = annotations.get(name, _empty)
- parameters.append(Parameter(name, annotation=annotation,
- kind=_POSITIONAL_OR_KEYWORD,
- default=defaults[offset]))
-
- # *args
- if func_code.co_flags & 0x04:
- name = arg_names[pos_count + keyword_only_count]
- annotation = annotations.get(name, _empty)
- parameters.append(Parameter(name, annotation=annotation,
- kind=_VAR_POSITIONAL))
-
- # Keyword-only parameters.
- for name in keyword_only:
- default = _empty
- if kwdefaults is not None:
- default = kwdefaults.get(name, _empty)
-
- annotation = annotations.get(name, _empty)
- parameters.append(Parameter(name, annotation=annotation,
- kind=_KEYWORD_ONLY,
- default=default))
- # **kwargs
- if func_code.co_flags & 0x08:
- index = pos_count + keyword_only_count
- if func_code.co_flags & 0x04:
- index += 1
-
- name = arg_names[index]
- annotation = annotations.get(name, _empty)
- parameters.append(Parameter(name, annotation=annotation,
- kind=_VAR_KEYWORD))
-
- return cls(parameters,
- return_annotation=annotations.get('return', _empty),
- __validate_parameters__=False)
-
- @property
- def parameters(self):
- try:
- return types.MappingProxyType(self._parameters)
- except AttributeError:
- return OrderedDict(self._parameters.items())
-
- @property
- def return_annotation(self):
- return self._return_annotation
-
- def replace(self, parameters=_void, return_annotation=_void):
- '''Creates a customized copy of the Signature.
- Pass 'parameters' and/or 'return_annotation' arguments
- to override them in the new copy.
- '''
-
- if parameters is _void:
- parameters = self.parameters.values()
-
- if return_annotation is _void:
- return_annotation = self._return_annotation
-
- return type(self)(parameters,
- return_annotation=return_annotation)
-
- def __hash__(self):
- msg = "unhashable type: '{0}'".format(self.__class__.__name__)
- raise TypeError(msg)
-
- def __eq__(self, other):
- if (not issubclass(type(other), Signature) or
- self.return_annotation != other.return_annotation or
- len(self.parameters) != len(other.parameters)):
- return False
-
- other_positions = dict((param, idx)
- for idx, param in enumerate(other.parameters.keys()))
-
- for idx, (param_name, param) in enumerate(self.parameters.items()):
- if param.kind == _KEYWORD_ONLY:
- try:
- other_param = other.parameters[param_name]
- except KeyError:
- return False
- else:
- if param != other_param:
- return False
- else:
- try:
- other_idx = other_positions[param_name]
- except KeyError:
- return False
- else:
- if (idx != other_idx or
- param != other.parameters[param_name]):
- return False
-
- return True
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def _bind(self, args, kwargs, partial=False):
- '''Private method. Don't use directly.'''
-
- arguments = OrderedDict()
-
- parameters = iter(self.parameters.values())
- parameters_ex = ()
- arg_vals = iter(args)
-
- if partial:
- # Support for binding arguments to 'functools.partial' objects.
- # See 'functools.partial' case in 'signature()' implementation
- # for details.
- for param_name, param in self.parameters.items():
- if (param._partial_kwarg and param_name not in kwargs):
- # Simulating 'functools.partial' behavior
- kwargs[param_name] = param.default
-
- while True:
- # Let's iterate through the positional arguments and corresponding
- # parameters
- try:
- arg_val = next(arg_vals)
- except StopIteration:
- # No more positional arguments
- try:
- param = next(parameters)
- except StopIteration:
- # No more parameters. That's it. Just need to check that
- # we have no `kwargs` after this while loop
- break
- else:
- if param.kind == _VAR_POSITIONAL:
- # That's OK, just empty *args. Let's start parsing
- # kwargs
- break
- elif param.name in kwargs:
- if param.kind == _POSITIONAL_ONLY:
- msg = '{arg!r} parameter is positional only, ' \
- 'but was passed as a keyword'
- msg = msg.format(arg=param.name)
- raise TypeError(msg)
- parameters_ex = (param,)
- break
- elif (param.kind == _VAR_KEYWORD or
- param.default is not _empty):
- # That's fine too - we have a default value for this
-                        # parameter. So, let's start parsing `kwargs`, starting
- # with the current parameter
- parameters_ex = (param,)
- break
- else:
- if partial:
- parameters_ex = (param,)
- break
- else:
- msg = '{arg!r} parameter lacking default value'
- msg = msg.format(arg=param.name)
- raise TypeError(msg)
- else:
- # We have a positional argument to process
- try:
- param = next(parameters)
- except StopIteration:
- raise TypeError('too many positional arguments')
- else:
- if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
- # Looks like we have no parameter for this positional
- # argument
- raise TypeError('too many positional arguments')
-
- if param.kind == _VAR_POSITIONAL:
- # We have an '*args'-like argument, let's fill it with
- # all positional arguments we have left and move on to
- # the next phase
- values = [arg_val]
- values.extend(arg_vals)
- arguments[param.name] = tuple(values)
- break
-
- if param.name in kwargs:
- raise TypeError('multiple values for argument '
- '{arg!r}'.format(arg=param.name))
-
- arguments[param.name] = arg_val
-
- # Now, we iterate through the remaining parameters to process
- # keyword arguments
- kwargs_param = None
- for param in itertools.chain(parameters_ex, parameters):
- if param.kind == _POSITIONAL_ONLY:
- # This should never happen in case of a properly built
- # Signature object (but let's have this check here
- # to ensure correct behaviour just in case)
- raise TypeError('{arg!r} parameter is positional only, '
- 'but was passed as a keyword'. \
- format(arg=param.name))
-
- if param.kind == _VAR_KEYWORD:
- # Memorize that we have a '**kwargs'-like parameter
- kwargs_param = param
- continue
-
- param_name = param.name
- try:
- arg_val = kwargs.pop(param_name)
- except KeyError:
- # We have no value for this parameter. It's fine though,
- # if it has a default value, or it is an '*args'-like
- # parameter, left alone by the processing of positional
- # arguments.
- if (not partial and param.kind != _VAR_POSITIONAL and
- param.default is _empty):
- raise TypeError('{arg!r} parameter lacking default value'. \
- format(arg=param_name))
-
- else:
- arguments[param_name] = arg_val
-
- if kwargs:
- if kwargs_param is not None:
- # Process our '**kwargs'-like parameter
- arguments[kwargs_param.name] = kwargs
- else:
- raise TypeError('too many keyword arguments')
-
- return self._bound_arguments_cls(self, arguments)
-
- def bind(self, *args, **kwargs):
-        '''Get a :class:`BoundArguments` object that maps the passed `args`
-        and `kwargs` to the function's signature. Raises :exc:`TypeError`
-        if the passed arguments cannot be bound.
- '''
- return self._bind(args, kwargs)
-
- def bind_partial(self, *args, **kwargs):
-        '''Get a :class:`BoundArguments` object that partially maps the
-        passed `args` and `kwargs` to the function's signature.
-        Raises :exc:`TypeError` if the passed arguments cannot be bound.
- '''
- return self._bind(args, kwargs, partial=True)
-
- def __str__(self):
- result = []
- render_kw_only_separator = True
- for idx, param in enumerate(self.parameters.values()):
- formatted = str(param)
-
- kind = param.kind
- if kind == _VAR_POSITIONAL:
- # OK, we have an '*args'-like parameter, so we won't need
- # a '*' to separate keyword-only arguments
- render_kw_only_separator = False
- elif kind == _KEYWORD_ONLY and render_kw_only_separator:
- # We have a keyword-only parameter to render and we haven't
- # rendered an '*args'-like parameter before, so add a '*'
- # separator to the parameters list ("foo(arg1, *, arg2)" case)
- result.append('*')
- # This condition should be only triggered once, so
- # reset the flag
- render_kw_only_separator = False
-
- result.append(formatted)
-
- rendered = '({0})'.format(', '.join(result))
-
- if self.return_annotation is not _empty:
- anno = formatannotation(self.return_annotation)
- rendered += ' -> {0}'.format(anno)
-
- return rendered
-
+ raise TypeError('{0!r} is not a Python function'.format(func))
+
+ Parameter = cls._parameter_cls
+
+ # Parameter information.
+ func_code = func.__code__
+ pos_count = func_code.co_argcount
+ arg_names = func_code.co_varnames
+ positional = tuple(arg_names[:pos_count])
+ keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
+ keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
+ annotations = getattr(func, '__annotations__', {})
+ defaults = func.__defaults__
+ kwdefaults = getattr(func, '__kwdefaults__', None)
+
+ if defaults:
+ pos_default_count = len(defaults)
+ else:
+ pos_default_count = 0
+
+ parameters = []
+
+ # Non-keyword-only parameters w/o defaults.
+ non_default_count = pos_count - pos_default_count
+ for name in positional[:non_default_count]:
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_POSITIONAL_OR_KEYWORD))
+
+ # ... w/ defaults.
+ for offset, name in enumerate(positional[non_default_count:]):
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_POSITIONAL_OR_KEYWORD,
+ default=defaults[offset]))
+
+ # *args
+ if func_code.co_flags & 0x04:
+ name = arg_names[pos_count + keyword_only_count]
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_VAR_POSITIONAL))
+
+ # Keyword-only parameters.
+ for name in keyword_only:
+ default = _empty
+ if kwdefaults is not None:
+ default = kwdefaults.get(name, _empty)
+
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_KEYWORD_ONLY,
+ default=default))
+ # **kwargs
+ if func_code.co_flags & 0x08:
+ index = pos_count + keyword_only_count
+ if func_code.co_flags & 0x04:
+ index += 1
+
+ name = arg_names[index]
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_VAR_KEYWORD))
+
+ return cls(parameters,
+ return_annotation=annotations.get('return', _empty),
+ __validate_parameters__=False)
+
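+    # Note: the 0x04 and 0x08 masks above are CPython's CO_VARARGS and
+    # CO_VARKEYWORDS code flags (also available as inspect.CO_VARARGS and
+    # inspect.CO_VARKEYWORDS).
+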
+ @property
+ def parameters(self):
+ try:
+ return types.MappingProxyType(self._parameters)
+ except AttributeError:
+ return OrderedDict(self._parameters.items())
+
+ @property
+ def return_annotation(self):
+ return self._return_annotation
+
+ def replace(self, parameters=_void, return_annotation=_void):
+ '''Creates a customized copy of the Signature.
+ Pass 'parameters' and/or 'return_annotation' arguments
+ to override them in the new copy.
+ '''
+
+ if parameters is _void:
+ parameters = self.parameters.values()
+
+ if return_annotation is _void:
+ return_annotation = self._return_annotation
+
+ return type(self)(parameters,
+ return_annotation=return_annotation)
+
+ def __hash__(self):
+ msg = "unhashable type: '{0}'".format(self.__class__.__name__)
+ raise TypeError(msg)
+
+ def __eq__(self, other):
+ if (not issubclass(type(other), Signature) or
+ self.return_annotation != other.return_annotation or
+ len(self.parameters) != len(other.parameters)):
+ return False
+
+ other_positions = dict((param, idx)
+ for idx, param in enumerate(other.parameters.keys()))
+
+ for idx, (param_name, param) in enumerate(self.parameters.items()):
+ if param.kind == _KEYWORD_ONLY:
+ try:
+ other_param = other.parameters[param_name]
+ except KeyError:
+ return False
+ else:
+ if param != other_param:
+ return False
+ else:
+ try:
+ other_idx = other_positions[param_name]
+ except KeyError:
+ return False
+ else:
+ if (idx != other_idx or
+ param != other.parameters[param_name]):
+ return False
+
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _bind(self, args, kwargs, partial=False):
+ '''Private method. Don't use directly.'''
+
+ arguments = OrderedDict()
+
+ parameters = iter(self.parameters.values())
+ parameters_ex = ()
+ arg_vals = iter(args)
+
+ if partial:
+ # Support for binding arguments to 'functools.partial' objects.
+ # See 'functools.partial' case in 'signature()' implementation
+ # for details.
+ for param_name, param in self.parameters.items():
+ if (param._partial_kwarg and param_name not in kwargs):
+ # Simulating 'functools.partial' behavior
+ kwargs[param_name] = param.default
+
+ while True:
+ # Let's iterate through the positional arguments and corresponding
+ # parameters
+ try:
+ arg_val = next(arg_vals)
+ except StopIteration:
+ # No more positional arguments
+ try:
+ param = next(parameters)
+ except StopIteration:
+ # No more parameters. That's it. Just need to check that
+ # we have no `kwargs` after this while loop
+ break
+ else:
+ if param.kind == _VAR_POSITIONAL:
+ # That's OK, just empty *args. Let's start parsing
+ # kwargs
+ break
+ elif param.name in kwargs:
+ if param.kind == _POSITIONAL_ONLY:
+ msg = '{arg!r} parameter is positional only, ' \
+ 'but was passed as a keyword'
+ msg = msg.format(arg=param.name)
+ raise TypeError(msg)
+ parameters_ex = (param,)
+ break
+ elif (param.kind == _VAR_KEYWORD or
+ param.default is not _empty):
+ # That's fine too - we have a default value for this
+                        # parameter. So, let's start parsing `kwargs`, starting
+ # with the current parameter
+ parameters_ex = (param,)
+ break
+ else:
+ if partial:
+ parameters_ex = (param,)
+ break
+ else:
+ msg = '{arg!r} parameter lacking default value'
+ msg = msg.format(arg=param.name)
+ raise TypeError(msg)
+ else:
+ # We have a positional argument to process
+ try:
+ param = next(parameters)
+ except StopIteration:
+ raise TypeError('too many positional arguments')
+ else:
+ if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+ # Looks like we have no parameter for this positional
+ # argument
+ raise TypeError('too many positional arguments')
+
+ if param.kind == _VAR_POSITIONAL:
+ # We have an '*args'-like argument, let's fill it with
+ # all positional arguments we have left and move on to
+ # the next phase
+ values = [arg_val]
+ values.extend(arg_vals)
+ arguments[param.name] = tuple(values)
+ break
+
+ if param.name in kwargs:
+ raise TypeError('multiple values for argument '
+ '{arg!r}'.format(arg=param.name))
+
+ arguments[param.name] = arg_val
+
+ # Now, we iterate through the remaining parameters to process
+ # keyword arguments
+ kwargs_param = None
+ for param in itertools.chain(parameters_ex, parameters):
+ if param.kind == _POSITIONAL_ONLY:
+ # This should never happen in case of a properly built
+ # Signature object (but let's have this check here
+ # to ensure correct behaviour just in case)
+ raise TypeError('{arg!r} parameter is positional only, '
+ 'but was passed as a keyword'. \
+ format(arg=param.name))
+
+ if param.kind == _VAR_KEYWORD:
+ # Memorize that we have a '**kwargs'-like parameter
+ kwargs_param = param
+ continue
+
+ param_name = param.name
+ try:
+ arg_val = kwargs.pop(param_name)
+ except KeyError:
+ # We have no value for this parameter. It's fine though,
+ # if it has a default value, or it is an '*args'-like
+ # parameter, left alone by the processing of positional
+ # arguments.
+ if (not partial and param.kind != _VAR_POSITIONAL and
+ param.default is _empty):
+ raise TypeError('{arg!r} parameter lacking default value'. \
+ format(arg=param_name))
+
+ else:
+ arguments[param_name] = arg_val
+
+ if kwargs:
+ if kwargs_param is not None:
+ # Process our '**kwargs'-like parameter
+ arguments[kwargs_param.name] = kwargs
+ else:
+ raise TypeError('too many keyword arguments')
+
+ return self._bound_arguments_cls(self, arguments)
+
+ def bind(self, *args, **kwargs):
+        '''Get a :class:`BoundArguments` object that maps the passed `args`
+        and `kwargs` to the function's signature. Raises :exc:`TypeError`
+        if the passed arguments cannot be bound.
+ '''
+ return self._bind(args, kwargs)
+
+ def bind_partial(self, *args, **kwargs):
+        '''Get a :class:`BoundArguments` object that partially maps the
+        passed `args` and `kwargs` to the function's signature.
+        Raises :exc:`TypeError` if the passed arguments cannot be bound.
+ '''
+ return self._bind(args, kwargs, partial=True)
+
+ def __str__(self):
+ result = []
+ render_kw_only_separator = True
+ for idx, param in enumerate(self.parameters.values()):
+ formatted = str(param)
+
+ kind = param.kind
+ if kind == _VAR_POSITIONAL:
+ # OK, we have an '*args'-like parameter, so we won't need
+ # a '*' to separate keyword-only arguments
+ render_kw_only_separator = False
+ elif kind == _KEYWORD_ONLY and render_kw_only_separator:
+ # We have a keyword-only parameter to render and we haven't
+ # rendered an '*args'-like parameter before, so add a '*'
+ # separator to the parameters list ("foo(arg1, *, arg2)" case)
+ result.append('*')
+ # This condition should be only triggered once, so
+ # reset the flag
+ render_kw_only_separator = False
+
+ result.append(formatted)
+
+ rendered = '({0})'.format(', '.join(result))
+
+ if self.return_annotation is not _empty:
+ anno = formatannotation(self.return_annotation)
+ rendered += ' -> {0}'.format(anno)
+
+ return rendered
+
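For orientation, a minimal usage sketch of the Signature backport restored above. The connect() function is an invented example, and the backport import path is an assumption (the diff does not name the module); on Python 3 the same API ships as inspect.signature:

    try:
        from inspect import signature                     # Python 3 stdlib
    except ImportError:
        from IPython.utils.signatures import signature    # assumed backport path

    def connect(host, port=5432, *options, **settings):
        """Toy function used only to exercise the API."""

    sig = signature(connect)
    print(sig)                               # (host, port=5432, *options, **settings)

    bound = sig.bind('db.local', port=5433)  # map call arguments to parameters
    print(list(bound.arguments.items()))     # [('host', 'db.local'), ('port', 5433)]

    try:
        sig.bind(port=5433)                  # 'host' is required and has no default
    except TypeError as exc:
        print(exc)  # backport message: 'host' parameter lacking default value

bind() performs the full mapping implemented in _bind() above; bind_partial() runs the same machinery with partial=True, so an incomplete argument set (functools.partial style) is accepted instead of raising.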
diff --git a/contrib/python/ipython/py2/IPython/utils/_sysinfo.py b/contrib/python/ipython/py2/IPython/utils/_sysinfo.py
index f3422bbb3c..21dd2fcceb 100644
--- a/contrib/python/ipython/py2/IPython/utils/_sysinfo.py
+++ b/contrib/python/ipython/py2/IPython/utils/_sysinfo.py
@@ -1,2 +1,2 @@
-# GENERATED BY setup.py
+# GENERATED BY setup.py
commit = u"2348ebbe4"
diff --git a/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py b/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py
index ffd7cc5e71..195df96ee5 100644
--- a/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py
+++ b/contrib/python/ipython/py2/IPython/utils/_tokenize_py2.py
@@ -1,439 +1,439 @@
-"""Patched version of standard library tokenize, to deal with various bugs.
-
-Patches
-
-- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
- manually applied.
-- Newlines in comments and blank lines should be either NL or NEWLINE, depending
- on whether they are in a multi-line statement. Filed as Python issue #17061.
-
--------------------------------------------------------------------------------
-Tokenization help for Python programs.
-
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-
- the token type (see token.py)
- the token (a string)
- the starting (row, column) indices of the token (a 2-tuple of ints)
- the ending (row, column) indices of the token (a 2-tuple of ints)
- the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators
-
-Older entry points
- tokenize_loop(readline, tokeneater)
- tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
-from __future__ import print_function
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
- 'Skip Montanaro, Raymond Hettinger')
-
-import string, re
-from token import *
-
-import token
-__all__ = [x for x in dir(token) if not x.startswith("_")]
-__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
-del x
-del token
-
-__all__ += ["TokenError"]
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-N_TOKENS += 2
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-
-Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
-Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
-Binnumber = r'0[bB][01]+[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
- r"//=?",
- r"[+\-*/%&|^=<>]=?",
- r"~")
-
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'[:;.,`@]')
-Funny = group(Operator, Bracket, Special)
-
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-
-# First (or only) line of ' or " string.
-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
- group("'", r'\\\r?\n'),
- r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
- group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-
-tokenprog, pseudoprog, single3prog, double3prog = map(
- re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
- "'''": single3prog, '"""': double3prog,
- "r'''": single3prog, 'r"""': double3prog,
- "u'''": single3prog, 'u"""': double3prog,
- "ur'''": single3prog, 'ur"""': double3prog,
- "R'''": single3prog, 'R"""': double3prog,
- "U'''": single3prog, 'U"""': double3prog,
- "uR'''": single3prog, 'uR"""': double3prog,
- "Ur'''": single3prog, 'Ur"""': double3prog,
- "UR'''": single3prog, 'UR"""': double3prog,
- "b'''": single3prog, 'b"""': double3prog,
- "br'''": single3prog, 'br"""': double3prog,
- "B'''": single3prog, 'B"""': double3prog,
- "bR'''": single3prog, 'bR"""': double3prog,
- "Br'''": single3prog, 'Br"""': double3prog,
- "BR'''": single3prog, 'BR"""': double3prog,
- 'r': None, 'R': None, 'u': None, 'U': None,
- 'b': None, 'B': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
- "r'''", 'r"""', "R'''", 'R"""',
- "u'''", 'u"""', "U'''", 'U"""',
- "ur'''", 'ur"""', "Ur'''", 'Ur"""',
- "uR'''", 'uR"""', "UR'''", 'UR"""',
- "b'''", 'b"""', "B'''", 'B"""',
- "br'''", 'br"""', "Br'''", 'Br"""',
- "bR'''", 'bR"""', "BR'''", 'BR"""'):
- triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "u'", 'u"', "U'", 'U"',
- "ur'", 'ur"', "Ur'", 'Ur"',
- "uR'", 'uR"', "UR'", 'UR"',
- "b'", 'b"', "B'", 'B"',
- "br'", 'br"', "Br'", 'Br"',
- "bR'", 'bR"', "BR'", 'BR"' ):
- single_quoted[t] = t
-
-tabsize = 8
-
-class TokenError(Exception): pass
-
-class StopTokenizing(Exception): pass
-
-def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
- srow, scol = srow_scol
- erow, ecol = erow_ecol
- print("%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token)))
-
-def tokenize(readline, tokeneater=printtoken):
- """
- The tokenize() function accepts two parameters: one representing the
- input stream, and one providing an output mechanism for tokenize().
-
- The first parameter, readline, must be a callable object which provides
- the same interface as the readline() method of built-in file objects.
- Each call to the function should return one line of input as a string.
-
- The second parameter, tokeneater, must also be a callable object. It is
- called once for each token, with five arguments, corresponding to the
- tuples generated by generate_tokens().
- """
- try:
- tokenize_loop(readline, tokeneater)
- except StopTokenizing:
- pass
-
-# backwards compatible interface
-def tokenize_loop(readline, tokeneater):
- for token_info in generate_tokens(readline):
- tokeneater(*token_info)
-
-class Untokenizer:
-
- def __init__(self):
- self.tokens = []
- self.prev_row = 1
- self.prev_col = 0
-
-    def add_whitespace(self, tok_type, start):
- row, col = start
- assert row >= self.prev_row
- col_offset = col - self.prev_col
- if col_offset > 0:
- self.tokens.append(" " * col_offset)
- elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
- # Line was backslash-continued
- self.tokens.append(" ")
-
- def untokenize(self, tokens):
- iterable = iter(tokens)
- for t in iterable:
- if len(t) == 2:
- self.compat(t, iterable)
- break
- tok_type, token, start, end = t[:4]
-            self.add_whitespace(tok_type, start)
- self.tokens.append(token)
- self.prev_row, self.prev_col = end
- if tok_type in (NEWLINE, NL):
- self.prev_row += 1
- self.prev_col = 0
- return "".join(self.tokens)
-
- def compat(self, token, iterable):
- # This import is here to avoid problems when the itertools
- # module is not built yet and tokenize is imported.
- from itertools import chain
- startline = False
- prevstring = False
- indents = []
- toks_append = self.tokens.append
- for tok in chain([token], iterable):
- toknum, tokval = tok[:2]
-
- if toknum in (NAME, NUMBER):
- tokval += ' '
-
- # Insert a space between two consecutive strings
- if toknum == STRING:
- if prevstring:
- tokval = ' ' + tokval
- prevstring = True
- else:
- prevstring = False
-
- if toknum == INDENT:
- indents.append(tokval)
- continue
- elif toknum == DEDENT:
- indents.pop()
- continue
- elif toknum in (NEWLINE, NL):
- startline = True
- elif startline and indents:
- toks_append(indents[-1])
- startline = False
- toks_append(tokval)
-
-def untokenize(iterable):
- """Transform tokens back into Python source code.
-
- Each element returned by the iterable must be a token sequence
- with at least two elements, a token number and token value. If
- only two tokens are passed, the resulting output is poor.
-
- Round-trip invariant for full input:
- Untokenized source will match input source exactly
-
-    Round-trip invariant for limited input:
-        # Output text will tokenize back to the input
- t1 = [tok[:2] for tok in generate_tokens(f.readline)]
- newcode = untokenize(t1)
- readline = iter(newcode.splitlines(1)).next
- t2 = [tok[:2] for tok in generate_tokens(readline)]
- assert t1 == t2
- """
- ut = Untokenizer()
- return ut.untokenize(iterable)
-
-def generate_tokens(readline):
- """
-    The generate_tokens() generator requires one argument, readline, which
- must be a callable object which provides the same interface as the
- readline() method of built-in file objects. Each call to the function
- should return one line of input as a string. Alternately, readline
- can be a callable function terminating with StopIteration:
- readline = open(myfile).next # Example of alternate readline
-
- The generator produces 5-tuples with these members: the token type; the
- token string; a 2-tuple (srow, scol) of ints specifying the row and
- column where the token begins in the source; a 2-tuple (erow, ecol) of
- ints specifying the row and column where the token ends in the source;
- and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
- """
- lnum = parenlev = continued = 0
- namechars, numchars = string.ascii_letters + '_', '0123456789'
- contstr, needcont = '', 0
- contline = None
- indents = [0]
-
- while 1: # loop over lines in stream
- try:
- line = readline()
- except StopIteration:
- line = ''
- lnum += 1
- pos, max = 0, len(line)
-
- if contstr: # continued string
- if not line:
- raise TokenError("EOF in multi-line string", strstart)
- endmatch = endprog.match(line)
- if endmatch:
- pos = end = endmatch.end(0)
- yield (STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line)
- contstr, needcont = '', 0
- contline = None
- elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
- yield (ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline)
- contstr = ''
- contline = None
- continue
- else:
- contstr = contstr + line
- contline = contline + line
- continue
-
- elif parenlev == 0 and not continued: # new statement
- if not line: break
- column = 0
- while pos < max: # measure leading whitespace
- if line[pos] == ' ':
- column += 1
- elif line[pos] == '\t':
- column = (column//tabsize + 1)*tabsize
- elif line[pos] == '\f':
- column = 0
- else:
- break
- pos += 1
- if pos == max:
- break
-
- if line[pos] in '#\r\n': # skip comments or blank lines
- if line[pos] == '#':
- comment_token = line[pos:].rstrip('\r\n')
- nl_pos = pos + len(comment_token)
- yield (COMMENT, comment_token,
- (lnum, pos), (lnum, pos + len(comment_token)), line)
- yield (NEWLINE, line[nl_pos:],
- (lnum, nl_pos), (lnum, len(line)), line)
- else:
- yield (NEWLINE, line[pos:],
- (lnum, pos), (lnum, len(line)), line)
- continue
-
- if column > indents[-1]: # count indents or dedents
- indents.append(column)
- yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
- while column < indents[-1]:
- if column not in indents:
- raise IndentationError(
- "unindent does not match any outer indentation level",
- ("<tokenize>", lnum, pos, line))
- indents = indents[:-1]
- yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
-
- else: # continued statement
- if not line:
- raise TokenError("EOF in multi-line statement", (lnum, 0))
- continued = 0
-
- while pos < max:
- pseudomatch = pseudoprog.match(line, pos)
- if pseudomatch: # scan for tokens
- start, end = pseudomatch.span(1)
- spos, epos, pos = (lnum, start), (lnum, end), end
- token, initial = line[start:end], line[start]
-
- if initial in numchars or \
- (initial == '.' and token != '.'): # ordinary number
- yield (NUMBER, token, spos, epos, line)
- elif initial in '\r\n':
- yield (NL if parenlev > 0 else NEWLINE,
- token, spos, epos, line)
- elif initial == '#':
- assert not token.endswith("\n")
- yield (COMMENT, token, spos, epos, line)
- elif token in triple_quoted:
- endprog = endprogs[token]
- endmatch = endprog.match(line, pos)
- if endmatch: # all on one line
- pos = endmatch.end(0)
- token = line[start:pos]
- yield (STRING, token, spos, (lnum, pos), line)
- else:
- strstart = (lnum, start) # multiple lines
- contstr = line[start:]
- contline = line
- break
- elif initial in single_quoted or \
- token[:2] in single_quoted or \
- token[:3] in single_quoted:
- if token[-1] == '\n': # continued string
- strstart = (lnum, start)
- endprog = (endprogs[initial] or endprogs[token[1]] or
- endprogs[token[2]])
- contstr, needcont = line[start:], 1
- contline = line
- break
- else: # ordinary string
- yield (STRING, token, spos, epos, line)
- elif initial in namechars: # ordinary name
- yield (NAME, token, spos, epos, line)
- elif initial == '\\': # continued stmt
- continued = 1
- else:
- if initial in '([{':
- parenlev += 1
- elif initial in ')]}':
- parenlev -= 1
- yield (OP, token, spos, epos, line)
- else:
- yield (ERRORTOKEN, line[pos],
- (lnum, pos), (lnum, pos+1), line)
- pos += 1
-
- for indent in indents[1:]: # pop remaining indent levels
- yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
- yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-
-if __name__ == '__main__': # testing
- import sys
- if len(sys.argv) > 1:
- tokenize(open(sys.argv[1]).readline)
- else:
- tokenize(sys.stdin.readline)
+"""Patched version of standard library tokenize, to deal with various bugs.
+
+Patches
+
+- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
+ manually applied.
+- Newlines in comments and blank lines should be either NL or NEWLINE, depending
+ on whether they are in a multi-line statement. Filed as Python issue #17061.
+
+-------------------------------------------------------------------------------
+Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens. It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF). It generates
+5-tuples with these members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators
+
+Older entry points
+ tokenize_loop(readline, tokeneater)
+ tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
+from __future__ import print_function
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+ 'Skip Montanaro, Raymond Hettinger')
+
+import string, re
+from token import *
+
+import token
+__all__ = [x for x in dir(token) if not x.startswith("_")]
+__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
+del x
+del token
+
+__all__ += ["TokenError"]
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+N_TOKENS += 2
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'[a-zA-Z_]\w*'
+
+Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
+Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
+Binnumber = r'0[bB][01]+[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
+# Single-line ' or " string.
+String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+ r"//=?",
+ r"[+\-*/%&|^=<>]=?",
+ r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'[:;.,`@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+ re.compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+ "'''": single3prog, '"""': double3prog,
+ "r'''": single3prog, 'r"""': double3prog,
+ "u'''": single3prog, 'u"""': double3prog,
+ "ur'''": single3prog, 'ur"""': double3prog,
+ "R'''": single3prog, 'R"""': double3prog,
+ "U'''": single3prog, 'U"""': double3prog,
+ "uR'''": single3prog, 'uR"""': double3prog,
+ "Ur'''": single3prog, 'Ur"""': double3prog,
+ "UR'''": single3prog, 'UR"""': double3prog,
+ "b'''": single3prog, 'b"""': double3prog,
+ "br'''": single3prog, 'br"""': double3prog,
+ "B'''": single3prog, 'B"""': double3prog,
+ "bR'''": single3prog, 'bR"""': double3prog,
+ "Br'''": single3prog, 'Br"""': double3prog,
+ "BR'''": single3prog, 'BR"""': double3prog,
+ 'r': None, 'R': None, 'u': None, 'U': None,
+ 'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+ "r'''", 'r"""', "R'''", 'R"""',
+ "u'''", 'u"""', "U'''", 'U"""',
+ "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+ "uR'''", 'uR"""', "UR'''", 'UR"""',
+ "b'''", 'b"""', "B'''", 'B"""',
+ "br'''", 'br"""', "Br'''", 'Br"""',
+ "bR'''", 'bR"""', "BR'''", 'BR"""'):
+ triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+ "r'", 'r"', "R'", 'R"',
+ "u'", 'u"', "U'", 'U"',
+ "ur'", 'ur"', "Ur'", 'Ur"',
+ "uR'", 'uR"', "UR'", 'UR"',
+ "b'", 'b"', "B'", 'B"',
+ "br'", 'br"', "Br'", 'Br"',
+ "bR'", 'bR"', "BR'", 'BR"' ):
+ single_quoted[t] = t
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
+ srow, scol = srow_scol
+ erow, ecol = erow_ecol
+ print("%d,%d-%d,%d:\t%s\t%s" % \
+ (srow, scol, erow, ecol, tok_name[type], repr(token)))
+
+def tokenize(readline, tokeneater=printtoken):
+ """
+ The tokenize() function accepts two parameters: one representing the
+ input stream, and one providing an output mechanism for tokenize().
+
+ The first parameter, readline, must be a callable object which provides
+ the same interface as the readline() method of built-in file objects.
+ Each call to the function should return one line of input as a string.
+
+ The second parameter, tokeneater, must also be a callable object. It is
+ called once for each token, with five arguments, corresponding to the
+ tuples generated by generate_tokens().
+ """
+ try:
+ tokenize_loop(readline, tokeneater)
+ except StopTokenizing:
+ pass
+
+# backwards compatible interface
+def tokenize_loop(readline, tokeneater):
+ for token_info in generate_tokens(readline):
+ tokeneater(*token_info)
+
+class Untokenizer:
+
+ def __init__(self):
+ self.tokens = []
+ self.prev_row = 1
+ self.prev_col = 0
+
+    def add_whitespace(self, tok_type, start):
+ row, col = start
+ assert row >= self.prev_row
+ col_offset = col - self.prev_col
+ if col_offset > 0:
+ self.tokens.append(" " * col_offset)
+ elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
+ # Line was backslash-continued
+ self.tokens.append(" ")
+
+ def untokenize(self, tokens):
+ iterable = iter(tokens)
+ for t in iterable:
+ if len(t) == 2:
+ self.compat(t, iterable)
+ break
+ tok_type, token, start, end = t[:4]
+            self.add_whitespace(tok_type, start)
+ self.tokens.append(token)
+ self.prev_row, self.prev_col = end
+ if tok_type in (NEWLINE, NL):
+ self.prev_row += 1
+ self.prev_col = 0
+ return "".join(self.tokens)
+
+ def compat(self, token, iterable):
+ # This import is here to avoid problems when the itertools
+ # module is not built yet and tokenize is imported.
+ from itertools import chain
+ startline = False
+ prevstring = False
+ indents = []
+ toks_append = self.tokens.append
+ for tok in chain([token], iterable):
+ toknum, tokval = tok[:2]
+
+ if toknum in (NAME, NUMBER):
+ tokval += ' '
+
+ # Insert a space between two consecutive strings
+ if toknum == STRING:
+ if prevstring:
+ tokval = ' ' + tokval
+ prevstring = True
+ else:
+ prevstring = False
+
+ if toknum == INDENT:
+ indents.append(tokval)
+ continue
+ elif toknum == DEDENT:
+ indents.pop()
+ continue
+ elif toknum in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ toks_append(indents[-1])
+ startline = False
+ toks_append(tokval)
+
+def untokenize(iterable):
+ """Transform tokens back into Python source code.
+
+ Each element returned by the iterable must be a token sequence
+ with at least two elements, a token number and token value. If
+ only two tokens are passed, the resulting output is poor.
+
+ Round-trip invariant for full input:
+ Untokenized source will match input source exactly
+
+    Round-trip invariant for limited input:
+        # Output text will tokenize back to the input
+ t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+ newcode = untokenize(t1)
+ readline = iter(newcode.splitlines(1)).next
+ t2 = [tok[:2] for tok in generate_tokens(readline)]
+ assert t1 == t2
+ """
+ ut = Untokenizer()
+ return ut.untokenize(iterable)
+
+def generate_tokens(readline):
+ """
+    The generate_tokens() generator requires one argument, readline, which
+ must be a callable object which provides the same interface as the
+ readline() method of built-in file objects. Each call to the function
+ should return one line of input as a string. Alternately, readline
+ can be a callable function terminating with StopIteration:
+ readline = open(myfile).next # Example of alternate readline
+
+ The generator produces 5-tuples with these members: the token type; the
+ token string; a 2-tuple (srow, scol) of ints specifying the row and
+ column where the token begins in the source; a 2-tuple (erow, ecol) of
+ ints specifying the row and column where the token ends in the source;
+ and the line on which the token was found. The line passed is the
+ logical line; continuation lines are included.
+ """
+ lnum = parenlev = continued = 0
+ namechars, numchars = string.ascii_letters + '_', '0123456789'
+ contstr, needcont = '', 0
+ contline = None
+ indents = [0]
+
+ while 1: # loop over lines in stream
+ try:
+ line = readline()
+ except StopIteration:
+ line = ''
+ lnum += 1
+ pos, max = 0, len(line)
+
+ if contstr: # continued string
+ if not line:
+ raise TokenError("EOF in multi-line string", strstart)
+ endmatch = endprog.match(line)
+ if endmatch:
+ pos = end = endmatch.end(0)
+ yield (STRING, contstr + line[:end],
+ strstart, (lnum, end), contline + line)
+ contstr, needcont = '', 0
+ contline = None
+ elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+ yield (ERRORTOKEN, contstr + line,
+ strstart, (lnum, len(line)), contline)
+ contstr = ''
+ contline = None
+ continue
+ else:
+ contstr = contstr + line
+ contline = contline + line
+ continue
+
+ elif parenlev == 0 and not continued: # new statement
+ if not line: break
+ column = 0
+ while pos < max: # measure leading whitespace
+ if line[pos] == ' ':
+ column += 1
+ elif line[pos] == '\t':
+ column = (column//tabsize + 1)*tabsize
+ elif line[pos] == '\f':
+ column = 0
+ else:
+ break
+ pos += 1
+ if pos == max:
+ break
+
+ if line[pos] in '#\r\n': # skip comments or blank lines
+ if line[pos] == '#':
+ comment_token = line[pos:].rstrip('\r\n')
+ nl_pos = pos + len(comment_token)
+ yield (COMMENT, comment_token,
+ (lnum, pos), (lnum, pos + len(comment_token)), line)
+ yield (NEWLINE, line[nl_pos:],
+ (lnum, nl_pos), (lnum, len(line)), line)
+ else:
+ yield (NEWLINE, line[pos:],
+ (lnum, pos), (lnum, len(line)), line)
+ continue
+
+ if column > indents[-1]: # count indents or dedents
+ indents.append(column)
+ yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+ while column < indents[-1]:
+ if column not in indents:
+ raise IndentationError(
+ "unindent does not match any outer indentation level",
+ ("<tokenize>", lnum, pos, line))
+ indents = indents[:-1]
+ yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+ else: # continued statement
+ if not line:
+ raise TokenError("EOF in multi-line statement", (lnum, 0))
+ continued = 0
+
+ while pos < max:
+ pseudomatch = pseudoprog.match(line, pos)
+ if pseudomatch: # scan for tokens
+ start, end = pseudomatch.span(1)
+ spos, epos, pos = (lnum, start), (lnum, end), end
+ token, initial = line[start:end], line[start]
+
+ if initial in numchars or \
+ (initial == '.' and token != '.'): # ordinary number
+ yield (NUMBER, token, spos, epos, line)
+ elif initial in '\r\n':
+ yield (NL if parenlev > 0 else NEWLINE,
+ token, spos, epos, line)
+ elif initial == '#':
+ assert not token.endswith("\n")
+ yield (COMMENT, token, spos, epos, line)
+ elif token in triple_quoted:
+ endprog = endprogs[token]
+ endmatch = endprog.match(line, pos)
+ if endmatch: # all on one line
+ pos = endmatch.end(0)
+ token = line[start:pos]
+ yield (STRING, token, spos, (lnum, pos), line)
+ else:
+ strstart = (lnum, start) # multiple lines
+ contstr = line[start:]
+ contline = line
+ break
+ elif initial in single_quoted or \
+ token[:2] in single_quoted or \
+ token[:3] in single_quoted:
+ if token[-1] == '\n': # continued string
+ strstart = (lnum, start)
+ endprog = (endprogs[initial] or endprogs[token[1]] or
+ endprogs[token[2]])
+ contstr, needcont = line[start:], 1
+ contline = line
+ break
+ else: # ordinary string
+ yield (STRING, token, spos, epos, line)
+ elif initial in namechars: # ordinary name
+ yield (NAME, token, spos, epos, line)
+ elif initial == '\\': # continued stmt
+ continued = 1
+ else:
+ if initial in '([{':
+ parenlev += 1
+ elif initial in ')]}':
+ parenlev -= 1
+ yield (OP, token, spos, epos, line)
+ else:
+ yield (ERRORTOKEN, line[pos],
+ (lnum, pos), (lnum, pos+1), line)
+ pos += 1
+
+ for indent in indents[1:]: # pop remaining indent levels
+ yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+ yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+if __name__ == '__main__': # testing
+ import sys
+ if len(sys.argv) > 1:
+ tokenize(open(sys.argv[1]).readline)
+ else:
+ tokenize(sys.stdin.readline)
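As a quick check of the round-trip guarantees documented above, a minimal sketch (Python 2, matching this module; the import path is taken from the file path in this diff):

    from io import StringIO
    from IPython.utils._tokenize_py2 import generate_tokens, untokenize

    source = u"x = 1  # answer\nprint(x)\n"
    # Two-element (type, string) tokens exercise the "limited input"
    # invariant: spacing may change, but re-tokenizing the output
    # yields the same (type, string) pairs.
    pairs = [tok[:2] for tok in generate_tokens(StringIO(source).readline)]
    rebuilt = untokenize(pairs)
    assert [t[:2] for t in generate_tokens(StringIO(rebuilt).readline)] == pairs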
diff --git a/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py b/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py
index ca85023c32..ee1fd9e639 100644
--- a/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py
+++ b/contrib/python/ipython/py2/IPython/utils/_tokenize_py3.py
@@ -1,595 +1,595 @@
-"""Patched version of standard library tokenize, to deal with various bugs.
-
-Based on Python 3.2 code.
-
-Patches:
-
-- Gareth Rees' patch for Python issue #12691 (untokenizing)
- - Except we don't encode the output of untokenize
- - Python 2 compatible syntax, so that it can be byte-compiled at installation
-- Newlines in comments and blank lines should be either NL or NEWLINE, depending
- on whether they are in a multi-line statement. Filed as Python issue #17061.
-- Export generate_tokens & TokenError
-- u and rb literals are allowed under Python 3.3 and above.
-
-------------------------------------------------------------------------------
-Tokenization help for Python programs.
-
-tokenize(readline) is a generator that breaks a stream of bytes into
-Python tokens. It decodes the bytes according to PEP-0263 for
-determining source file encoding.
-
-It accepts a readline-like method which is called repeatedly to get the
-next line of input (or b"" for EOF). It generates 5-tuples with these
-members:
-
- the token type (see token.py)
- the token (a string)
- the starting (row, column) indices of the token (a 2-tuple of ints)
- the ending (row, column) indices of the token (a 2-tuple of ints)
- the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators. Additionally, all token lists start with an ENCODING token
-which tells you which encoding was used to decode the bytes stream.
-"""
-from __future__ import absolute_import
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
- 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
- 'Michael Foord')
-import builtins
-import re
-import sys
-from token import *
-from codecs import lookup, BOM_UTF8
-import collections
-from io import TextIOWrapper
-cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
-
-import token
-__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
- "NL", "untokenize", "ENCODING", "TokenInfo"]
-del token
-
-__all__ += ["generate_tokens", "TokenError"]
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-ENCODING = N_TOKENS + 2
-tok_name[ENCODING] = 'ENCODING'
-N_TOKENS += 3
-
-class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
- def __repr__(self):
- annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
- return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
- self._replace(type=annotated_type))
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-# Note: we use unicode matching for names ("\w") but ascii matching for
-# number literals.
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'\w+'
-
-Hexnumber = r'0[xX][0-9a-fA-F]+'
-Binnumber = r'0[bB][01]+'
-Octnumber = r'0[oO][0-7]+'
-Decnumber = r'(?:0+|[1-9][0-9]*)'
-Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?[0-9]+'
-Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
-Expfloat = r'[0-9]+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-if sys.version_info.minor >= 3:
- StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
-else:
- StringPrefix = r'(?:[bB]?[rR]?)?'
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group(StringPrefix + "'''", StringPrefix + '"""')
-# Single-line ' or " string.
-String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
- r"//=?", r"->",
- r"[+\-*/%&|^=<>]=?",
- r"~")
-
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
-Funny = group(Operator, Bracket, Special)
-
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-
-# First (or only) line of ' or " string.
-ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
- group("'", r'\\\r?\n'),
- StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
- group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-
-def _compile(expr):
- return re.compile(expr, re.UNICODE)
-
-tokenprog, pseudoprog, single3prog, double3prog = map(
- _compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": _compile(Single), '"': _compile(Double),
- "'''": single3prog, '"""': double3prog,
- "r'''": single3prog, 'r"""': double3prog,
- "b'''": single3prog, 'b"""': double3prog,
- "R'''": single3prog, 'R"""': double3prog,
- "B'''": single3prog, 'B"""': double3prog,
- "br'''": single3prog, 'br"""': double3prog,
- "bR'''": single3prog, 'bR"""': double3prog,
- "Br'''": single3prog, 'Br"""': double3prog,
- "BR'''": single3prog, 'BR"""': double3prog,
- 'r': None, 'R': None, 'b': None, 'B': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
- "r'''", 'r"""', "R'''", 'R"""',
- "b'''", 'b"""', "B'''", 'B"""',
- "br'''", 'br"""', "Br'''", 'Br"""',
- "bR'''", 'bR"""', "BR'''", 'BR"""'):
- triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "b'", 'b"', "B'", 'B"',
- "br'", 'br"', "Br'", 'Br"',
- "bR'", 'bR"', "BR'", 'BR"' ):
- single_quoted[t] = t
-
-if sys.version_info.minor >= 3:
- # Python 3.3
- for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
- _t2 = _prefix+'"""'
- endprogs[_t2] = double3prog
- triple_quoted[_t2] = _t2
- _t1 = _prefix + "'''"
- endprogs[_t1] = single3prog
- triple_quoted[_t1] = _t1
- single_quoted[_prefix+'"'] = _prefix+'"'
- single_quoted[_prefix+"'"] = _prefix+"'"
- del _prefix, _t2, _t1
- endprogs['u'] = None
- endprogs['U'] = None
-
-del _compile
-
-tabsize = 8
-
-class TokenError(Exception): pass
-
-class StopTokenizing(Exception): pass
-
-
-class Untokenizer:
-
- def __init__(self):
- self.tokens = []
- self.prev_row = 1
- self.prev_col = 0
- self.encoding = 'utf-8'
-
- def add_whitespace(self, tok_type, start):
- row, col = start
- assert row >= self.prev_row
- col_offset = col - self.prev_col
- if col_offset > 0:
- self.tokens.append(" " * col_offset)
- elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
- # Line was backslash-continued.
- self.tokens.append(" ")
-
- def untokenize(self, tokens):
- iterable = iter(tokens)
- for t in iterable:
- if len(t) == 2:
- self.compat(t, iterable)
- break
- tok_type, token, start, end = t[:4]
- if tok_type == ENCODING:
- self.encoding = token
- continue
- self.add_whitespace(tok_type, start)
- self.tokens.append(token)
- self.prev_row, self.prev_col = end
- if tok_type in (NEWLINE, NL):
- self.prev_row += 1
- self.prev_col = 0
- return "".join(self.tokens)
-
- def compat(self, token, iterable):
- # This import is here to avoid problems when the itertools
- # module is not built yet and tokenize is imported.
- from itertools import chain
- startline = False
- prevstring = False
- indents = []
- toks_append = self.tokens.append
-
- for tok in chain([token], iterable):
- toknum, tokval = tok[:2]
- if toknum == ENCODING:
- self.encoding = tokval
- continue
-
- if toknum in (NAME, NUMBER):
- tokval += ' '
-
- # Insert a space between two consecutive strings
- if toknum == STRING:
- if prevstring:
- tokval = ' ' + tokval
- prevstring = True
- else:
- prevstring = False
-
- if toknum == INDENT:
- indents.append(tokval)
- continue
- elif toknum == DEDENT:
- indents.pop()
- continue
- elif toknum in (NEWLINE, NL):
- startline = True
- elif startline and indents:
- toks_append(indents[-1])
- startline = False
- toks_append(tokval)
-
-
-def untokenize(tokens):
- """
-    Convert ``tokens`` (an iterable) back into Python source code. Return
-    a string: per the patch notes above, this version does not encode the
-    output (the encoding from the last ENCODING token is tracked but unused).
-
- The result is guaranteed to tokenize back to match the input so that
- the conversion is lossless and round-trips are assured. The
- guarantee applies only to the token type and token string as the
- spacing between tokens (column positions) may change.
-
- :func:`untokenize` has two modes. If the input tokens are sequences
- of length 2 (``type``, ``string``) then spaces are added as necessary to
- preserve the round-trip property.
-
- If the input tokens are sequences of length 4 or more (``type``,
- ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
- spaces are added so that each token appears in the result at the
- position indicated by ``start`` and ``end``, if possible.
- """
- return Untokenizer().untokenize(tokens)
-
-
-def _get_normal_name(orig_enc):
- """Imitates get_normal_name in tokenizer.c."""
- # Only care about the first 12 characters.
- enc = orig_enc[:12].lower().replace("_", "-")
- if enc == "utf-8" or enc.startswith("utf-8-"):
- return "utf-8"
- if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
- enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
- return "iso-8859-1"
- return orig_enc
-
-def detect_encoding(readline):
- """
- The detect_encoding() function is used to detect the encoding that should
-    be used to decode a Python source file. It requires one argument, readline,
- in the same way as the tokenize() generator.
-
- It will call readline a maximum of twice, and return the encoding used
- (as a string) and a list of any lines (left as bytes) it has read in.
-
- It detects the encoding from the presence of a utf-8 bom or an encoding
- cookie as specified in pep-0263. If both a bom and a cookie are present,
- but disagree, a SyntaxError will be raised. If the encoding cookie is an
- invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
- 'utf-8-sig' is returned.
-
- If no encoding is specified, then the default of 'utf-8' will be returned.
- """
- bom_found = False
- encoding = None
- default = 'utf-8'
- def read_or_stop():
- try:
- return readline()
- except StopIteration:
- return b''
-
- def find_cookie(line):
- try:
- # Decode as UTF-8. Either the line is an encoding declaration,
- # in which case it should be pure ASCII, or it must be UTF-8
- # per default encoding.
- line_string = line.decode('utf-8')
- except UnicodeDecodeError:
- raise SyntaxError("invalid or missing encoding declaration")
-
- matches = cookie_re.findall(line_string)
- if not matches:
- return None
- encoding = _get_normal_name(matches[0])
- try:
- codec = lookup(encoding)
- except LookupError:
- # This behaviour mimics the Python interpreter
- raise SyntaxError("unknown encoding: " + encoding)
-
- if bom_found:
- if encoding != 'utf-8':
- # This behaviour mimics the Python interpreter
- raise SyntaxError('encoding problem: utf-8')
- encoding += '-sig'
- return encoding
-
- first = read_or_stop()
- if first.startswith(BOM_UTF8):
- bom_found = True
- first = first[3:]
- default = 'utf-8-sig'
- if not first:
- return default, []
-
- encoding = find_cookie(first)
- if encoding:
- return encoding, [first]
-
- second = read_or_stop()
- if not second:
- return default, [first]
-
- encoding = find_cookie(second)
- if encoding:
- return encoding, [first, second]
-
- return default, [first, second]
-
-
-def open(filename):
- """Open a file in read only mode using the encoding detected by
- detect_encoding().
- """
- buffer = builtins.open(filename, 'rb')
- encoding, lines = detect_encoding(buffer.readline)
- buffer.seek(0)
- text = TextIOWrapper(buffer, encoding, line_buffering=True)
- text.mode = 'r'
- return text
-
-
-def tokenize(readline):
- """
-    The tokenize() generator requires one argument, readline, which
- must be a callable object which provides the same interface as the
- readline() method of built-in file objects. Each call to the function
- should return one line of input as bytes. Alternately, readline
- can be a callable function terminating with StopIteration:
- readline = open(myfile, 'rb').__next__ # Example of alternate readline
-
- The generator produces 5-tuples with these members: the token type; the
- token string; a 2-tuple (srow, scol) of ints specifying the row and
- column where the token begins in the source; a 2-tuple (erow, ecol) of
- ints specifying the row and column where the token ends in the source;
- and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
-
- The first token sequence will always be an ENCODING token
- which tells you which encoding was used to decode the bytes stream.
- """
- # This import is here to avoid problems when the itertools module is not
- # built yet and tokenize is imported.
- from itertools import chain, repeat
- encoding, consumed = detect_encoding(readline)
- rl_gen = iter(readline, b"")
- empty = repeat(b"")
- return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
-
-
-def _tokenize(readline, encoding):
- lnum = parenlev = continued = 0
- numchars = '0123456789'
- contstr, needcont = '', 0
- contline = None
- indents = [0]
-
- if encoding is not None:
- if encoding == "utf-8-sig":
- # BOM will already have been stripped.
- encoding = "utf-8"
- yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
- while True: # loop over lines in stream
- try:
- line = readline()
- except StopIteration:
- line = b''
-
- if encoding is not None:
- line = line.decode(encoding)
- lnum += 1
- pos, max = 0, len(line)
-
- if contstr: # continued string
- if not line:
- raise TokenError("EOF in multi-line string", strstart)
- endmatch = endprog.match(line)
- if endmatch:
- pos = end = endmatch.end(0)
- yield TokenInfo(STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line)
- contstr, needcont = '', 0
- contline = None
- elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
- yield TokenInfo(ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline)
- contstr = ''
- contline = None
- continue
- else:
- contstr = contstr + line
- contline = contline + line
- continue
-
- elif parenlev == 0 and not continued: # new statement
- if not line: break
- column = 0
- while pos < max: # measure leading whitespace
- if line[pos] == ' ':
- column += 1
- elif line[pos] == '\t':
- column = (column//tabsize + 1)*tabsize
- elif line[pos] == '\f':
- column = 0
- else:
- break
- pos += 1
- if pos == max:
- break
-
- if line[pos] in '#\r\n': # skip comments or blank lines
- if line[pos] == '#':
- comment_token = line[pos:].rstrip('\r\n')
- nl_pos = pos + len(comment_token)
- yield TokenInfo(COMMENT, comment_token,
- (lnum, pos), (lnum, pos + len(comment_token)), line)
- yield TokenInfo(NEWLINE, line[nl_pos:],
- (lnum, nl_pos), (lnum, len(line)), line)
- else:
- yield TokenInfo(NEWLINE, line[pos:],
- (lnum, pos), (lnum, len(line)), line)
- continue
-
- if column > indents[-1]: # count indents or dedents
- indents.append(column)
- yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
- while column < indents[-1]:
- if column not in indents:
- raise IndentationError(
- "unindent does not match any outer indentation level",
- ("<tokenize>", lnum, pos, line))
- indents = indents[:-1]
- yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
-
- else: # continued statement
- if not line:
- raise TokenError("EOF in multi-line statement", (lnum, 0))
- continued = 0
-
- while pos < max:
- pseudomatch = pseudoprog.match(line, pos)
- if pseudomatch: # scan for tokens
- start, end = pseudomatch.span(1)
- spos, epos, pos = (lnum, start), (lnum, end), end
- token, initial = line[start:end], line[start]
-
- if (initial in numchars or # ordinary number
- (initial == '.' and token != '.' and token != '...')):
- yield TokenInfo(NUMBER, token, spos, epos, line)
- elif initial in '\r\n':
- yield TokenInfo(NL if parenlev > 0 else NEWLINE,
- token, spos, epos, line)
- elif initial == '#':
- assert not token.endswith("\n")
- yield TokenInfo(COMMENT, token, spos, epos, line)
- elif token in triple_quoted:
- endprog = endprogs[token]
- endmatch = endprog.match(line, pos)
- if endmatch: # all on one line
- pos = endmatch.end(0)
- token = line[start:pos]
- yield TokenInfo(STRING, token, spos, (lnum, pos), line)
- else:
- strstart = (lnum, start) # multiple lines
- contstr = line[start:]
- contline = line
- break
- elif initial in single_quoted or \
- token[:2] in single_quoted or \
- token[:3] in single_quoted:
- if token[-1] == '\n': # continued string
- strstart = (lnum, start)
- endprog = (endprogs[initial] or endprogs[token[1]] or
- endprogs[token[2]])
- contstr, needcont = line[start:], 1
- contline = line
- break
- else: # ordinary string
- yield TokenInfo(STRING, token, spos, epos, line)
- elif initial.isidentifier(): # ordinary name
- yield TokenInfo(NAME, token, spos, epos, line)
- elif initial == '\\': # continued stmt
- continued = 1
- else:
- if initial in '([{':
- parenlev += 1
- elif initial in ')]}':
- parenlev -= 1
- yield TokenInfo(OP, token, spos, epos, line)
- else:
- yield TokenInfo(ERRORTOKEN, line[pos],
- (lnum, pos), (lnum, pos+1), line)
- pos += 1
-
- for indent in indents[1:]: # pop remaining indent levels
- yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
- yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-
-
-# An undocumented, backwards compatible, API for all the places in the standard
-# library that expect to be able to use tokenize with strings
-def generate_tokens(readline):
- return _tokenize(readline, None)
-
-if __name__ == "__main__":
- # Quick sanity check
- s = b'''def parseline(self, line):
- """Parse the line into a command name and a string containing
- the arguments. Returns a tuple containing (command, args, line).
- 'command' and 'args' may be None if the line couldn't be parsed.
- """
- line = line.strip()
- if not line:
- return None, None, line
- elif line[0] == '?':
- line = 'help ' + line[1:]
- elif line[0] == '!':
- if hasattr(self, 'do_shell'):
- line = 'shell ' + line[1:]
- else:
- return None, None, line
- i, n = 0, len(line)
- while i < n and line[i] in self.identchars: i = i+1
- cmd, arg = line[:i], line[i:].strip()
- return cmd, arg, line
- '''
- for tok in tokenize(iter(s.splitlines()).__next__):
- print(tok)
+"""Patched version of standard library tokenize, to deal with various bugs.
+
+Based on Python 3.2 code.
+
+Patches:
+
+- Gareth Rees' patch for Python issue #12691 (untokenizing)
+ - Except we don't encode the output of untokenize
+ - Python 2 compatible syntax, so that it can be byte-compiled at installation
+- Newlines in comments and blank lines should be either NL or NEWLINE, depending
+ on whether they are in a multi-line statement. Filed as Python issue #17061.
+- Export generate_tokens & TokenError
+- u and rb literals are allowed under Python 3.3 and above.
+
+------------------------------------------------------------------------------
+Tokenization help for Python programs.
+
+tokenize(readline) is a generator that breaks a stream of bytes into
+Python tokens. It decodes the bytes according to PEP-0263 for
+determining source file encoding.
+
+It accepts a readline-like method which is called repeatedly to get the
+next line of input (or b"" for EOF). It generates 5-tuples with these
+members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators. Additionally, all token lists start with an ENCODING token
+which tells you which encoding was used to decode the bytes stream.
+"""
+from __future__ import absolute_import
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+ 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
+ 'Michael Foord')
+import builtins
+import re
+import sys
+from token import *
+from codecs import lookup, BOM_UTF8
+import collections
+from io import TextIOWrapper
+cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+import token
+__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
+ "NL", "untokenize", "ENCODING", "TokenInfo"]
+del token
+
+__all__ += ["generate_tokens", "TokenError"]
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+ENCODING = N_TOKENS + 2
+tok_name[ENCODING] = 'ENCODING'
+N_TOKENS += 3
+
+class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
+ def __repr__(self):
+ annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
+ return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
+ self._replace(type=annotated_type))
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+# Note: we use unicode matching for names ("\w") but ascii matching for
+# number literals.
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'\w+'
+
+Hexnumber = r'0[xX][0-9a-fA-F]+'
+Binnumber = r'0[bB][01]+'
+Octnumber = r'0[oO][0-7]+'
+Decnumber = r'(?:0+|[1-9][0-9]*)'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?[0-9]+'
+Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
+Expfloat = r'[0-9]+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+if sys.version_info >= (3, 3):
+ StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
+else:
+ StringPrefix = r'(?:[bB]?[rR]?)?'
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group(StringPrefix + "'''", StringPrefix + '"""')
+# Single-line ' or " string.
+String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
+ r"//=?", r"->",
+ r"[+\-*/%&|^=<>]=?",
+ r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+ StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+def _compile(expr):
+ return re.compile(expr, re.UNICODE)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+ _compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": _compile(Single), '"': _compile(Double),
+ "'''": single3prog, '"""': double3prog,
+ "r'''": single3prog, 'r"""': double3prog,
+ "b'''": single3prog, 'b"""': double3prog,
+ "R'''": single3prog, 'R"""': double3prog,
+ "B'''": single3prog, 'B"""': double3prog,
+ "br'''": single3prog, 'br"""': double3prog,
+ "bR'''": single3prog, 'bR"""': double3prog,
+ "Br'''": single3prog, 'Br"""': double3prog,
+ "BR'''": single3prog, 'BR"""': double3prog,
+ 'r': None, 'R': None, 'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+ "r'''", 'r"""', "R'''", 'R"""',
+ "b'''", 'b"""', "B'''", 'B"""',
+ "br'''", 'br"""', "Br'''", 'Br"""',
+ "bR'''", 'bR"""', "BR'''", 'BR"""'):
+ triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+ "r'", 'r"', "R'", 'R"',
+ "b'", 'b"', "B'", 'B"',
+ "br'", 'br"', "Br'", 'Br"',
+ "bR'", 'bR"', "BR'", 'BR"' ):
+ single_quoted[t] = t
+
+if sys.version_info >= (3, 3):
+ # Python 3.3
+ for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
+ _t2 = _prefix+'"""'
+ endprogs[_t2] = double3prog
+ triple_quoted[_t2] = _t2
+ _t1 = _prefix + "'''"
+ endprogs[_t1] = single3prog
+ triple_quoted[_t1] = _t1
+ single_quoted[_prefix+'"'] = _prefix+'"'
+ single_quoted[_prefix+"'"] = _prefix+"'"
+ del _prefix, _t2, _t1
+ endprogs['u'] = None
+ endprogs['U'] = None
+
+del _compile
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+
+class Untokenizer:
+
+ def __init__(self):
+ self.tokens = []
+ self.prev_row = 1
+ self.prev_col = 0
+ self.encoding = 'utf-8'
+
+ def add_whitespace(self, tok_type, start):
+ row, col = start
+ assert row >= self.prev_row
+ col_offset = col - self.prev_col
+ if col_offset > 0:
+ self.tokens.append(" " * col_offset)
+ elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
+ # Line was backslash-continued.
+ self.tokens.append(" ")
+
+ def untokenize(self, tokens):
+ iterable = iter(tokens)
+ for t in iterable:
+ if len(t) == 2:
+ self.compat(t, iterable)
+ break
+ tok_type, token, start, end = t[:4]
+ if tok_type == ENCODING:
+ self.encoding = token
+ continue
+ self.add_whitespace(tok_type, start)
+ self.tokens.append(token)
+ self.prev_row, self.prev_col = end
+ if tok_type in (NEWLINE, NL):
+ self.prev_row += 1
+ self.prev_col = 0
+ return "".join(self.tokens)
+
+ def compat(self, token, iterable):
+ # This import is here to avoid problems when the itertools
+ # module is not built yet and tokenize is imported.
+ from itertools import chain
+ startline = False
+ prevstring = False
+ indents = []
+ toks_append = self.tokens.append
+
+ for tok in chain([token], iterable):
+ toknum, tokval = tok[:2]
+ if toknum == ENCODING:
+ self.encoding = tokval
+ continue
+
+ if toknum in (NAME, NUMBER):
+ tokval += ' '
+
+ # Insert a space between two consecutive strings
+ if toknum == STRING:
+ if prevstring:
+ tokval = ' ' + tokval
+ prevstring = True
+ else:
+ prevstring = False
+
+ if toknum == INDENT:
+ indents.append(tokval)
+ continue
+ elif toknum == DEDENT:
+ indents.pop()
+ continue
+ elif toknum in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ toks_append(indents[-1])
+ startline = False
+ toks_append(tokval)
+
+
+def untokenize(tokens):
+ """
+ Convert ``tokens`` (an iterable) back into Python source code. Return
+ a bytes object, encoded using the encoding specified by the last
+ ENCODING token in ``tokens``, or UTF-8 if no ENCODING token is found.
+
+ The result is guaranteed to tokenize back to match the input so that
+ the conversion is lossless and round-trips are assured. The
+ guarantee applies only to the token type and token string as the
+ spacing between tokens (column positions) may change.
+
+ :func:`untokenize` has two modes. If the input tokens are sequences
+ of length 2 (``type``, ``string``) then spaces are added as necessary to
+ preserve the round-trip property.
+
+ If the input tokens are sequences of length 4 or more (``type``,
+ ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
+ spaces are added so that each token appears in the result at the
+ position indicated by ``start`` and ``end``, if possible.
+ """
+ return Untokenizer().untokenize(tokens)
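+
+# Illustrative round-trip sketch (editorial note, not upstream code); note
+# this patched untokenize returns str, not bytes:
+#
+#   >>> import io
+#   >>> source = b"x = 1\n"
+#   >>> toks = list(tokenize(io.BytesIO(source).readline))
+#   >>> untokenize(toks) == source.decode('utf-8')
+#   True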
+
+
+def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
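+
+# e.g. (illustrative): _get_normal_name('UTF_8') -> 'utf-8' and
+# _get_normal_name('Latin-1') -> 'iso-8859-1'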
+
+def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+ be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read in.
+
+ It detects the encoding from the presence of a UTF-8 BOM or an encoding
+ cookie as specified in PEP 263. If both a BOM and a cookie are present
+ but disagree, a SyntaxError will be raised. If the encoding cookie is an
+ invalid charset, a SyntaxError is raised. Note that if a UTF-8 BOM is found,
+ 'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return b''
+
+ def find_cookie(line):
+ try:
+ # Decode as UTF-8. Either the line is an encoding declaration,
+ # in which case it should be pure ASCII, or it must be UTF-8
+ # per default encoding.
+ line_string = line.decode('utf-8')
+ except UnicodeDecodeError:
+ raise SyntaxError("invalid or missing encoding declaration")
+
+ matches = cookie_re.findall(line_string)
+ if not matches:
+ return None
+ encoding = _get_normal_name(matches[0])
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError("unknown encoding: " + encoding)
+
+ if bom_found:
+ if encoding != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError('encoding problem: utf-8')
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
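+
+# Illustrative sketch (editorial note, not upstream code):
+#
+#   >>> import io
+#   >>> buf = io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
+#   >>> detect_encoding(buf.readline)
+#   ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])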
+
+
+def open(filename):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = builtins.open(filename, 'rb')
+ encoding, lines = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
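+
+# Illustrative usage (editorial sketch; 'example.py' is a hypothetical path):
+#
+#   >>> with open('example.py') as f:  # encoding chosen by detect_encoding()
+#   ...     src = f.read()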
+
+
+def tokenize(readline):
+ """
+ The tokenize() generator requires one argument, readline, which
+ must be a callable object which provides the same interface as the
+ readline() method of built-in file objects. Each call to the function
+ should return one line of input as bytes. Alternately, readline
+ can be a callable function terminating with StopIteration:
+ readline = open(myfile, 'rb').__next__ # Example of alternate readline
+
+ The generator produces 5-tuples with these members: the token type; the
+ token string; a 2-tuple (srow, scol) of ints specifying the row and
+ column where the token begins in the source; a 2-tuple (erow, ecol) of
+ ints specifying the row and column where the token ends in the source;
+ and the line on which the token was found. The line passed is the
+ logical line; continuation lines are included.
+
+ The first token sequence will always be an ENCODING token
+ which tells you which encoding was used to decode the bytes stream.
+ """
+ # This import is here to avoid problems when the itertools module is not
+ # built yet and tokenize is imported.
+ from itertools import chain, repeat
+ encoding, consumed = detect_encoding(readline)
+ rl_gen = iter(readline, b"")
+ empty = repeat(b"")
+ return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
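+
+# Illustrative sketch (editorial note, not upstream code):
+#
+#   >>> import io
+#   >>> for tok in tokenize(io.BytesIO(b"1 + 2\n").readline):
+#   ...     print(tok_name[tok.type], repr(tok.string))
+#   ENCODING 'utf-8'
+#   NUMBER '1'
+#   OP '+'
+#   NUMBER '2'
+#   NEWLINE '\n'
+#   ENDMARKER ''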
+
+
+def _tokenize(readline, encoding):
+ lnum = parenlev = continued = 0
+ numchars = '0123456789'
+ contstr, needcont = '', 0
+ contline = None
+ indents = [0]
+
+ if encoding is not None:
+ if encoding == "utf-8-sig":
+ # BOM will already have been stripped.
+ encoding = "utf-8"
+ yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
+ while True: # loop over lines in stream
+ try:
+ line = readline()
+ except StopIteration:
+ line = b''
+
+ if encoding is not None:
+ line = line.decode(encoding)
+ lnum += 1
+ pos, max = 0, len(line)
+
+ if contstr: # continued string
+ if not line:
+ raise TokenError("EOF in multi-line string", strstart)
+ endmatch = endprog.match(line)
+ if endmatch:
+ pos = end = endmatch.end(0)
+ yield TokenInfo(STRING, contstr + line[:end],
+ strstart, (lnum, end), contline + line)
+ contstr, needcont = '', 0
+ contline = None
+ elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+ yield TokenInfo(ERRORTOKEN, contstr + line,
+ strstart, (lnum, len(line)), contline)
+ contstr = ''
+ contline = None
+ continue
+ else:
+ contstr = contstr + line
+ contline = contline + line
+ continue
+
+ elif parenlev == 0 and not continued: # new statement
+ if not line: break
+ column = 0
+ while pos < max: # measure leading whitespace
+ if line[pos] == ' ':
+ column += 1
+ elif line[pos] == '\t':
+ column = (column//tabsize + 1)*tabsize
+ elif line[pos] == '\f':
+ column = 0
+ else:
+ break
+ pos += 1
+ if pos == max:
+ break
+
+ if line[pos] in '#\r\n': # skip comments or blank lines
+ if line[pos] == '#':
+ comment_token = line[pos:].rstrip('\r\n')
+ nl_pos = pos + len(comment_token)
+ yield TokenInfo(COMMENT, comment_token,
+ (lnum, pos), (lnum, pos + len(comment_token)), line)
+ yield TokenInfo(NEWLINE, line[nl_pos:],
+ (lnum, nl_pos), (lnum, len(line)), line)
+ else:
+ yield TokenInfo(NEWLINE, line[pos:],
+ (lnum, pos), (lnum, len(line)), line)
+ continue
+
+ if column > indents[-1]: # count indents or dedents
+ indents.append(column)
+ yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+ while column < indents[-1]:
+ if column not in indents:
+ raise IndentationError(
+ "unindent does not match any outer indentation level",
+ ("<tokenize>", lnum, pos, line))
+ indents = indents[:-1]
+ yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+ else: # continued statement
+ if not line:
+ raise TokenError("EOF in multi-line statement", (lnum, 0))
+ continued = 0
+
+ while pos < max:
+ pseudomatch = pseudoprog.match(line, pos)
+ if pseudomatch: # scan for tokens
+ start, end = pseudomatch.span(1)
+ spos, epos, pos = (lnum, start), (lnum, end), end
+ token, initial = line[start:end], line[start]
+
+ if (initial in numchars or # ordinary number
+ (initial == '.' and token != '.' and token != '...')):
+ yield TokenInfo(NUMBER, token, spos, epos, line)
+ elif initial in '\r\n':
+ yield TokenInfo(NL if parenlev > 0 else NEWLINE,
+ token, spos, epos, line)
+ elif initial == '#':
+ assert not token.endswith("\n")
+ yield TokenInfo(COMMENT, token, spos, epos, line)
+ elif token in triple_quoted:
+ endprog = endprogs[token]
+ endmatch = endprog.match(line, pos)
+ if endmatch: # all on one line
+ pos = endmatch.end(0)
+ token = line[start:pos]
+ yield TokenInfo(STRING, token, spos, (lnum, pos), line)
+ else:
+ strstart = (lnum, start) # multiple lines
+ contstr = line[start:]
+ contline = line
+ break
+ elif initial in single_quoted or \
+ token[:2] in single_quoted or \
+ token[:3] in single_quoted:
+ if token[-1] == '\n': # continued string
+ strstart = (lnum, start)
+ endprog = (endprogs[initial] or endprogs[token[1]] or
+ endprogs[token[2]])
+ contstr, needcont = line[start:], 1
+ contline = line
+ break
+ else: # ordinary string
+ yield TokenInfo(STRING, token, spos, epos, line)
+ elif initial.isidentifier(): # ordinary name
+ yield TokenInfo(NAME, token, spos, epos, line)
+ elif initial == '\\': # continued stmt
+ continued = 1
+ else:
+ if initial in '([{':
+ parenlev += 1
+ elif initial in ')]}':
+ parenlev -= 1
+ yield TokenInfo(OP, token, spos, epos, line)
+ else:
+ yield TokenInfo(ERRORTOKEN, line[pos],
+ (lnum, pos), (lnum, pos+1), line)
+ pos += 1
+
+ for indent in indents[1:]: # pop remaining indent levels
+ yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
+ yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+
+# An undocumented, backwards compatible, API for all the places in the standard
+# library that expect to be able to use tokenize with strings
+def generate_tokens(readline):
+ return _tokenize(readline, None)
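+
+# Illustrative sketch (editorial note, not upstream code): generate_tokens
+# accepts str input and emits no leading ENCODING token, e.g.
+#
+#   >>> import io
+#   >>> toks = list(generate_tokens(io.StringIO("x = 1\n").readline))
+#   >>> toks[0].string
+#   'x'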
+
+if __name__ == "__main__":
+ # Quick sanity check
+ s = b'''def parseline(self, line):
+ """Parse the line into a command name and a string containing
+ the arguments. Returns a tuple containing (command, args, line).
+ 'command' and 'args' may be None if the line couldn't be parsed.
+ """
+ line = line.strip()
+ if not line:
+ return None, None, line
+ elif line[0] == '?':
+ line = 'help ' + line[1:]
+ elif line[0] == '!':
+ if hasattr(self, 'do_shell'):
+ line = 'shell ' + line[1:]
+ else:
+ return None, None, line
+ i, n = 0, len(line)
+ while i < n and line[i] in self.identchars: i = i+1
+ cmd, arg = line[:i], line[i:].strip()
+ return cmd, arg, line
+ '''
+ for tok in tokenize(iter(s.splitlines()).__next__):
+ print(tok)
diff --git a/contrib/python/ipython/py2/IPython/utils/capture.py b/contrib/python/ipython/py2/IPython/utils/capture.py
index bb241b0fad..d8f919568c 100644
--- a/contrib/python/ipython/py2/IPython/utils/capture.py
+++ b/contrib/python/ipython/py2/IPython/utils/capture.py
@@ -1,176 +1,176 @@
-# encoding: utf-8
-"""IO capturing utilities."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function, absolute_import
-
-import sys
-
-from IPython.utils.py3compat import PY3
-
-if PY3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-
-
-class RichOutput(object):
+# encoding: utf-8
+"""IO capturing utilities."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function, absolute_import
+
+import sys
+
+from IPython.utils.py3compat import PY3
+
+if PY3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+
+
+class RichOutput(object):
def __init__(self, data=None, metadata=None, transient=None, update=False):
- self.data = data or {}
- self.metadata = metadata or {}
+ self.data = data or {}
+ self.metadata = metadata or {}
self.transient = transient or {}
self.update = update
- def display(self):
- from IPython.display import publish_display_data
+ def display(self):
+ from IPython.display import publish_display_data
publish_display_data(data=self.data, metadata=self.metadata,
transient=self.transient, update=self.update)
- def _repr_mime_(self, mime):
- if mime not in self.data:
- return
- data = self.data[mime]
- if mime in self.metadata:
- return data, self.metadata[mime]
- else:
- return data
-
- def _repr_html_(self):
- return self._repr_mime_("text/html")
-
- def _repr_latex_(self):
- return self._repr_mime_("text/latex")
-
- def _repr_json_(self):
- return self._repr_mime_("application/json")
-
- def _repr_javascript_(self):
- return self._repr_mime_("application/javascript")
-
- def _repr_png_(self):
- return self._repr_mime_("image/png")
-
- def _repr_jpeg_(self):
- return self._repr_mime_("image/jpeg")
-
- def _repr_svg_(self):
- return self._repr_mime_("image/svg+xml")
-
-
-class CapturedIO(object):
- """Simple object for containing captured stdout/err and rich display StringIO objects
-
- Each instance `c` has three attributes:
-
- - ``c.stdout`` : standard output as a string
- - ``c.stderr`` : standard error as a string
- - ``c.outputs``: a list of rich display outputs
-
- Additionally, there's a ``c.show()`` method which will print all of the
- above in the same order, and can be invoked simply via ``c()``.
- """
-
- def __init__(self, stdout, stderr, outputs=None):
- self._stdout = stdout
- self._stderr = stderr
- if outputs is None:
- outputs = []
- self._outputs = outputs
-
- def __str__(self):
- return self.stdout
-
- @property
- def stdout(self):
- "Captured standard output"
- if not self._stdout:
- return ''
- return self._stdout.getvalue()
-
- @property
- def stderr(self):
- "Captured standard error"
- if not self._stderr:
- return ''
- return self._stderr.getvalue()
-
- @property
- def outputs(self):
- """A list of the captured rich display outputs, if any.
-
- If you have a CapturedIO object ``c``, these can be displayed in IPython
- using::
-
- from IPython.display import display
- for o in c.outputs:
- display(o)
- """
+ def _repr_mime_(self, mime):
+ if mime not in self.data:
+ return
+ data = self.data[mime]
+ if mime in self.metadata:
+ return data, self.metadata[mime]
+ else:
+ return data
+
+ def _repr_html_(self):
+ return self._repr_mime_("text/html")
+
+ def _repr_latex_(self):
+ return self._repr_mime_("text/latex")
+
+ def _repr_json_(self):
+ return self._repr_mime_("application/json")
+
+ def _repr_javascript_(self):
+ return self._repr_mime_("application/javascript")
+
+ def _repr_png_(self):
+ return self._repr_mime_("image/png")
+
+ def _repr_jpeg_(self):
+ return self._repr_mime_("image/jpeg")
+
+ def _repr_svg_(self):
+ return self._repr_mime_("image/svg+xml")
+
+
+class CapturedIO(object):
+ """Simple object for containing captured stdout/err and rich display StringIO objects
+
+ Each instance `c` has three attributes:
+
+ - ``c.stdout`` : standard output as a string
+ - ``c.stderr`` : standard error as a string
+ - ``c.outputs``: a list of rich display outputs
+
+ Additionally, there's a ``c.show()`` method which will print all of the
+ above in the same order, and can be invoked simply via ``c()``.
+ """
+
+ def __init__(self, stdout, stderr, outputs=None):
+ self._stdout = stdout
+ self._stderr = stderr
+ if outputs is None:
+ outputs = []
+ self._outputs = outputs
+
+ def __str__(self):
+ return self.stdout
+
+ @property
+ def stdout(self):
+ "Captured standard output"
+ if not self._stdout:
+ return ''
+ return self._stdout.getvalue()
+
+ @property
+ def stderr(self):
+ "Captured standard error"
+ if not self._stderr:
+ return ''
+ return self._stderr.getvalue()
+
+ @property
+ def outputs(self):
+ """A list of the captured rich display outputs, if any.
+
+ If you have a CapturedIO object ``c``, these can be displayed in IPython
+ using::
+
+ from IPython.display import display
+ for o in c.outputs:
+ display(o)
+ """
return [ RichOutput(**kargs) for kargs in self._outputs ]
- def show(self):
- """write my output to sys.stdout/err as appropriate"""
- sys.stdout.write(self.stdout)
- sys.stderr.write(self.stderr)
- sys.stdout.flush()
- sys.stderr.flush()
+ def show(self):
+ """write my output to sys.stdout/err as appropriate"""
+ sys.stdout.write(self.stdout)
+ sys.stderr.write(self.stderr)
+ sys.stdout.flush()
+ sys.stderr.flush()
for kargs in self._outputs:
RichOutput(**kargs).display()
- __call__ = show
-
-
-class capture_output(object):
- """context manager for capturing stdout/err"""
- stdout = True
- stderr = True
- display = True
-
- def __init__(self, stdout=True, stderr=True, display=True):
- self.stdout = stdout
- self.stderr = stderr
- self.display = display
- self.shell = None
-
- def __enter__(self):
- from IPython.core.getipython import get_ipython
- from IPython.core.displaypub import CapturingDisplayPublisher
+ __call__ = show
+
+
+class capture_output(object):
+ """context manager for capturing stdout/err"""
+ stdout = True
+ stderr = True
+ display = True
+
+ def __init__(self, stdout=True, stderr=True, display=True):
+ self.stdout = stdout
+ self.stderr = stderr
+ self.display = display
+ self.shell = None
+
+ def __enter__(self):
+ from IPython.core.getipython import get_ipython
+ from IPython.core.displaypub import CapturingDisplayPublisher
from IPython.core.displayhook import CapturingDisplayHook
- self.sys_stdout = sys.stdout
- self.sys_stderr = sys.stderr
-
- if self.display:
- self.shell = get_ipython()
- if self.shell is None:
- self.save_display_pub = None
- self.display = False
-
- stdout = stderr = outputs = None
- if self.stdout:
- stdout = sys.stdout = StringIO()
- if self.stderr:
- stderr = sys.stderr = StringIO()
- if self.display:
- self.save_display_pub = self.shell.display_pub
- self.shell.display_pub = CapturingDisplayPublisher()
- outputs = self.shell.display_pub.outputs
+ self.sys_stdout = sys.stdout
+ self.sys_stderr = sys.stderr
+
+ if self.display:
+ self.shell = get_ipython()
+ if self.shell is None:
+ self.save_display_pub = None
+ self.display = False
+
+ stdout = stderr = outputs = None
+ if self.stdout:
+ stdout = sys.stdout = StringIO()
+ if self.stderr:
+ stderr = sys.stderr = StringIO()
+ if self.display:
+ self.save_display_pub = self.shell.display_pub
+ self.shell.display_pub = CapturingDisplayPublisher()
+ outputs = self.shell.display_pub.outputs
self.save_display_hook = sys.displayhook
sys.displayhook = CapturingDisplayHook(shell=self.shell,
outputs=outputs)
- return CapturedIO(stdout, stderr, outputs)
+ return CapturedIO(stdout, stderr, outputs)
- def __exit__(self, exc_type, exc_value, traceback):
- sys.stdout = self.sys_stdout
- sys.stderr = self.sys_stderr
- if self.display and self.shell:
- self.shell.display_pub = self.save_display_pub
+ def __exit__(self, exc_type, exc_value, traceback):
+ sys.stdout = self.sys_stdout
+ sys.stderr = self.sys_stderr
+ if self.display and self.shell:
+ self.shell.display_pub = self.save_display_pub
sys.displayhook = self.save_display_hook
-
-
+
+
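+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   >>> from IPython.utils.capture import capture_output
+#   >>> with capture_output() as c:
+#   ...     print('hello')
+#   >>> c.stdout
+#   'hello\n'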
diff --git a/contrib/python/ipython/py2/IPython/utils/coloransi.py b/contrib/python/ipython/py2/IPython/utils/coloransi.py
index 597c69fe11..bc8e8377f7 100644
--- a/contrib/python/ipython/py2/IPython/utils/coloransi.py
+++ b/contrib/python/ipython/py2/IPython/utils/coloransi.py
@@ -1,187 +1,187 @@
-# -*- coding: utf-8 -*-
-"""Tools for coloring text in ANSI terminals.
-"""
-
-#*****************************************************************************
-# Copyright (C) 2002-2006 Fernando Perez. <fperez@colorado.edu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']
-
-import os
-
-from IPython.utils.ipstruct import Struct
-
-color_templates = (
- # Dark colors
- ("Black" , "0;30"),
- ("Red" , "0;31"),
- ("Green" , "0;32"),
- ("Brown" , "0;33"),
- ("Blue" , "0;34"),
- ("Purple" , "0;35"),
- ("Cyan" , "0;36"),
- ("LightGray" , "0;37"),
- # Light colors
- ("DarkGray" , "1;30"),
- ("LightRed" , "1;31"),
- ("LightGreen" , "1;32"),
- ("Yellow" , "1;33"),
- ("LightBlue" , "1;34"),
- ("LightPurple" , "1;35"),
- ("LightCyan" , "1;36"),
- ("White" , "1;37"),
- # Blinking colors. Probably should not be used in anything serious.
- ("BlinkBlack" , "5;30"),
- ("BlinkRed" , "5;31"),
- ("BlinkGreen" , "5;32"),
- ("BlinkYellow" , "5;33"),
- ("BlinkBlue" , "5;34"),
- ("BlinkPurple" , "5;35"),
- ("BlinkCyan" , "5;36"),
- ("BlinkLightGray", "5;37"),
- )
-
-def make_color_table(in_class):
- """Build a set of color attributes in a class.
-
- Helper function for building the :class:`TermColors` and
- :class`InputTermColors`.
- """
- for name,value in color_templates:
- setattr(in_class,name,in_class._base % value)
-
-class TermColors:
- """Color escape sequences.
-
- This class defines the escape sequences for all the standard (ANSI?)
- colors in terminals. Also defines a NoColor escape which is just the null
- string, suitable for defining 'dummy' color schemes in terminals which get
- confused by color escapes.
-
- This class should be used as a mixin for building color schemes."""
-
- NoColor = '' # for color schemes in color-less terminals.
- Normal = '\033[0m' # Reset normal coloring
- _base = '\033[%sm' # Template for all other colors
-
-# Build the actual color table as a set of class attributes:
-make_color_table(TermColors)
-
-class InputTermColors:
- """Color escape sequences for input prompts.
-
- This class is similar to TermColors, but the escapes are wrapped in \001
- and \002 so that readline can properly know the length of each line and
- can wrap lines accordingly. Use this class for any colored text which
- needs to be used in input prompts, such as in calls to raw_input().
-
- This class defines the escape sequences for all the standard (ANSI?)
- colors in terminals. Also defines a NoColor escape which is just the null
- string, suitable for defining 'dummy' color schemes in terminals which get
- confused by color escapes.
-
- This class should be used as a mixin for building color schemes."""
-
- NoColor = '' # for color schemes in color-less terminals.
-
- if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
- # (X)emacs on W32 gets confused with \001 and \002 so we remove them
- Normal = '\033[0m' # Reset normal coloring
- _base = '\033[%sm' # Template for all other colors
- else:
- Normal = '\001\033[0m\002' # Reset normal coloring
- _base = '\001\033[%sm\002' # Template for all other colors
-
-# Build the actual color table as a set of class attributes:
-make_color_table(InputTermColors)
-
-class NoColors:
- """This defines all the same names as the colour classes, but maps them to
- empty strings, so it can easily be substituted to turn off colours."""
- NoColor = ''
- Normal = ''
-
-for name, value in color_templates:
- setattr(NoColors, name, '')
-
-class ColorScheme:
- """Generic color scheme class. Just a name and a Struct."""
- def __init__(self,__scheme_name_,colordict=None,**colormap):
- self.name = __scheme_name_
- if colordict is None:
- self.colors = Struct(**colormap)
- else:
- self.colors = Struct(colordict)
-
- def copy(self,name=None):
- """Return a full copy of the object, optionally renaming it."""
- if name is None:
- name = self.name
- return ColorScheme(name, self.colors.dict())
-
-class ColorSchemeTable(dict):
- """General class to handle tables of color schemes.
-
- It's basically a dict of color schemes with a couple of shorthand
- attributes and some convenient methods.
-
- active_scheme_name -> obvious
- active_colors -> actual color table of the active scheme"""
-
- def __init__(self, scheme_list=None, default_scheme=''):
- """Create a table of color schemes.
-
- The table can be created empty and manually filled or it can be
- created with a list of valid color schemes AND the specification for
- the default active scheme.
- """
-
- # create object attributes to be set later
- self.active_scheme_name = ''
- self.active_colors = None
-
- if scheme_list:
- if default_scheme == '':
- raise ValueError('you must specify the default color scheme')
- for scheme in scheme_list:
- self.add_scheme(scheme)
- self.set_active_scheme(default_scheme)
-
- def copy(self):
- """Return full copy of object"""
- return ColorSchemeTable(self.values(),self.active_scheme_name)
-
- def add_scheme(self,new_scheme):
- """Add a new color scheme to the table."""
- if not isinstance(new_scheme,ColorScheme):
- raise ValueError('ColorSchemeTable only accepts ColorScheme instances')
- self[new_scheme.name] = new_scheme
-
- def set_active_scheme(self,scheme,case_sensitive=0):
- """Set the currently active scheme.
-
- Names are by default compared in a case-insensitive way, but this can
- be changed by setting the parameter case_sensitive to true."""
-
- scheme_names = list(self.keys())
- if case_sensitive:
- valid_schemes = scheme_names
- scheme_test = scheme
- else:
- valid_schemes = [s.lower() for s in scheme_names]
- scheme_test = scheme.lower()
- try:
- scheme_idx = valid_schemes.index(scheme_test)
- except ValueError:
- raise ValueError('Unrecognized color scheme: ' + scheme + \
- '\nValid schemes: '+str(scheme_names).replace("'', ",''))
- else:
- active = scheme_names[scheme_idx]
- self.active_scheme_name = active
- self.active_colors = self[active].colors
- # Now allow using '' as an index for the current active scheme
- self[''] = self[active]
+# -*- coding: utf-8 -*-
+"""Tools for coloring text in ANSI terminals.
+"""
+
+#*****************************************************************************
+# Copyright (C) 2002-2006 Fernando Perez. <fperez@colorado.edu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']
+
+import os
+
+from IPython.utils.ipstruct import Struct
+
+color_templates = (
+ # Dark colors
+ ("Black" , "0;30"),
+ ("Red" , "0;31"),
+ ("Green" , "0;32"),
+ ("Brown" , "0;33"),
+ ("Blue" , "0;34"),
+ ("Purple" , "0;35"),
+ ("Cyan" , "0;36"),
+ ("LightGray" , "0;37"),
+ # Light colors
+ ("DarkGray" , "1;30"),
+ ("LightRed" , "1;31"),
+ ("LightGreen" , "1;32"),
+ ("Yellow" , "1;33"),
+ ("LightBlue" , "1;34"),
+ ("LightPurple" , "1;35"),
+ ("LightCyan" , "1;36"),
+ ("White" , "1;37"),
+ # Blinking colors. Probably should not be used in anything serious.
+ ("BlinkBlack" , "5;30"),
+ ("BlinkRed" , "5;31"),
+ ("BlinkGreen" , "5;32"),
+ ("BlinkYellow" , "5;33"),
+ ("BlinkBlue" , "5;34"),
+ ("BlinkPurple" , "5;35"),
+ ("BlinkCyan" , "5;36"),
+ ("BlinkLightGray", "5;37"),
+ )
+
+def make_color_table(in_class):
+ """Build a set of color attributes in a class.
+
+ Helper function for building the :class:`TermColors` and
+ :class:`InputTermColors`.
+ """
+ for name,value in color_templates:
+ setattr(in_class,name,in_class._base % value)
+
+class TermColors:
+ """Color escape sequences.
+
+ This class defines the escape sequences for all the standard (ANSI?)
+ colors in terminals. Also defines a NoColor escape which is just the null
+ string, suitable for defining 'dummy' color schemes in terminals which get
+ confused by color escapes.
+
+ This class should be used as a mixin for building color schemes."""
+
+ NoColor = '' # for color schemes in color-less terminals.
+ Normal = '\033[0m' # Reset normal coloring
+ _base = '\033[%sm' # Template for all other colors
+
+# Build the actual color table as a set of class attributes:
+make_color_table(TermColors)
+
+class InputTermColors:
+ """Color escape sequences for input prompts.
+
+ This class is similar to TermColors, but the escapes are wrapped in \001
+ and \002 so that readline can properly know the length of each line and
+ can wrap lines accordingly. Use this class for any colored text which
+ needs to be used in input prompts, such as in calls to raw_input().
+
+ This class defines the escape sequences for all the standard (ANSI?)
+ colors in terminals. Also defines a NoColor escape which is just the null
+ string, suitable for defining 'dummy' color schemes in terminals which get
+ confused by color escapes.
+
+ This class should be used as a mixin for building color schemes."""
+
+ NoColor = '' # for color schemes in color-less terminals.
+
+ if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
+ # (X)emacs on W32 gets confused with \001 and \002 so we remove them
+ Normal = '\033[0m' # Reset normal coloring
+ _base = '\033[%sm' # Template for all other colors
+ else:
+ Normal = '\001\033[0m\002' # Reset normal coloring
+ _base = '\001\033[%sm\002' # Template for all other colors
+
+# Build the actual color table as a set of class attributes:
+make_color_table(InputTermColors)
+
+class NoColors:
+ """This defines all the same names as the colour classes, but maps them to
+ empty strings, so it can easily be substituted to turn off colours."""
+ NoColor = ''
+ Normal = ''
+
+for name, value in color_templates:
+ setattr(NoColors, name, '')
+
+class ColorScheme:
+ """Generic color scheme class. Just a name and a Struct."""
+ def __init__(self,__scheme_name_,colordict=None,**colormap):
+ self.name = __scheme_name_
+ if colordict is None:
+ self.colors = Struct(**colormap)
+ else:
+ self.colors = Struct(colordict)
+
+ def copy(self,name=None):
+ """Return a full copy of the object, optionally renaming it."""
+ if name is None:
+ name = self.name
+ return ColorScheme(name, self.colors.dict())
+
+class ColorSchemeTable(dict):
+ """General class to handle tables of color schemes.
+
+ It's basically a dict of color schemes with a couple of shorthand
+ attributes and some convenient methods.
+
+ active_scheme_name -> obvious
+ active_colors -> actual color table of the active scheme"""
+
+ def __init__(self, scheme_list=None, default_scheme=''):
+ """Create a table of color schemes.
+
+ The table can be created empty and manually filled or it can be
+ created with a list of valid color schemes AND the specification for
+ the default active scheme.
+ """
+
+ # create object attributes to be set later
+ self.active_scheme_name = ''
+ self.active_colors = None
+
+ if scheme_list:
+ if default_scheme == '':
+ raise ValueError('you must specify the default color scheme')
+ for scheme in scheme_list:
+ self.add_scheme(scheme)
+ self.set_active_scheme(default_scheme)
+
+ def copy(self):
+ """Return full copy of object"""
+ return ColorSchemeTable(self.values(),self.active_scheme_name)
+
+ def add_scheme(self,new_scheme):
+ """Add a new color scheme to the table."""
+ if not isinstance(new_scheme,ColorScheme):
+ raise ValueError('ColorSchemeTable only accepts ColorScheme instances')
+ self[new_scheme.name] = new_scheme
+
+ def set_active_scheme(self,scheme,case_sensitive=0):
+ """Set the currently active scheme.
+
+ Names are by default compared in a case-insensitive way, but this can
+ be changed by setting the parameter case_sensitive to true."""
+
+ scheme_names = list(self.keys())
+ if case_sensitive:
+ valid_schemes = scheme_names
+ scheme_test = scheme
+ else:
+ valid_schemes = [s.lower() for s in scheme_names]
+ scheme_test = scheme.lower()
+ try:
+ scheme_idx = valid_schemes.index(scheme_test)
+ except ValueError:
+ raise ValueError('Unrecognized color scheme: ' + scheme + \
+ '\nValid schemes: '+str(scheme_names).replace("'', ",''))
+ else:
+ active = scheme_names[scheme_idx]
+ self.active_scheme_name = active
+ self.active_colors = self[active].colors
+ # Now allow using '' as an index for the current active scheme
+ self[''] = self[active]
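+
+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   from IPython.utils.coloransi import TermColors as TC
+#   print(TC.Green + 'ok' + TC.Normal)   # renders 'ok' in green on ANSI terminals
+#   scheme = ColorScheme('demo', prompt=TC.Blue, error=TC.LightRed)
+#   scheme.colors.prompt == TC.Blue      # -> True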
diff --git a/contrib/python/ipython/py2/IPython/utils/contexts.py b/contrib/python/ipython/py2/IPython/utils/contexts.py
index 358dfe8b29..4d379b0eda 100644
--- a/contrib/python/ipython/py2/IPython/utils/contexts.py
+++ b/contrib/python/ipython/py2/IPython/utils/contexts.py
@@ -1,66 +1,66 @@
-# encoding: utf-8
-"""Miscellaneous context managers.
-"""
-
+# encoding: utf-8
+"""Miscellaneous context managers.
+"""
+
import warnings
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-class preserve_keys(object):
- """Preserve a set of keys in a dictionary.
-
- Upon entering the context manager the current values of the keys
- will be saved. Upon exiting, the dictionary will be updated to
- restore the original value of the preserved keys. Preserved keys
- which did not exist when entering the context manager will be
- deleted.
-
- Examples
- --------
-
- >>> d = {'a': 1, 'b': 2, 'c': 3}
- >>> with preserve_keys(d, 'b', 'c', 'd'):
- ... del d['a']
- ... del d['b'] # will be reset to 2
- ... d['c'] = None # will be reset to 3
- ... d['d'] = 4 # will be deleted
- ... d['e'] = 5
- ... print(sorted(d.items()))
- ...
- [('c', None), ('d', 4), ('e', 5)]
- >>> print(sorted(d.items()))
- [('b', 2), ('c', 3), ('e', 5)]
- """
-
- def __init__(self, dictionary, *keys):
- self.dictionary = dictionary
- self.keys = keys
-
- def __enter__(self):
- # Actions to perform upon exiting.
- to_delete = []
- to_update = {}
-
- d = self.dictionary
- for k in self.keys:
- if k in d:
- to_update[k] = d[k]
- else:
- to_delete.append(k)
-
- self.to_delete = to_delete
- self.to_update = to_update
-
- def __exit__(self, *exc_info):
- d = self.dictionary
-
- for k in self.to_delete:
- d.pop(k, None)
- d.update(self.to_update)
-
-
-class NoOpContext(object):
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+class preserve_keys(object):
+ """Preserve a set of keys in a dictionary.
+
+ Upon entering the context manager the current values of the keys
+ will be saved. Upon exiting, the dictionary will be updated to
+ restore the original value of the preserved keys. Preserved keys
+ which did not exist when entering the context manager will be
+ deleted.
+
+ Examples
+ --------
+
+ >>> d = {'a': 1, 'b': 2, 'c': 3}
+ >>> with preserve_keys(d, 'b', 'c', 'd'):
+ ... del d['a']
+ ... del d['b'] # will be reset to 2
+ ... d['c'] = None # will be reset to 3
+ ... d['d'] = 4 # will be deleted
+ ... d['e'] = 5
+ ... print(sorted(d.items()))
+ ...
+ [('c', None), ('d', 4), ('e', 5)]
+ >>> print(sorted(d.items()))
+ [('b', 2), ('c', 3), ('e', 5)]
+ """
+
+ def __init__(self, dictionary, *keys):
+ self.dictionary = dictionary
+ self.keys = keys
+
+ def __enter__(self):
+ # Actions to perform upon exiting.
+ to_delete = []
+ to_update = {}
+
+ d = self.dictionary
+ for k in self.keys:
+ if k in d:
+ to_update[k] = d[k]
+ else:
+ to_delete.append(k)
+
+ self.to_delete = to_delete
+ self.to_update = to_update
+
+ def __exit__(self, *exc_info):
+ d = self.dictionary
+
+ for k in self.to_delete:
+ d.pop(k, None)
+ d.update(self.to_update)
+
+
+class NoOpContext(object):
"""
Deprecated
@@ -70,5 +70,5 @@ class NoOpContext(object):
warnings.warn("""NoOpContext is deprecated since IPython 5.0 """,
DeprecationWarning, stacklevel=2)
- def __enter__(self): pass
- def __exit__(self, type, value, traceback): pass
+ def __enter__(self): pass
+ def __exit__(self, type, value, traceback): pass
diff --git a/contrib/python/ipython/py2/IPython/utils/daemonize.py b/contrib/python/ipython/py2/IPython/utils/daemonize.py
index f093cf67cb..a1bfaa193b 100644
--- a/contrib/python/ipython/py2/IPython/utils/daemonize.py
+++ b/contrib/python/ipython/py2/IPython/utils/daemonize.py
@@ -1,4 +1,4 @@
-from warnings import warn
-
-warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize")
-from ipyparallel.apps.daemonize import daemonize
+from warnings import warn
+
+warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize")
+from ipyparallel.apps.daemonize import daemonize
diff --git a/contrib/python/ipython/py2/IPython/utils/data.py b/contrib/python/ipython/py2/IPython/utils/data.py
index 36a8aabd95..308a692559 100644
--- a/contrib/python/ipython/py2/IPython/utils/data.py
+++ b/contrib/python/ipython/py2/IPython/utils/data.py
@@ -1,37 +1,37 @@
-# encoding: utf-8
-"""Utilities for working with data structures like lists, dicts and tuples.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-from .py3compat import xrange
-
-def uniq_stable(elems):
- """uniq_stable(elems) -> list
-
- Return from an iterable, a list of all the unique elements in the input,
- but maintaining the order in which they first appear.
-
- Note: All elements in the input must be hashable for this routine
- to work, as it internally uses a set for efficiency reasons.
- """
- seen = set()
- return [x for x in elems if x not in seen and not seen.add(x)]
-
-
-def flatten(seq):
- """Flatten a list of lists (NOT recursive, only works for 2d lists)."""
-
- return [x for subseq in seq for x in subseq]
-
-
-def chop(seq, size):
- """Chop a sequence into chunks of the given size."""
- return [seq[i:i+size] for i in xrange(0,len(seq),size)]
-
-
+# encoding: utf-8
+"""Utilities for working with data structures like lists, dicts and tuples.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+from .py3compat import xrange
+
+def uniq_stable(elems):
+ """uniq_stable(elems) -> list
+
+ Return from an iterable a list of all the unique elements in the input,
+ maintaining the order in which they first appear.
+
+ Note: All elements in the input must be hashable for this routine
+ to work, as it internally uses a set for efficiency reasons.
+ """
+ seen = set()
+ return [x for x in elems if x not in seen and not seen.add(x)]
+
+
+def flatten(seq):
+ """Flatten a list of lists (NOT recursive, only works for 2d lists)."""
+
+ return [x for subseq in seq for x in subseq]
+
+
+def chop(seq, size):
+ """Chop a sequence into chunks of the given size."""
+ return [seq[i:i+size] for i in xrange(0,len(seq),size)]
+
+
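+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   >>> uniq_stable([3, 1, 3, 2, 1])
+#   [3, 1, 2]
+#   >>> flatten([[1, 2], [3]])
+#   [1, 2, 3]
+#   >>> chop('abcdef', 2)
+#   ['ab', 'cd', 'ef']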
diff --git a/contrib/python/ipython/py2/IPython/utils/decorators.py b/contrib/python/ipython/py2/IPython/utils/decorators.py
index 79be8ca1e6..c26485553c 100644
--- a/contrib/python/ipython/py2/IPython/utils/decorators.py
+++ b/contrib/python/ipython/py2/IPython/utils/decorators.py
@@ -1,58 +1,58 @@
-# encoding: utf-8
-"""Decorators that don't go anywhere else.
-
-This module contains misc. decorators that don't really go with another module
-in :mod:`IPython.utils`. Beore putting something here please see if it should
-go into another topical module in :mod:`IPython.utils`.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def flag_calls(func):
- """Wrap a function to detect and flag when it gets called.
-
- This is a decorator which takes a function and wraps it in a function with
- a 'called' attribute. wrapper.called is initialized to False.
-
- The wrapper.called attribute is set to False right before each call to the
- wrapped function, so if the call fails it remains False. After the call
- completes, wrapper.called is set to True and the output is returned.
-
- Testing for truth in wrapper.called allows you to determine if a call to
- func() was attempted and succeeded."""
-
- # don't wrap twice
- if hasattr(func, 'called'):
- return func
-
- def wrapper(*args,**kw):
- wrapper.called = False
- out = func(*args,**kw)
- wrapper.called = True
- return out
-
- wrapper.called = False
- wrapper.__doc__ = func.__doc__
- return wrapper
-
-def undoc(func):
- """Mark a function or class as undocumented.
-
- This is found by inspecting the AST, so for now it must be used directly
- as @undoc, not as e.g. @decorators.undoc
- """
- return func
-
+# encoding: utf-8
+"""Decorators that don't go anywhere else.
+
+This module contains misc. decorators that don't really go with another module
+in :mod:`IPython.utils`. Before putting something here please see if it should
+go into another topical module in :mod:`IPython.utils`.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def flag_calls(func):
+ """Wrap a function to detect and flag when it gets called.
+
+ This is a decorator which takes a function and wraps it in a function with
+ a 'called' attribute. wrapper.called is initialized to False.
+
+ The wrapper.called attribute is set to False right before each call to the
+ wrapped function, so if the call fails it remains False. After the call
+ completes, wrapper.called is set to True and the output is returned.
+
+ Testing for truth in wrapper.called allows you to determine if a call to
+ func() was attempted and succeeded."""
+
+ # don't wrap twice
+ if hasattr(func, 'called'):
+ return func
+
+ def wrapper(*args,**kw):
+ wrapper.called = False
+ out = func(*args,**kw)
+ wrapper.called = True
+ return out
+
+ wrapper.called = False
+ wrapper.__doc__ = func.__doc__
+ return wrapper
+
+def undoc(func):
+ """Mark a function or class as undocumented.
+
+ This is found by inspecting the AST, so for now it must be used directly
+ as @undoc, not as e.g. @decorators.undoc
+ """
+ return func
+
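+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   >>> @flag_calls
+#   ... def f():
+#   ...     return 42
+#   >>> f.called
+#   False
+#   >>> f()
+#   42
+#   >>> f.called
+#   True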
diff --git a/contrib/python/ipython/py2/IPython/utils/dir2.py b/contrib/python/ipython/py2/IPython/utils/dir2.py
index fb0cd719ef..f6f164f9b1 100644
--- a/contrib/python/ipython/py2/IPython/utils/dir2.py
+++ b/contrib/python/ipython/py2/IPython/utils/dir2.py
@@ -1,51 +1,51 @@
-# encoding: utf-8
-"""A fancy version of Python's builtin :func:`dir` function.
-"""
-
+# encoding: utf-8
+"""A fancy version of Python's builtin :func:`dir` function.
+"""
+
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
+
import inspect
-from .py3compat import string_types
-
-
-def safe_hasattr(obj, attr):
- """In recent versions of Python, hasattr() only catches AttributeError.
- This catches all errors.
- """
- try:
- getattr(obj, attr)
- return True
- except:
- return False
-
-
-def dir2(obj):
- """dir2(obj) -> list of strings
-
- Extended version of the Python builtin dir(), which does a few extra
- checks.
-
- This version is guaranteed to return only a list of true strings, whereas
- dir() returns anything that objects inject into themselves, even if they
- are later not really valid for attribute access (many extension libraries
- have such bugs).
- """
-
- # Start building the attribute list via dir(), and then complete it
- # with a few extra special-purpose calls.
-
- try:
- words = set(dir(obj))
- except Exception:
- # TypeError: dir(obj) does not return a list
- words = set()
-
- # filter out non-string attributes which may be stuffed by dir() calls
- # and poor coding in third-party modules
-
- words = [w for w in words if isinstance(w, string_types)]
- return sorted(words)
+from .py3compat import string_types
+
+
+def safe_hasattr(obj, attr):
+ """In recent versions of Python, hasattr() only catches AttributeError.
+ This catches all errors.
+ """
+ try:
+ getattr(obj, attr)
+ return True
+ except:
+ return False
+
+
+def dir2(obj):
+ """dir2(obj) -> list of strings
+
+ Extended version of the Python builtin dir(), which does a few extra
+ checks.
+
+ This version is guaranteed to return only a list of true strings, whereas
+ dir() returns anything that objects inject into themselves, even if they
+ are later not really valid for attribute access (many extension libraries
+ have such bugs).
+ """
+
+ # Start building the attribute list via dir(), and then complete it
+ # with a few extra special-purpose calls.
+
+ try:
+ words = set(dir(obj))
+ except Exception:
+ # TypeError: dir(obj) does not return a list
+ words = set()
+
+ # filter out non-string attributes which may be stuffed by dir() calls
+ # and poor coding in third-party modules
+
+ words = [w for w in words if isinstance(w, string_types)]
+ return sorted(words)
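+
+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   >>> class C(object):
+#   ...     x = 1
+#   >>> 'x' in dir2(C())
+#   True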
def get_real_method(obj, name):
diff --git a/contrib/python/ipython/py2/IPython/utils/encoding.py b/contrib/python/ipython/py2/IPython/utils/encoding.py
index ba8ca09534..387a24700c 100644
--- a/contrib/python/ipython/py2/IPython/utils/encoding.py
+++ b/contrib/python/ipython/py2/IPython/utils/encoding.py
@@ -1,71 +1,71 @@
-# coding: utf-8
-"""
-Utilities for dealing with text encodings
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2012 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-import sys
-import locale
-import warnings
-
-# to deal with the possibility of sys.std* not being a stream at all
-def get_stream_enc(stream, default=None):
- """Return the given stream's encoding or a default.
-
- There are cases where ``sys.std*`` might not actually be a stream, so
- check for the encoding attribute prior to returning it, and return
- a default if it doesn't exist or evaluates as False. ``default``
- is None if not provided.
- """
- if not hasattr(stream, 'encoding') or not stream.encoding:
- return default
- else:
- return stream.encoding
-
-# Less conservative replacement for sys.getdefaultencoding, that will try
-# to match the environment.
-# Defined here as central function, so if we find better choices, we
-# won't need to make changes all over IPython.
-def getdefaultencoding(prefer_stream=True):
- """Return IPython's guess for the default encoding for bytes as text.
-
- If prefer_stream is True (default), asks for stdin.encoding first,
- to match the calling Terminal, but that is often None for subprocesses.
-
- Then fall back on locale.getpreferredencoding(),
- which should be a sensible platform default (that respects LANG environment),
- and finally to sys.getdefaultencoding() which is the most conservative option,
- and usually ASCII on Python 2 or UTF8 on Python 3.
- """
- enc = None
- if prefer_stream:
- enc = get_stream_enc(sys.stdin)
- if not enc or enc=='ascii':
- try:
- # There are reports of getpreferredencoding raising errors
- # in some cases, which may well be fixed, but let's be conservative here.
- enc = locale.getpreferredencoding()
- except Exception:
- pass
- enc = enc or sys.getdefaultencoding()
- # On windows `cp0` can be returned to indicate that there is no code page.
- # Since cp0 is an invalid encoding return instead cp1252 which is the
- # Western European default.
- if enc == 'cp0':
- warnings.warn(
- "Invalid code page cp0 detected - using cp1252 instead."
- "If cp1252 is incorrect please ensure a valid code page "
- "is defined for the process.", RuntimeWarning)
- return 'cp1252'
- return enc
-
-DEFAULT_ENCODING = getdefaultencoding()
+# coding: utf-8
+"""
+Utilities for dealing with text encodings
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2012 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+import sys
+import locale
+import warnings
+
+# to deal with the possibility of sys.std* not being a stream at all
+def get_stream_enc(stream, default=None):
+ """Return the given stream's encoding or a default.
+
+ There are cases where ``sys.std*`` might not actually be a stream, so
+ check for the encoding attribute prior to returning it, and return
+ a default if it doesn't exist or evaluates as False. ``default``
+ is None if not provided.
+ """
+ if not hasattr(stream, 'encoding') or not stream.encoding:
+ return default
+ else:
+ return stream.encoding
+
+# Less conservative replacement for sys.getdefaultencoding, that will try
+# to match the environment.
+# Defined here as central function, so if we find better choices, we
+# won't need to make changes all over IPython.
+def getdefaultencoding(prefer_stream=True):
+ """Return IPython's guess for the default encoding for bytes as text.
+
+ If prefer_stream is True (default), asks for stdin.encoding first,
+ to match the calling Terminal, but that is often None for subprocesses.
+
+ Then fall back on locale.getpreferredencoding(),
+ which should be a sensible platform default (that respects LANG environment),
+ and finally to sys.getdefaultencoding() which is the most conservative option,
+ and usually ASCII on Python 2 or UTF8 on Python 3.
+ """
+ enc = None
+ if prefer_stream:
+ enc = get_stream_enc(sys.stdin)
+ if not enc or enc=='ascii':
+ try:
+ # There are reports of getpreferredencoding raising errors
+ # in some cases, which may well be fixed, but let's be conservative here.
+ enc = locale.getpreferredencoding()
+ except Exception:
+ pass
+ enc = enc or sys.getdefaultencoding()
+ # On windows `cp0` can be returned to indicate that there is no code page.
+ # Since cp0 is an invalid encoding return instead cp1252 which is the
+ # Western European default.
+ if enc == 'cp0':
+ warnings.warn(
+ "Invalid code page cp0 detected - using cp1252 instead."
+ "If cp1252 is incorrect please ensure a valid code page "
+ "is defined for the process.", RuntimeWarning)
+ return 'cp1252'
+ return enc
+
+DEFAULT_ENCODING = getdefaultencoding()
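+
+# Illustrative usage (editorial sketch, not part of upstream IPython):
+#
+#   >>> import sys
+#   >>> get_stream_enc(sys.stdin, 'utf-8') in (sys.stdin.encoding, 'utf-8')
+#   True
+#   >>> isinstance(getdefaultencoding(), str)
+#   True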
diff --git a/contrib/python/ipython/py2/IPython/utils/eventful.py b/contrib/python/ipython/py2/IPython/utils/eventful.py
index e954a45e0a..fc0f7aee4f 100644
--- a/contrib/python/ipython/py2/IPython/utils/eventful.py
+++ b/contrib/python/ipython/py2/IPython/utils/eventful.py
@@ -1,7 +1,7 @@
-from __future__ import absolute_import
-
-from warnings import warn
-
-warn("IPython.utils.eventful has moved to traitlets.eventful")
-
-from traitlets.eventful import *
+from __future__ import absolute_import
+
+from warnings import warn
+
+warn("IPython.utils.eventful has moved to traitlets.eventful")
+
+from traitlets.eventful import *
diff --git a/contrib/python/ipython/py2/IPython/utils/frame.py b/contrib/python/ipython/py2/IPython/utils/frame.py
index ebf9e47bf9..76ccc71c44 100644
--- a/contrib/python/ipython/py2/IPython/utils/frame.py
+++ b/contrib/python/ipython/py2/IPython/utils/frame.py
@@ -1,98 +1,98 @@
-# encoding: utf-8
-"""
-Utilities for working with stack frames.
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-from IPython.utils import py3compat
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-@py3compat.doctest_refactor_print
-def extract_vars(*names,**kw):
- """Extract a set of variables by name from another frame.
-
- Parameters
- ----------
- *names : str
- One or more variable names which will be extracted from the caller's
- frame.
-
- depth : integer, optional
- How many frames in the stack to walk when looking for your variables.
- The default is 0, which will use the frame where the call was made.
-
-
- Examples
- --------
- ::
-
- In [2]: def func(x):
- ...: y = 1
- ...: print(sorted(extract_vars('x','y').items()))
- ...:
-
- In [3]: func('hello')
- [('x', 'hello'), ('y', 1)]
- """
-
- depth = kw.get('depth',0)
-
- callerNS = sys._getframe(depth+1).f_locals
- return dict((k,callerNS[k]) for k in names)
-
-
-def extract_vars_above(*names):
- """Extract a set of variables by name from another frame.
-
- Similar to extractVars(), but with a specified depth of 1, so that names
- are exctracted exactly from above the caller.
-
- This is simply a convenience function so that the very common case (for us)
- of skipping exactly 1 frame doesn't have to construct a special dict for
- keyword passing."""
-
- callerNS = sys._getframe(2).f_locals
- return dict((k,callerNS[k]) for k in names)
-
-
-def debugx(expr,pre_msg=''):
- """Print the value of an expression from the caller's frame.
-
- Takes an expression, evaluates it in the caller's frame and prints both
- the given expression and the resulting value (as well as a debug mark
- indicating the name of the calling function. The input must be of a form
- suitable for eval().
-
- An optional message can be passed, which will be prepended to the printed
- expr->value pair."""
-
- cf = sys._getframe(1)
- print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
- eval(expr,cf.f_globals,cf.f_locals)))
-
-
-# deactivate it by uncommenting the following line, which makes it a no-op
-#def debugx(expr,pre_msg=''): pass
-
-def extract_module_locals(depth=0):
- """Returns (module, locals) of the function `depth` frames away from the caller"""
- f = sys._getframe(depth + 1)
- global_ns = f.f_globals
- module = sys.modules[global_ns['__name__']]
- return (module, f.f_locals)
-
+# encoding: utf-8
+"""
+Utilities for working with stack frames.
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+from IPython.utils import py3compat
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+@py3compat.doctest_refactor_print
+def extract_vars(*names,**kw):
+ """Extract a set of variables by name from another frame.
+
+ Parameters
+ ----------
+ *names : str
+ One or more variable names which will be extracted from the caller's
+ frame.
+
+ depth : integer, optional
+ How many frames in the stack to walk when looking for your variables.
+ The default is 0, which will use the frame where the call was made.
+
+
+ Examples
+ --------
+ ::
+
+ In [2]: def func(x):
+ ...: y = 1
+ ...: print(sorted(extract_vars('x','y').items()))
+ ...:
+
+ In [3]: func('hello')
+ [('x', 'hello'), ('y', 1)]
+ """
+
+ depth = kw.get('depth',0)
+
+ callerNS = sys._getframe(depth+1).f_locals
+ return dict((k,callerNS[k]) for k in names)
+
+
+def extract_vars_above(*names):
+ """Extract a set of variables by name from another frame.
+
+ Similar to extract_vars(), but with a fixed depth of 1, so that names
+ are extracted exactly one frame above the caller.
+
+ This is simply a convenience function so that the very common case (for us)
+ of skipping exactly 1 frame doesn't have to construct a special dict for
+ keyword passing."""
+
+ callerNS = sys._getframe(2).f_locals
+ return dict((k,callerNS[k]) for k in names)
+
+
+def debugx(expr,pre_msg=''):
+ """Print the value of an expression from the caller's frame.
+
+ Takes an expression, evaluates it in the caller's frame and prints both
+ the given expression and the resulting value (as well as a debug mark
+ indicating the name of the calling function). The input must be of a
+ form suitable for eval().
+
+ An optional message can be passed, which will be prepended to the printed
+ expr->value pair."""
+
+ cf = sys._getframe(1)
+ print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
+ eval(expr,cf.f_globals,cf.f_locals)))
+
+
+# deactivate it by uncommenting the following line, which makes it a no-op
+#def debugx(expr,pre_msg=''): pass
+
+def extract_module_locals(depth=0):
+ """Returns (module, locals) of the function `depth` frames away from the caller"""
+ f = sys._getframe(depth + 1)
+ global_ns = f.f_globals
+ module = sys.modules[global_ns['__name__']]
+ return (module, f.f_locals)
+
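
A short sketch of extract_vars and extract_module_locals in action, mirroring the doctest above (assumes the module is importable as IPython.utils.frame):

from IPython.utils.frame import extract_module_locals, extract_vars

def func(x):
    y = 1
    # depth=0: look in the frame that made this call, i.e. func's frame.
    print(sorted(extract_vars('x', 'y').items()))
    # (module, locals) of the frame `depth` frames above the caller.
    module, local_ns = extract_module_locals(depth=0)
    print(module.__name__, sorted(local_ns))

func('hello')  # [('x', 'hello'), ('y', 1)], then '__main__' ['x', 'y']
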
diff --git a/contrib/python/ipython/py2/IPython/utils/generics.py b/contrib/python/ipython/py2/IPython/utils/generics.py
index ff856a7e55..5ffdc86ebd 100644
--- a/contrib/python/ipython/py2/IPython/utils/generics.py
+++ b/contrib/python/ipython/py2/IPython/utils/generics.py
@@ -1,34 +1,34 @@
-# encoding: utf-8
-"""Generic functions for extending IPython.
-
-See http://pypi.python.org/pypi/simplegeneric.
-"""
-
-from IPython.core.error import TryNext
-from simplegeneric import generic
-
-
-@generic
-def inspect_object(obj):
- """Called when you do obj?"""
- raise TryNext
-
-
-@generic
-def complete_object(obj, prev_completions):
- """Custom completer dispatching for python objects.
-
- Parameters
- ----------
- obj : object
- The object to complete.
- prev_completions : list
- List of attributes discovered so far.
-
- This should return the list of attributes in obj. If you only wish to
- add to the attributes already discovered normally, return
- own_attrs + prev_completions.
- """
- raise TryNext
-
-
+# encoding: utf-8
+"""Generic functions for extending IPython.
+
+See http://pypi.python.org/pypi/simplegeneric.
+"""
+
+from IPython.core.error import TryNext
+from simplegeneric import generic
+
+
+@generic
+def inspect_object(obj):
+ """Called when you do obj?"""
+ raise TryNext
+
+
+@generic
+def complete_object(obj, prev_completions):
+ """Custom completer dispatching for python objects.
+
+ Parameters
+ ----------
+ obj : object
+ The object to complete.
+ prev_completions : list
+ List of attributes discovered so far.
+
+ This should return the list of attributes in obj. If you only wish to
+ add to the attributes already discovered normally, return
+ own_attrs + prev_completions.
+ """
+ raise TryNext
+
+
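
Since these are simplegeneric functions, extensions hook in per type with when_type; a sketch (the Frobnicator class and its completions are hypothetical):

from IPython.utils.generics import complete_object

class Frobnicator(object):  # hypothetical example type
    pass

@complete_object.when_type(Frobnicator)
def complete_frobnicator(obj, prev_completions):
    # Add names on top of whatever the completer already discovered.
    return ['frob', 'nicate'] + prev_completions

print(complete_object(Frobnicator(), ['shape']))  # ['frob', 'nicate', 'shape']
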
diff --git a/contrib/python/ipython/py2/IPython/utils/importstring.py b/contrib/python/ipython/py2/IPython/utils/importstring.py
index 2c7a2a167e..c8e1840eb3 100644
--- a/contrib/python/ipython/py2/IPython/utils/importstring.py
+++ b/contrib/python/ipython/py2/IPython/utils/importstring.py
@@ -1,39 +1,39 @@
-# encoding: utf-8
-"""
-A simple utility to import something by its string name.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-
-def import_item(name):
- """Import and return ``bar`` given the string ``foo.bar``.
-
- Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
- executing the code ``from foo import bar``.
-
- Parameters
- ----------
- name : string
- The fully qualified name of the module/package being imported.
-
- Returns
- -------
- mod : module object
- The module that was imported.
- """
-
- parts = name.rsplit('.', 1)
- if len(parts) == 2:
- # called with 'foo.bar....'
- package, obj = parts
- module = __import__(package, fromlist=[obj])
- try:
- pak = getattr(module, obj)
- except AttributeError:
- raise ImportError('No module named %s' % obj)
- return pak
- else:
- # called with un-dotted string
- return __import__(parts[0])
+# encoding: utf-8
+"""
+A simple utility to import something by its string name.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+def import_item(name):
+ """Import and return ``bar`` given the string ``foo.bar``.
+
+ Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
+ executing the code ``from foo import bar``.
+
+ Parameters
+ ----------
+ name : string
+ The fully qualified name of the module/package being imported.
+
+ Returns
+ -------
+ mod : module object
+ The module that was imported.
+ """
+
+ parts = name.rsplit('.', 1)
+ if len(parts) == 2:
+ # called with 'foo.bar....'
+ package, obj = parts
+ module = __import__(package, fromlist=[obj])
+ try:
+ pak = getattr(module, obj)
+ except AttributeError:
+ raise ImportError('No module named %s' % obj)
+ return pak
+ else:
+ # called with un-dotted string
+ return __import__(parts[0])
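
A quick sketch of import_item against stdlib names (illustrative only):

from IPython.utils.importstring import import_item

path = import_item('os.path')       # same effect as: from os import path
join = import_item('os.path.join')  # rsplit('.', 1) -> from os.path import join
os_mod = import_item('os')          # un-dotted string: plain __import__('os')
print(join('tmp', 'demo.txt'))      # tmp/demo.txt (tmp\demo.txt on Windows)
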
diff --git a/contrib/python/ipython/py2/IPython/utils/io.py b/contrib/python/ipython/py2/IPython/utils/io.py
index c316c73bcd..036d6e3926 100644
--- a/contrib/python/ipython/py2/IPython/utils/io.py
+++ b/contrib/python/ipython/py2/IPython/utils/io.py
@@ -1,95 +1,95 @@
-# encoding: utf-8
-"""
-IO related utilities.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-from __future__ import absolute_import
-
-
-import atexit
-import os
-import sys
-import tempfile
+# encoding: utf-8
+"""
+IO related utilities.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+
+import atexit
+import os
+import sys
+import tempfile
import warnings
-from warnings import warn
+from warnings import warn
from IPython.utils.decorators import undoc
-from .capture import CapturedIO, capture_output
-from .py3compat import string_types, input, PY3
-
+from .capture import CapturedIO, capture_output
+from .py3compat import string_types, input, PY3
+
@undoc
-class IOStream:
-
+class IOStream:
+
def __init__(self, stream, fallback=None):
warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
DeprecationWarning, stacklevel=2)
- if not hasattr(stream,'write') or not hasattr(stream,'flush'):
- if fallback is not None:
- stream = fallback
- else:
- raise ValueError("fallback required, but not specified")
- self.stream = stream
- self._swrite = stream.write
-
- # clone all methods not overridden:
- def clone(meth):
- return not hasattr(self, meth) and not meth.startswith('_')
- for meth in filter(clone, dir(stream)):
+ if not hasattr(stream,'write') or not hasattr(stream,'flush'):
+ if fallback is not None:
+ stream = fallback
+ else:
+ raise ValueError("fallback required, but not specified")
+ self.stream = stream
+ self._swrite = stream.write
+
+ # clone all methods not overridden:
+ def clone(meth):
+ return not hasattr(self, meth) and not meth.startswith('_')
+ for meth in filter(clone, dir(stream)):
try:
val = getattr(stream, meth)
except AttributeError:
pass
else:
setattr(self, meth, val)
-
- def __repr__(self):
- cls = self.__class__
- tpl = '{mod}.{cls}({args})'
- return tpl.format(mod=cls.__module__, cls=cls.__name__, args=self.stream)
-
- def write(self,data):
+
+ def __repr__(self):
+ cls = self.__class__
+ tpl = '{mod}.{cls}({args})'
+ return tpl.format(mod=cls.__module__, cls=cls.__name__, args=self.stream)
+
+ def write(self,data):
warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
DeprecationWarning, stacklevel=2)
- try:
- self._swrite(data)
- except:
- try:
- # print handles some unicode issues which may trip a plain
- # write() call. Emulate write() by using an empty end
- # argument.
- print(data, end='', file=self.stream)
- except:
- # if we get here, something is seriously broken.
- print('ERROR - failed to write data to stream:', self.stream,
- file=sys.stderr)
-
- def writelines(self, lines):
+ try:
+ self._swrite(data)
+ except:
+ try:
+ # print handles some unicode issues which may trip a plain
+ # write() call. Emulate write() by using an empty end
+ # argument.
+ print(data, end='', file=self.stream)
+ except:
+ # if we get here, something is seriously broken.
+ print('ERROR - failed to write data to stream:', self.stream,
+ file=sys.stderr)
+
+ def writelines(self, lines):
warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
DeprecationWarning, stacklevel=2)
- if isinstance(lines, string_types):
- lines = [lines]
- for line in lines:
- self.write(line)
-
- # This class used to have a writeln method, but regular files and streams
- # in Python don't have this method. We need to keep this completely
- # compatible so we removed it.
-
- @property
- def closed(self):
- return self.stream.closed
-
- def close(self):
- pass
-
-# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
+ if isinstance(lines, string_types):
+ lines = [lines]
+ for line in lines:
+ self.write(line)
+
+ # This class used to have a writeln method, but regular files and streams
+ # in Python don't have this method. We need to keep this completely
+ # compatible so we removed it.
+
+ @property
+ def closed(self):
+ return self.stream.closed
+
+ def close(self):
+ pass
+
+# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
devnull = open(os.devnull, 'w')
-atexit.register(devnull.close)
-
+atexit.register(devnull.close)
+
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
@@ -98,149 +98,149 @@ with warnings.catch_warnings():
stdout = IOStream(sys.stdout, fallback=devnull)
stderr = IOStream(sys.stderr, fallback=devnull)
-class Tee(object):
- """A class to duplicate an output stream to stdout/err.
-
- This works in a manner very similar to the Unix 'tee' command.
-
- When the object is closed or deleted, it closes the original file given to
- it for duplication.
- """
- # Inspired by:
- # http://mail.python.org/pipermail/python-list/2007-May/442737.html
-
- def __init__(self, file_or_name, mode="w", channel='stdout'):
- """Construct a new Tee object.
-
- Parameters
- ----------
- file_or_name : filename or open filehandle (writable)
- File that will be duplicated
-
- mode : optional, valid mode for open().
- If a filename was give, open with this mode.
-
- channel : str, one of ['stdout', 'stderr']
- """
- if channel not in ['stdout', 'stderr']:
- raise ValueError('Invalid channel spec %s' % channel)
-
- if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
- self.file = file_or_name
- else:
- self.file = open(file_or_name, mode)
- self.channel = channel
- self.ostream = getattr(sys, channel)
- setattr(sys, channel, self)
- self._closed = False
-
- def close(self):
- """Close the file and restore the channel."""
- self.flush()
- setattr(sys, self.channel, self.ostream)
- self.file.close()
- self._closed = True
-
- def write(self, data):
- """Write data to both channels."""
- self.file.write(data)
- self.ostream.write(data)
- self.ostream.flush()
-
- def flush(self):
- """Flush both channels."""
- self.file.flush()
- self.ostream.flush()
-
- def __del__(self):
- if not self._closed:
- self.close()
-
-
-def ask_yes_no(prompt, default=None, interrupt=None):
- """Asks a question and returns a boolean (y/n) answer.
-
- If default is given (one of 'y','n'), it is used if the user input is
- empty. If interrupt is given (one of 'y','n'), it is used if the user
- presses Ctrl-C. Otherwise the question is repeated until an answer is
- given.
-
- An EOF is treated as the default answer. If there is no default, an
- exception is raised to prevent infinite loops.
-
- Valid answers are: y/yes/n/no (match is not case sensitive)."""
-
- answers = {'y':True,'n':False,'yes':True,'no':False}
- ans = None
- while ans not in answers.keys():
- try:
- ans = input(prompt+' ').lower()
- if not ans: # response was an empty string
- ans = default
- except KeyboardInterrupt:
- if interrupt:
- ans = interrupt
- except EOFError:
- if default in answers.keys():
- ans = default
- print()
- else:
- raise
-
- return answers[ans]
-
-
-def temp_pyfile(src, ext='.py'):
- """Make a temporary python file, return filename and filehandle.
-
- Parameters
- ----------
- src : string or list of strings (no need for ending newlines if list)
- Source code to be written to the file.
-
- ext : optional, string
- Extension for the generated file.
-
- Returns
- -------
- (filename, open filehandle)
- It is the caller's responsibility to close the open file and unlink it.
- """
- fname = tempfile.mkstemp(ext)[1]
- f = open(fname,'w')
- f.write(src)
- f.flush()
- return fname, f
-
-def atomic_writing(*args, **kwargs):
- """DEPRECATED: moved to notebook.services.contents.fileio"""
- warn("IPython.utils.io.atomic_writing has moved to notebook.services.contents.fileio")
- from notebook.services.contents.fileio import atomic_writing
- return atomic_writing(*args, **kwargs)
-
-def raw_print(*args, **kw):
- """Raw print to sys.__stdout__, otherwise identical interface to print()."""
-
- print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
- file=sys.__stdout__)
- sys.__stdout__.flush()
-
-
-def raw_print_err(*args, **kw):
- """Raw print to sys.__stderr__, otherwise identical interface to print()."""
-
- print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
- file=sys.__stderr__)
- sys.__stderr__.flush()
-
-
-# Short aliases for quick debugging, do NOT use these in production code.
-rprint = raw_print
-rprinte = raw_print_err
-
-
-def unicode_std_stream(stream='stdout'):
- """DEPRECATED, moved to nbconvert.utils.io"""
- warn("IPython.utils.io.unicode_std_stream has moved to nbconvert.utils.io")
- from nbconvert.utils.io import unicode_std_stream
- return unicode_std_stream(stream)
+class Tee(object):
+ """A class to duplicate an output stream to stdout/err.
+
+ This works in a manner very similar to the Unix 'tee' command.
+
+ When the object is closed or deleted, it closes the original file given to
+ it for duplication.
+ """
+ # Inspired by:
+ # http://mail.python.org/pipermail/python-list/2007-May/442737.html
+
+ def __init__(self, file_or_name, mode="w", channel='stdout'):
+ """Construct a new Tee object.
+
+ Parameters
+ ----------
+ file_or_name : filename or open filehandle (writable)
+ File that will be duplicated
+
+ mode : optional, valid mode for open().
+ If a filename was given, open with this mode.
+
+ channel : str, one of ['stdout', 'stderr']
+ """
+ if channel not in ['stdout', 'stderr']:
+ raise ValueError('Invalid channel spec %s' % channel)
+
+ if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
+ self.file = file_or_name
+ else:
+ self.file = open(file_or_name, mode)
+ self.channel = channel
+ self.ostream = getattr(sys, channel)
+ setattr(sys, channel, self)
+ self._closed = False
+
+ def close(self):
+ """Close the file and restore the channel."""
+ self.flush()
+ setattr(sys, self.channel, self.ostream)
+ self.file.close()
+ self._closed = True
+
+ def write(self, data):
+ """Write data to both channels."""
+ self.file.write(data)
+ self.ostream.write(data)
+ self.ostream.flush()
+
+ def flush(self):
+ """Flush both channels."""
+ self.file.flush()
+ self.ostream.flush()
+
+ def __del__(self):
+ if not self._closed:
+ self.close()
+
+
+def ask_yes_no(prompt, default=None, interrupt=None):
+ """Asks a question and returns a boolean (y/n) answer.
+
+ If default is given (one of 'y','n'), it is used if the user input is
+ empty. If interrupt is given (one of 'y','n'), it is used if the user
+ presses Ctrl-C. Otherwise the question is repeated until an answer is
+ given.
+
+ An EOF is treated as the default answer. If there is no default, an
+ exception is raised to prevent infinite loops.
+
+ Valid answers are: y/yes/n/no (match is not case sensitive)."""
+
+ answers = {'y':True,'n':False,'yes':True,'no':False}
+ ans = None
+ while ans not in answers.keys():
+ try:
+ ans = input(prompt+' ').lower()
+ if not ans: # response was an empty string
+ ans = default
+ except KeyboardInterrupt:
+ if interrupt:
+ ans = interrupt
+ except EOFError:
+ if default in answers.keys():
+ ans = default
+ print()
+ else:
+ raise
+
+ return answers[ans]
+
+
+def temp_pyfile(src, ext='.py'):
+ """Make a temporary python file, return filename and filehandle.
+
+ Parameters
+ ----------
+ src : string or list of strings (no need for ending newlines if list)
+ Source code to be written to the file.
+
+ ext : optional, string
+ Extension for the generated file.
+
+ Returns
+ -------
+ (filename, open filehandle)
+ It is the caller's responsibility to close the open file and unlink it.
+ """
+ fname = tempfile.mkstemp(ext)[1]
+ f = open(fname,'w')
+ f.write(src)
+ f.flush()
+ return fname, f
+
+def atomic_writing(*args, **kwargs):
+ """DEPRECATED: moved to notebook.services.contents.fileio"""
+ warn("IPython.utils.io.atomic_writing has moved to notebook.services.contents.fileio")
+ from notebook.services.contents.fileio import atomic_writing
+ return atomic_writing(*args, **kwargs)
+
+def raw_print(*args, **kw):
+ """Raw print to sys.__stdout__, otherwise identical interface to print()."""
+
+ print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
+ file=sys.__stdout__)
+ sys.__stdout__.flush()
+
+
+def raw_print_err(*args, **kw):
+ """Raw print to sys.__stderr__, otherwise identical interface to print()."""
+
+ print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
+ file=sys.__stderr__)
+ sys.__stderr__.flush()
+
+
+# Short aliases for quick debugging, do NOT use these in production code.
+rprint = raw_print
+rprinte = raw_print_err
+
+
+def unicode_std_stream(stream='stdout'):
+ """DEPRECATED, moved to nbconvert.utils.io"""
+ warn("IPython.utils.io.unicode_std_stream has moved to nbconvert.utils.io")
+ from nbconvert.utils.io import unicode_std_stream
+ return unicode_std_stream(stream)
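
A sketch of Tee duplicating stdout into a log file, per the constructor above (the log path is a hypothetical temp file):

import os
import tempfile
from IPython.utils.io import Tee

log_path = os.path.join(tempfile.gettempdir(), 'tee_demo.log')
tee = Tee(log_path, mode='w', channel='stdout')
print('this line reaches both the real stdout and the log file')
tee.close()  # flushes, restores sys.stdout, and closes the file

with open(log_path) as f:
    print(f.read().strip())
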
diff --git a/contrib/python/ipython/py2/IPython/utils/ipstruct.py b/contrib/python/ipython/py2/IPython/utils/ipstruct.py
index e17760b4f9..e2b3e8fa4c 100644
--- a/contrib/python/ipython/py2/IPython/utils/ipstruct.py
+++ b/contrib/python/ipython/py2/IPython/utils/ipstruct.py
@@ -1,391 +1,391 @@
-# encoding: utf-8
-"""A dict subclass that supports attribute style access.
-
-Authors:
-
-* Fernando Perez (original)
-* Brian Granger (refactoring to a dict subclass)
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-__all__ = ['Struct']
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-
-class Struct(dict):
- """A dict subclass with attribute style access.
-
- This dict subclass has a a few extra features:
-
- * Attribute style access.
- * Protection of class members (like keys, items) when using attribute
- style access.
- * The ability to restrict assignment to only existing keys.
- * Intelligent merging.
- * Overloaded operators.
- """
- _allownew = True
- def __init__(self, *args, **kw):
- """Initialize with a dictionary, another Struct, or data.
-
- Parameters
- ----------
- args : dict, Struct
- Initialize with one dict or Struct
- kw : dict
- Initialize with key, value pairs.
-
- Examples
- --------
-
- >>> s = Struct(a=10,b=30)
- >>> s.a
- 10
- >>> s.b
- 30
- >>> s2 = Struct(s,c=30)
- >>> sorted(s2.keys())
- ['a', 'b', 'c']
- """
- object.__setattr__(self, '_allownew', True)
- dict.__init__(self, *args, **kw)
-
- def __setitem__(self, key, value):
- """Set an item with check for allownew.
-
- Examples
- --------
-
- >>> s = Struct()
- >>> s['a'] = 10
- >>> s.allow_new_attr(False)
- >>> s['a'] = 10
- >>> s['a']
- 10
- >>> try:
- ... s['b'] = 20
- ... except KeyError:
- ... print('this is not allowed')
- ...
- this is not allowed
- """
- if not self._allownew and key not in self:
- raise KeyError(
- "can't create new attribute %s when allow_new_attr(False)" % key)
- dict.__setitem__(self, key, value)
-
- def __setattr__(self, key, value):
- """Set an attr with protection of class members.
-
- This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to
- :exc:`AttributeError`.
-
- Examples
- --------
-
- >>> s = Struct()
- >>> s.a = 10
- >>> s.a
- 10
- >>> try:
- ... s.get = 10
- ... except AttributeError:
- ... print("you can't set a class member")
- ...
- you can't set a class member
- """
- # If key is an str it might be a class member or instance var
- if isinstance(key, str):
- # I can't simply call hasattr here because it calls getattr, which
- # calls self.__getattr__, which returns True for keys in
- # self._data. But I only want keys in the class and in
- # self.__dict__
- if key in self.__dict__ or hasattr(Struct, key):
- raise AttributeError(
- 'attr %s is a protected member of class Struct.' % key
- )
- try:
- self.__setitem__(key, value)
- except KeyError as e:
- raise AttributeError(e)
-
- def __getattr__(self, key):
- """Get an attr by calling :meth:`dict.__getitem__`.
-
- Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
- :exc:`AttributeError`.
-
- Examples
- --------
-
- >>> s = Struct(a=10)
- >>> s.a
- 10
- >>> type(s.get)
- <... 'builtin_function_or_method'>
- >>> try:
- ... s.b
- ... except AttributeError:
- ... print("I don't have that key")
- ...
- I don't have that key
- """
- try:
- result = self[key]
- except KeyError:
- raise AttributeError(key)
- else:
- return result
-
- def __iadd__(self, other):
- """s += s2 is a shorthand for s.merge(s2).
-
- Examples
- --------
-
- >>> s = Struct(a=10,b=30)
- >>> s2 = Struct(a=20,c=40)
- >>> s += s2
- >>> sorted(s.keys())
- ['a', 'b', 'c']
- """
- self.merge(other)
- return self
-
- def __add__(self,other):
- """s + s2 -> New Struct made from s.merge(s2).
-
- Examples
- --------
-
- >>> s1 = Struct(a=10,b=30)
- >>> s2 = Struct(a=20,c=40)
- >>> s = s1 + s2
- >>> sorted(s.keys())
- ['a', 'b', 'c']
- """
- sout = self.copy()
- sout.merge(other)
- return sout
-
- def __sub__(self,other):
- """s1 - s2 -> remove keys in s2 from s1.
-
- Examples
- --------
-
- >>> s1 = Struct(a=10,b=30)
- >>> s2 = Struct(a=40)
- >>> s = s1 - s2
- >>> s
- {'b': 30}
- """
- sout = self.copy()
- sout -= other
- return sout
-
- def __isub__(self,other):
- """Inplace remove keys from self that are in other.
-
- Examples
- --------
-
- >>> s1 = Struct(a=10,b=30)
- >>> s2 = Struct(a=40)
- >>> s1 -= s2
- >>> s1
- {'b': 30}
- """
- for k in other.keys():
- if k in self:
- del self[k]
- return self
-
- def __dict_invert(self, data):
- """Helper function for merge.
-
- Takes a dictionary whose values are lists and returns a dict with
- the elements of each list as keys and the original keys as values.
- """
- outdict = {}
- for k,lst in data.items():
- if isinstance(lst, str):
- lst = lst.split()
- for entry in lst:
- outdict[entry] = k
- return outdict
-
- def dict(self):
- return self
-
- def copy(self):
- """Return a copy as a Struct.
-
- Examples
- --------
-
- >>> s = Struct(a=10,b=30)
- >>> s2 = s.copy()
- >>> type(s2) is Struct
- True
- """
- return Struct(dict.copy(self))
-
- def hasattr(self, key):
- """hasattr function available as a method.
-
- Implemented like has_key.
-
- Examples
- --------
-
- >>> s = Struct(a=10)
- >>> s.hasattr('a')
- True
- >>> s.hasattr('b')
- False
- >>> s.hasattr('get')
- False
- """
- return key in self
-
- def allow_new_attr(self, allow = True):
- """Set whether new attributes can be created in this Struct.
-
- This can be used to catch typos by verifying that the attribute user
- tries to change already exists in this Struct.
- """
- object.__setattr__(self, '_allownew', allow)
-
- def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
- """Merge two Structs with customizable conflict resolution.
-
- This is similar to :meth:`update`, but much more flexible. First, a
- dict is made from data+key=value pairs. When merging this dict with
- the Struct S, the optional dictionary 'conflict' is used to decide
- what to do.
-
- If conflict is not given, the default behavior is to preserve any keys
- with their current value (the opposite of the :meth:`update` method's
- behavior).
-
- Parameters
- ----------
- __loc_data : dict, Struct
- The data to merge into self
- __conflict_solve : dict
- The conflict policy dict. The keys are binary functions used to
- resolve the conflict and the values are lists of strings naming
- the keys the conflict resolution function applies to. Instead of
- a list of strings a space separated string can be used, like
- 'a b c'.
- kw : dict
- Additional key, value pairs to merge in
-
- Notes
- -----
-
- The `__conflict_solve` dict is a dictionary of binary functions which will be used to
- solve key conflicts. Here is an example::
-
- __conflict_solve = dict(
- func1=['a','b','c'],
- func2=['d','e']
- )
-
- In this case, the function :func:`func1` will be used to resolve
- keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
- keys 'd' and 'e'. This could also be written as::
-
- __conflict_solve = dict(func1='a b c',func2='d e')
-
- These functions will be called for each key they apply to with the
- form::
-
- func1(self['a'], other['a'])
-
- The return value is used as the final merged value.
-
- As a convenience, merge() provides five (the most commonly needed)
- pre-defined policies: preserve, update, add, add_flip and add_s. The
- easiest explanation is their implementation::
-
- preserve = lambda old,new: old
- update = lambda old,new: new
- add = lambda old,new: old + new
- add_flip = lambda old,new: new + old # note change of order!
- add_s = lambda old,new: old + ' ' + new # only for str!
-
- You can use those four words (as strings) as keys instead
- of defining them as functions, and the merge method will substitute
- the appropriate functions for you.
-
- For more complicated conflict resolution policies, you still need to
- construct your own functions.
-
- Examples
- --------
-
- This show the default policy:
-
- >>> s = Struct(a=10,b=30)
- >>> s2 = Struct(a=20,c=40)
- >>> s.merge(s2)
- >>> sorted(s.items())
- [('a', 10), ('b', 30), ('c', 40)]
-
- Now, show how to specify a conflict dict:
-
- >>> s = Struct(a=10,b=30)
- >>> s2 = Struct(a=20,b=40)
- >>> conflict = {'update':'a','add':'b'}
- >>> s.merge(s2,conflict)
- >>> sorted(s.items())
- [('a', 20), ('b', 70)]
- """
-
- data_dict = dict(__loc_data__,**kw)
-
- # policies for conflict resolution: two argument functions which return
- # the value that will go in the new struct
- preserve = lambda old,new: old
- update = lambda old,new: new
- add = lambda old,new: old + new
- add_flip = lambda old,new: new + old # note change of order!
- add_s = lambda old,new: old + ' ' + new
-
- # default policy is to keep current keys when there's a conflict
- conflict_solve = dict.fromkeys(self, preserve)
-
- # the conflict_solve dictionary is given by the user 'inverted': we
- # need a name-function mapping, it comes as a function -> names
- # dict. Make a local copy (b/c we'll make changes), replace user
- # strings for the three builtin policies and invert it.
- if __conflict_solve:
- inv_conflict_solve_user = __conflict_solve.copy()
- for name, func in [('preserve',preserve), ('update',update),
- ('add',add), ('add_flip',add_flip),
- ('add_s',add_s)]:
- if name in inv_conflict_solve_user.keys():
- inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
- del inv_conflict_solve_user[name]
- conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
- for key in data_dict:
- if key not in self:
- self[key] = data_dict[key]
- else:
- self[key] = conflict_solve[key](self[key],data_dict[key])
-
+# encoding: utf-8
+"""A dict subclass that supports attribute style access.
+
+Authors:
+
+* Fernando Perez (original)
+* Brian Granger (refactoring to a dict subclass)
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+__all__ = ['Struct']
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+class Struct(dict):
+ """A dict subclass with attribute style access.
+
+ This dict subclass has a few extra features:
+
+ * Attribute style access.
+ * Protection of class members (like keys, items) when using attribute
+ style access.
+ * The ability to restrict assignment to only existing keys.
+ * Intelligent merging.
+ * Overloaded operators.
+ """
+ _allownew = True
+ def __init__(self, *args, **kw):
+ """Initialize with a dictionary, another Struct, or data.
+
+ Parameters
+ ----------
+ args : dict, Struct
+ Initialize with one dict or Struct
+ kw : dict
+ Initialize with key, value pairs.
+
+ Examples
+ --------
+
+ >>> s = Struct(a=10,b=30)
+ >>> s.a
+ 10
+ >>> s.b
+ 30
+ >>> s2 = Struct(s,c=30)
+ >>> sorted(s2.keys())
+ ['a', 'b', 'c']
+ """
+ object.__setattr__(self, '_allownew', True)
+ dict.__init__(self, *args, **kw)
+
+ def __setitem__(self, key, value):
+ """Set an item with check for allownew.
+
+ Examples
+ --------
+
+ >>> s = Struct()
+ >>> s['a'] = 10
+ >>> s.allow_new_attr(False)
+ >>> s['a'] = 10
+ >>> s['a']
+ 10
+ >>> try:
+ ... s['b'] = 20
+ ... except KeyError:
+ ... print('this is not allowed')
+ ...
+ this is not allowed
+ """
+ if not self._allownew and key not in self:
+ raise KeyError(
+ "can't create new attribute %s when allow_new_attr(False)" % key)
+ dict.__setitem__(self, key, value)
+
+ def __setattr__(self, key, value):
+ """Set an attr with protection of class members.
+
+ This calls :meth:`self.__setitem__` but converts :exc:`KeyError` to
+ :exc:`AttributeError`.
+
+ Examples
+ --------
+
+ >>> s = Struct()
+ >>> s.a = 10
+ >>> s.a
+ 10
+ >>> try:
+ ... s.get = 10
+ ... except AttributeError:
+ ... print("you can't set a class member")
+ ...
+ you can't set a class member
+ """
+ # If key is an str it might be a class member or instance var
+ if isinstance(key, str):
+ # I can't simply call hasattr here because it calls getattr, which
+ # calls self.__getattr__, which returns True for keys in
+ # self._data. But I only want keys in the class and in
+ # self.__dict__
+ if key in self.__dict__ or hasattr(Struct, key):
+ raise AttributeError(
+ 'attr %s is a protected member of class Struct.' % key
+ )
+ try:
+ self.__setitem__(key, value)
+ except KeyError as e:
+ raise AttributeError(e)
+
+ def __getattr__(self, key):
+ """Get an attr by calling :meth:`dict.__getitem__`.
+
+ Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
+ :exc:`AttributeError`.
+
+ Examples
+ --------
+
+ >>> s = Struct(a=10)
+ >>> s.a
+ 10
+ >>> type(s.get)
+ <... 'builtin_function_or_method'>
+ >>> try:
+ ... s.b
+ ... except AttributeError:
+ ... print("I don't have that key")
+ ...
+ I don't have that key
+ """
+ try:
+ result = self[key]
+ except KeyError:
+ raise AttributeError(key)
+ else:
+ return result
+
+ def __iadd__(self, other):
+ """s += s2 is a shorthand for s.merge(s2).
+
+ Examples
+ --------
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s += s2
+ >>> sorted(s.keys())
+ ['a', 'b', 'c']
+ """
+ self.merge(other)
+ return self
+
+ def __add__(self,other):
+ """s + s2 -> New Struct made from s.merge(s2).
+
+ Examples
+ --------
+
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s = s1 + s2
+ >>> sorted(s.keys())
+ ['a', 'b', 'c']
+ """
+ sout = self.copy()
+ sout.merge(other)
+ return sout
+
+ def __sub__(self,other):
+ """s1 - s2 -> remove keys in s2 from s1.
+
+ Examples
+ --------
+
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=40)
+ >>> s = s1 - s2
+ >>> s
+ {'b': 30}
+ """
+ sout = self.copy()
+ sout -= other
+ return sout
+
+ def __isub__(self,other):
+ """Inplace remove keys from self that are in other.
+
+ Examples
+ --------
+
+ >>> s1 = Struct(a=10,b=30)
+ >>> s2 = Struct(a=40)
+ >>> s1 -= s2
+ >>> s1
+ {'b': 30}
+ """
+ for k in other.keys():
+ if k in self:
+ del self[k]
+ return self
+
+ def __dict_invert(self, data):
+ """Helper function for merge.
+
+ Takes a dictionary whose values are lists and returns a dict with
+ the elements of each list as keys and the original keys as values.
+ """
+ outdict = {}
+ for k,lst in data.items():
+ if isinstance(lst, str):
+ lst = lst.split()
+ for entry in lst:
+ outdict[entry] = k
+ return outdict
+
+ def dict(self):
+ return self
+
+ def copy(self):
+ """Return a copy as a Struct.
+
+ Examples
+ --------
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = s.copy()
+ >>> type(s2) is Struct
+ True
+ """
+ return Struct(dict.copy(self))
+
+ def hasattr(self, key):
+ """hasattr function available as a method.
+
+ Implemented like has_key.
+
+ Examples
+ --------
+
+ >>> s = Struct(a=10)
+ >>> s.hasattr('a')
+ True
+ >>> s.hasattr('b')
+ False
+ >>> s.hasattr('get')
+ False
+ """
+ return key in self
+
+ def allow_new_attr(self, allow = True):
+ """Set whether new attributes can be created in this Struct.
+
+ This can be used to catch typos by verifying that the attribute the user
+ tries to change already exists in this Struct.
+ """
+ object.__setattr__(self, '_allownew', allow)
+
+ def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
+ """Merge two Structs with customizable conflict resolution.
+
+ This is similar to :meth:`update`, but much more flexible. First, a
+ dict is made from data+key=value pairs. When merging this dict with
+ the Struct S, the optional dictionary 'conflict' is used to decide
+ what to do.
+
+ If conflict is not given, the default behavior is to preserve any keys
+ with their current value (the opposite of the :meth:`update` method's
+ behavior).
+
+ Parameters
+ ----------
+ __loc_data__ : dict, Struct
+ The data to merge into self
+ __conflict_solve : dict
+ The conflict policy dict. The keys are binary functions used to
+ resolve the conflict and the values are lists of strings naming
+ the keys the conflict resolution function applies to. Instead of
+ a list of strings a space separated string can be used, like
+ 'a b c'.
+ kw : dict
+ Additional key, value pairs to merge in
+
+ Notes
+ -----
+
+ The `__conflict_solve` dict is a dictionary of binary functions which will be used to
+ solve key conflicts. Here is an example::
+
+ __conflict_solve = dict(
+ func1=['a','b','c'],
+ func2=['d','e']
+ )
+
+ In this case, the function :func:`func1` will be used to resolve
+ keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
+ keys 'd' and 'e'. This could also be written as::
+
+ __conflict_solve = dict(func1='a b c',func2='d e')
+
+ These functions will be called for each key they apply to with the
+ form::
+
+ func1(self['a'], other['a'])
+
+ The return value is used as the final merged value.
+
+ As a convenience, merge() provides five (the most commonly needed)
+ pre-defined policies: preserve, update, add, add_flip and add_s. The
+ easiest explanation is their implementation::
+
+ preserve = lambda old,new: old
+ update = lambda old,new: new
+ add = lambda old,new: old + new
+ add_flip = lambda old,new: new + old # note change of order!
+ add_s = lambda old,new: old + ' ' + new # only for str!
+
+ You can use those five words (as strings) as keys instead
+ of defining them as functions, and the merge method will substitute
+ the appropriate functions for you.
+
+ For more complicated conflict resolution policies, you still need to
+ construct your own functions.
+
+ Examples
+ --------
+
+ This shows the default policy:
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,c=40)
+ >>> s.merge(s2)
+ >>> sorted(s.items())
+ [('a', 10), ('b', 30), ('c', 40)]
+
+ Now, show how to specify a conflict dict:
+
+ >>> s = Struct(a=10,b=30)
+ >>> s2 = Struct(a=20,b=40)
+ >>> conflict = {'update':'a','add':'b'}
+ >>> s.merge(s2,conflict)
+ >>> sorted(s.items())
+ [('a', 20), ('b', 70)]
+ """
+
+ data_dict = dict(__loc_data__,**kw)
+
+ # policies for conflict resolution: two argument functions which return
+ # the value that will go in the new struct
+ preserve = lambda old,new: old
+ update = lambda old,new: new
+ add = lambda old,new: old + new
+ add_flip = lambda old,new: new + old # note change of order!
+ add_s = lambda old,new: old + ' ' + new
+
+ # default policy is to keep current keys when there's a conflict
+ conflict_solve = dict.fromkeys(self, preserve)
+
+ # the conflict_solve dictionary is given by the user 'inverted': we
+ # need a name-function mapping, it comes as a function -> names
+ # dict. Make a local copy (b/c we'll make changes), replace user
+ # strings for the five builtin policies and invert it.
+ if __conflict_solve:
+ inv_conflict_solve_user = __conflict_solve.copy()
+ for name, func in [('preserve',preserve), ('update',update),
+ ('add',add), ('add_flip',add_flip),
+ ('add_s',add_s)]:
+ if name in inv_conflict_solve_user.keys():
+ inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
+ del inv_conflict_solve_user[name]
+ conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
+ for key in data_dict:
+ if key not in self:
+ self[key] = data_dict[key]
+ else:
+ self[key] = conflict_solve[key](self[key],data_dict[key])
+
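
The merge machinery is easiest to see end to end; a compact sketch built from the doctests above:

from IPython.utils.ipstruct import Struct

s = Struct(a=10, b=30)
s2 = Struct(a=20, b=40, c=50)

# Default policy: existing keys are preserved, only new keys come in.
merged = s + s2
print(sorted(merged.items()))  # [('a', 10), ('b', 30), ('c', 50)]

# Named policies: take s2's value for 'a', sum the values for 'b'.
s.merge(s2, {'update': 'a', 'add': 'b'})
print(sorted(s.items()))       # [('a', 20), ('b', 70), ('c', 50)]
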
diff --git a/contrib/python/ipython/py2/IPython/utils/jsonutil.py b/contrib/python/ipython/py2/IPython/utils/jsonutil.py
index 4bb400ca1e..c3ee93859e 100644
--- a/contrib/python/ipython/py2/IPython/utils/jsonutil.py
+++ b/contrib/python/ipython/py2/IPython/utils/jsonutil.py
@@ -1,5 +1,5 @@
-from warnings import warn
-
-warn("IPython.utils.jsonutil has moved to jupyter_client.jsonutil")
-
-from jupyter_client.jsonutil import *
+from warnings import warn
+
+warn("IPython.utils.jsonutil has moved to jupyter_client.jsonutil")
+
+from jupyter_client.jsonutil import *
diff --git a/contrib/python/ipython/py2/IPython/utils/localinterfaces.py b/contrib/python/ipython/py2/IPython/utils/localinterfaces.py
index f90564def5..89b8fdeb54 100644
--- a/contrib/python/ipython/py2/IPython/utils/localinterfaces.py
+++ b/contrib/python/ipython/py2/IPython/utils/localinterfaces.py
@@ -1,5 +1,5 @@
-from warnings import warn
-
-warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces")
-
-from jupyter_client.localinterfaces import *
+from warnings import warn
+
+warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces")
+
+from jupyter_client.localinterfaces import *
diff --git a/contrib/python/ipython/py2/IPython/utils/log.py b/contrib/python/ipython/py2/IPython/utils/log.py
index 422bb9b343..3eb9bdadd8 100644
--- a/contrib/python/ipython/py2/IPython/utils/log.py
+++ b/contrib/python/ipython/py2/IPython/utils/log.py
@@ -1,7 +1,7 @@
-from __future__ import absolute_import
-
-from warnings import warn
-
-warn("IPython.utils.log has moved to traitlets.log")
-
-from traitlets.log import *
+from __future__ import absolute_import
+
+from warnings import warn
+
+warn("IPython.utils.log has moved to traitlets.log")
+
+from traitlets.log import *
diff --git a/contrib/python/ipython/py2/IPython/utils/module_paths.py b/contrib/python/ipython/py2/IPython/utils/module_paths.py
index fc2c7f07c0..45a711c0b4 100644
--- a/contrib/python/ipython/py2/IPython/utils/module_paths.py
+++ b/contrib/python/ipython/py2/IPython/utils/module_paths.py
@@ -1,125 +1,125 @@
-"""Utility functions for finding modules
-
-Utility functions for finding modules on sys.path.
-
-`find_mod` finds named module on sys.path.
-
-`get_init` helper function that finds __init__ file in a directory.
-
-`find_module` variant of imp.find_module in std_lib that only returns
-path to module and not an open file object as well.
-
-
-
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2011, the IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib imports
-import imp
-import os
-
-# Third-party imports
-
-# Our own imports
-
-
-#-----------------------------------------------------------------------------
-# Globals and constants
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Local utilities
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Classes and functions
-#-----------------------------------------------------------------------------
-def find_module(name, path=None):
- """imp.find_module variant that only return path of module.
-
- The `imp.find_module` returns a filehandle that we are not interested in.
- Also we ignore any bytecode files that `imp.find_module` finds.
-
- Parameters
- ----------
- name : str
- name of module to locate
- path : list of str
- list of paths to search for `name`. If path=None then search sys.path
-
- Returns
- -------
- filename : str
- Return full path of module or None if module is missing or does not have
- .py or .pyw extension
- """
- if name is None:
- return None
- try:
- file, filename, _ = imp.find_module(name, path)
- except ImportError:
- return None
- if file is None:
- return filename
- else:
- file.close()
- if os.path.splitext(filename)[1] in [".py", ".pyc"]:
- return filename
- else:
- return None
-
-def get_init(dirname):
- """Get __init__ file path for module directory
-
- Parameters
- ----------
- dirname : str
- Find the __init__ file in directory `dirname`
-
- Returns
- -------
- init_path : str
- Path to __init__ file
- """
- fbase = os.path.join(dirname, "__init__")
- for ext in [".py", ".pyw"]:
- fname = fbase + ext
- if os.path.isfile(fname):
- return fname
-
-
-def find_mod(module_name):
- """Find module `module_name` on sys.path
-
- Return the path to module `module_name`. If `module_name` refers to
- a module directory then return path to __init__ file. Return full
- path of module or None if module is missing or does not have .py or .pyw
- extension. We are not interested in running bytecode.
-
- Parameters
- ----------
- module_name : str
-
- Returns
- -------
- modulepath : str
- Path to module `module_name`.
- """
- parts = module_name.split(".")
- basepath = find_module(parts[0])
- for submodname in parts[1:]:
- basepath = find_module(submodname, [basepath])
- if basepath and os.path.isdir(basepath):
- basepath = get_init(basepath)
- return basepath
+"""Utility functions for finding modules
+
+Utility functions for finding modules on sys.path.
+
+`find_mod` finds a named module on sys.path.
+
+`get_init` is a helper that finds the __init__ file in a directory.
+
+`find_module` is a variant of imp.find_module from the standard library that
+returns only the path to the module, not an open file object as well.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2011, the IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+from __future__ import print_function
+
+# Stdlib imports
+import imp
+import os
+
+# Third-party imports
+
+# Our own imports
+
+
+#-----------------------------------------------------------------------------
+# Globals and constants
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Local utilities
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Classes and functions
+#-----------------------------------------------------------------------------
+def find_module(name, path=None):
+ """imp.find_module variant that only return path of module.
+
+ The `imp.find_module` returns a filehandle that we are not interested in.
+ Also we ignore any bytecode files that `imp.find_module` finds.
+
+ Parameters
+ ----------
+ name : str
+ name of module to locate
+ path : list of str
+ list of paths to search for `name`. If path=None then search sys.path
+
+ Returns
+ -------
+ filename : str
+ Full path of the module, or None if the module is missing or does not
+ have a .py or .pyw extension.
+ """
+ if name is None:
+ return None
+ try:
+ file, filename, _ = imp.find_module(name, path)
+ except ImportError:
+ return None
+ if file is None:
+ return filename
+ else:
+ file.close()
+ if os.path.splitext(filename)[1] in [".py", ".pyc"]:
+ return filename
+ else:
+ return None
+
+def get_init(dirname):
+ """Get __init__ file path for module directory
+
+ Parameters
+ ----------
+ dirname : str
+ Find the __init__ file in directory `dirname`
+
+ Returns
+ -------
+ init_path : str
+ Path to __init__ file
+ """
+ fbase = os.path.join(dirname, "__init__")
+ for ext in [".py", ".pyw"]:
+ fname = fbase + ext
+ if os.path.isfile(fname):
+ return fname
+
+
+def find_mod(module_name):
+ """Find module `module_name` on sys.path
+
+ Return the path to module `module_name`. If `module_name` refers to
+ a module directory then return path to __init__ file. Return full
+ path of module or None if module is missing or does not have .py or .pyw
+ extension. We are not interested in running bytecode.
+
+ Parameters
+ ----------
+ module_name : str
+
+ Returns
+ -------
+ modulepath : str
+ Path to module `module_name`.
+ """
+ parts = module_name.split(".")
+ basepath = find_module(parts[0])
+ for submodname in parts[1:]:
+ basepath = find_module(submodname, [basepath])
+ if basepath and os.path.isdir(basepath):
+ basepath = get_init(basepath)
+ return basepath
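
A sketch of resolving dotted names to source paths with the helpers above (relies on the py2-era imp module; printed paths vary by installation):

from IPython.utils.module_paths import find_mod, find_module

print(find_module('os'))           # .../lib/python2.7/os.py
print(find_mod('json.decoder'))    # .../lib/python2.7/json/decoder.py
print(find_mod('json'))            # a package resolves to its __init__.py
print(find_mod('no_such_module'))  # None
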
diff --git a/contrib/python/ipython/py2/IPython/utils/openpy.py b/contrib/python/ipython/py2/IPython/utils/openpy.py
index f55f254bc1..0a7cc0f00e 100644
--- a/contrib/python/ipython/py2/IPython/utils/openpy.py
+++ b/contrib/python/ipython/py2/IPython/utils/openpy.py
@@ -1,249 +1,249 @@
-"""
-Tools to open .py files as Unicode, using the encoding specified within the file,
-as per PEP 263.
-
-Much of the code is taken from the tokenize module in Python 3.2.
-"""
-from __future__ import absolute_import
-
-import io
-from io import TextIOWrapper, BytesIO
-import os.path
-import re
-
-from .py3compat import unicode_type
-
-cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
-cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
-
-try:
- # Available in Python 3
- from tokenize import detect_encoding
-except ImportError:
- from codecs import lookup, BOM_UTF8
-
- # Copied from Python 3.2 tokenize
- def _get_normal_name(orig_enc):
- """Imitates get_normal_name in tokenizer.c."""
- # Only care about the first 12 characters.
- enc = orig_enc[:12].lower().replace("_", "-")
- if enc == "utf-8" or enc.startswith("utf-8-"):
- return "utf-8"
- if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
- enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
- return "iso-8859-1"
- return orig_enc
-
- # Copied from Python 3.2 tokenize
- def detect_encoding(readline):
- """
- The detect_encoding() function is used to detect the encoding that should
- be used to decode a Python source file. It requires one argment, readline,
- in the same way as the tokenize() generator.
-
- It will call readline a maximum of twice, and return the encoding used
- (as a string) and a list of any lines (left as bytes) it has read in.
-
- It detects the encoding from the presence of a utf-8 bom or an encoding
- cookie as specified in pep-0263. If both a bom and a cookie are present,
- but disagree, a SyntaxError will be raised. If the encoding cookie is an
- invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
- 'utf-8-sig' is returned.
-
- If no encoding is specified, then the default of 'utf-8' will be returned.
- """
- bom_found = False
- encoding = None
- default = 'utf-8'
- def read_or_stop():
- try:
- return readline()
- except StopIteration:
- return b''
-
- def find_cookie(line):
- try:
- line_string = line.decode('ascii')
- except UnicodeDecodeError:
- return None
-
- matches = cookie_re.findall(line_string)
- if not matches:
- return None
- encoding = _get_normal_name(matches[0])
- try:
- codec = lookup(encoding)
- except LookupError:
- # This behaviour mimics the Python interpreter
- raise SyntaxError("unknown encoding: " + encoding)
-
- if bom_found:
- if codec.name != 'utf-8':
- # This behaviour mimics the Python interpreter
- raise SyntaxError('encoding problem: utf-8')
- encoding += '-sig'
- return encoding
-
- first = read_or_stop()
- if first.startswith(BOM_UTF8):
- bom_found = True
- first = first[3:]
- default = 'utf-8-sig'
- if not first:
- return default, []
-
- encoding = find_cookie(first)
- if encoding:
- return encoding, [first]
-
- second = read_or_stop()
- if not second:
- return default, [first]
-
- encoding = find_cookie(second)
- if encoding:
- return encoding, [first, second]
-
- return default, [first, second]
-
-try:
- # Available in Python 3.2 and above.
- from tokenize import open
-except ImportError:
- # Copied from Python 3.2 tokenize
- def open(filename):
- """Open a file in read only mode using the encoding detected by
- detect_encoding().
- """
- buffer = io.open(filename, 'rb') # Tweaked to use io.open for Python 2
- encoding, lines = detect_encoding(buffer.readline)
- buffer.seek(0)
- text = TextIOWrapper(buffer, encoding, line_buffering=True)
- text.mode = 'r'
- return text
-
-def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
- """Converts a bytes string with python source code to unicode.
-
- Unicode strings are passed through unchanged. Byte strings are checked
- for the python source file encoding cookie to determine encoding.
- txt can be either a bytes buffer or a string containing the source
- code.
- """
- if isinstance(txt, unicode_type):
- return txt
- if isinstance(txt, bytes):
- buffer = BytesIO(txt)
- else:
- buffer = txt
+"""
+Tools to open .py files as Unicode, using the encoding specified within the file,
+as per PEP 263.
+
+Much of the code is taken from the tokenize module in Python 3.2.
+"""
+from __future__ import absolute_import
+
+import io
+from io import TextIOWrapper, BytesIO
+import os.path
+import re
+
+from .py3compat import unicode_type
+
+cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
+cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
+
+try:
+ # Available in Python 3
+ from tokenize import detect_encoding
+except ImportError:
+ from codecs import lookup, BOM_UTF8
+
+ # Copied from Python 3.2 tokenize
+ def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
+ # Copied from Python 3.2 tokenize
+ def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+        be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read in.
+
+ It detects the encoding from the presence of a utf-8 bom or an encoding
+ cookie as specified in pep-0263. If both a bom and a cookie are present,
+ but disagree, a SyntaxError will be raised. If the encoding cookie is an
+ invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+ 'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return b''
+
+ def find_cookie(line):
+ try:
+ line_string = line.decode('ascii')
+ except UnicodeDecodeError:
+ return None
+
+ matches = cookie_re.findall(line_string)
+ if not matches:
+ return None
+ encoding = _get_normal_name(matches[0])
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError("unknown encoding: " + encoding)
+
+ if bom_found:
+ if codec.name != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError('encoding problem: utf-8')
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
+
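+# Illustrative sketch (not part of the upstream module): detect_encoding()
+# only needs a readline callable, so an in-memory byte stream works. The
+# source text below is hypothetical.
+def _example_detect_encoding():
+    readline = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n").readline
+    encoding, lines = detect_encoding(readline)
+    return encoding, lines  # ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])
+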
+try:
+ # Available in Python 3.2 and above.
+ from tokenize import open
+except ImportError:
+ # Copied from Python 3.2 tokenize
+ def open(filename):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = io.open(filename, 'rb') # Tweaked to use io.open for Python 2
+ encoding, lines = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
+
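+# Illustrative sketch: this open() mirrors tokenize.open(), so the returned
+# file object is already decoded according to the file's own coding cookie.
+# The filename is hypothetical.
+def _example_open(path='some_module.py'):
+    with open(path) as f:
+        return f.read()
+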
+def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
+ """Converts a bytes string with python source code to unicode.
+
+ Unicode strings are passed through unchanged. Byte strings are checked
+ for the python source file encoding cookie to determine encoding.
+ txt can be either a bytes buffer or a string containing the source
+ code.
+ """
+ if isinstance(txt, unicode_type):
+ return txt
+ if isinstance(txt, bytes):
+ buffer = BytesIO(txt)
+ else:
+ buffer = txt
+ try:
+ encoding, _ = detect_encoding(buffer.readline)
+ except SyntaxError:
+ encoding = "ascii"
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
+ text.mode = 'r'
+ if skip_encoding_cookie:
+ return u"".join(strip_encoding_cookie(text))
+ else:
+ return text.read()
+
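+# Illustrative sketch: decoding a byte string that carries a PEP 263 cookie.
+# With the default skip_encoding_cookie=True the cookie line is dropped from
+# the result. The sample bytes are hypothetical.
+def _example_source_to_unicode():
+    src = b"# -*- coding: utf-8 -*-\nname = 'caf\xc3\xa9'\n"
+    return source_to_unicode(src)  # u"name = 'caf\xe9'\n" (cookie stripped)
+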
+def strip_encoding_cookie(filelike):
+ """Generator to pull lines from a text-mode file, skipping the encoding
+ cookie if it is found in the first two lines.
+ """
+ it = iter(filelike)
+ try:
+ first = next(it)
+ if not cookie_comment_re.match(first):
+ yield first
+ second = next(it)
+ if not cookie_comment_re.match(second):
+ yield second
+ except StopIteration:
+ return
+
+ for line in it:
+ yield line
+
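+# Illustrative sketch: the generator drops a coding cookie only if it sits
+# in the first two lines, per PEP 263; all later lines pass through.
+def _example_strip_encoding_cookie():
+    lines = [u"#!/usr/bin/env python\n",
+             u"# -*- coding: utf-8 -*-\n",
+             u"x = 1\n"]
+    return list(strip_encoding_cookie(lines))  # cookie line removed
+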
+def read_py_file(filename, skip_encoding_cookie=True):
+ """Read a Python file, using the encoding declared inside the file.
+
+ Parameters
+ ----------
+ filename : str
+ The path to the file to read.
+ skip_encoding_cookie : bool
+ If True (the default), and the encoding declaration is found in the first
+ two lines, that line will be excluded from the output - compiling a
+ unicode string with an encoding declaration is a SyntaxError in Python 2.
+
+ Returns
+ -------
+ A unicode string containing the contents of the file.
+ """
+ with open(filename) as f: # the open function defined in this module.
+ if skip_encoding_cookie:
+ return "".join(strip_encoding_cookie(f))
+ else:
+ return f.read()
+
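+# Illustrative sketch: reading a local source file; with
+# skip_encoding_cookie=False the coding line is kept in the result. The
+# filename is hypothetical.
+def _example_read_py_file(path='some_module.py'):
+    return read_py_file(path, skip_encoding_cookie=False)
+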
+def read_py_url(url, errors='replace', skip_encoding_cookie=True):
+ """Read a Python file from a URL, using the encoding declared inside the file.
+
+ Parameters
+ ----------
+ url : str
+ The URL from which to fetch the file.
+ errors : str
+ How to handle decoding errors in the file. Options are the same as for
+ bytes.decode(), but here 'replace' is the default.
+ skip_encoding_cookie : bool
+ If True (the default), and the encoding declaration is found in the first
+ two lines, that line will be excluded from the output - compiling a
+ unicode string with an encoding declaration is a SyntaxError in Python 2.
+
+ Returns
+ -------
+ A unicode string containing the contents of the file.
+ """
+ # Deferred import for faster start
+ try:
+ from urllib.request import urlopen # Py 3
+ except ImportError:
+ from urllib import urlopen
+ response = urlopen(url)
+ buffer = io.BytesIO(response.read())
+ return source_to_unicode(buffer, errors, skip_encoding_cookie)
+
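+# Illustrative sketch: fetching a script over HTTP and decoding it using
+# its own declared encoding. The URL is hypothetical.
+def _example_read_py_url():
+    return read_py_url('http://example.com/some_script.py', errors='strict')
+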
+def _list_readline(x):
+ """Given a list, returns a readline() function that returns the next element
+ with each call.
+ """
+ x = iter(x)
+ def readline():
+ return next(x)
+ return readline
+
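+# Illustrative sketch: adapting a list of byte lines to the readline()
+# protocol that detect_encoding() expects.
+def _example_list_readline():
+    readline = _list_readline([b'# coding: utf-8\n', b'pass\n'])
+    return detect_encoding(readline)  # ('utf-8', [b'# coding: utf-8\n'])
+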
+# Code for going between .py files and cached .pyc files ----------------------
+
+try: # Python 3.2, see PEP 3147
try:
- encoding, _ = detect_encoding(buffer.readline)
- except SyntaxError:
- encoding = "ascii"
- buffer.seek(0)
- text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
- text.mode = 'r'
- if skip_encoding_cookie:
- return u"".join(strip_encoding_cookie(text))
- else:
- return text.read()
-
-def strip_encoding_cookie(filelike):
- """Generator to pull lines from a text-mode file, skipping the encoding
- cookie if it is found in the first two lines.
- """
- it = iter(filelike)
- try:
- first = next(it)
- if not cookie_comment_re.match(first):
- yield first
- second = next(it)
- if not cookie_comment_re.match(second):
- yield second
- except StopIteration:
- return
-
- for line in it:
- yield line
-
-def read_py_file(filename, skip_encoding_cookie=True):
- """Read a Python file, using the encoding declared inside the file.
-
- Parameters
- ----------
- filename : str
- The path to the file to read.
- skip_encoding_cookie : bool
- If True (the default), and the encoding declaration is found in the first
- two lines, that line will be excluded from the output - compiling a
- unicode string with an encoding declaration is a SyntaxError in Python 2.
-
- Returns
- -------
- A unicode string containing the contents of the file.
- """
- with open(filename) as f: # the open function defined in this module.
- if skip_encoding_cookie:
- return "".join(strip_encoding_cookie(f))
- else:
- return f.read()
-
-def read_py_url(url, errors='replace', skip_encoding_cookie=True):
- """Read a Python file from a URL, using the encoding declared inside the file.
-
- Parameters
- ----------
- url : str
- The URL from which to fetch the file.
- errors : str
- How to handle decoding errors in the file. Options are the same as for
- bytes.decode(), but here 'replace' is the default.
- skip_encoding_cookie : bool
- If True (the default), and the encoding declaration is found in the first
- two lines, that line will be excluded from the output - compiling a
- unicode string with an encoding declaration is a SyntaxError in Python 2.
-
- Returns
- -------
- A unicode string containing the contents of the file.
- """
- # Deferred import for faster start
- try:
- from urllib.request import urlopen # Py 3
- except ImportError:
- from urllib import urlopen
- response = urlopen(url)
- buffer = io.BytesIO(response.read())
- return source_to_unicode(buffer, errors, skip_encoding_cookie)
-
-def _list_readline(x):
- """Given a list, returns a readline() function that returns the next element
- with each call.
- """
- x = iter(x)
- def readline():
- return next(x)
- return readline
-
-# Code for going between .py files and cached .pyc files ----------------------
-
-try: # Python 3.2, see PEP 3147
- try:
- from importlib.util import source_from_cache, cache_from_source
-    except ImportError:
-        # deprecated since Python 3.4
- from imp import source_from_cache, cache_from_source
-except ImportError:
- # Python <= 3.1: .pyc files go next to .py
- def source_from_cache(path):
- basename, ext = os.path.splitext(path)
- if ext not in ('.pyc', '.pyo'):
- raise ValueError('Not a cached Python file extension', ext)
- # Should we look for .pyw files?
- return basename + '.py'
-
- def cache_from_source(path, debug_override=None):
- if debug_override is None:
- debug_override = __debug__
- basename, ext = os.path.splitext(path)
-        return basename + ('.pyc' if debug_override else '.pyo')
+ from importlib.util import source_from_cache, cache_from_source
+    except ImportError:
+        # deprecated since Python 3.4
+ from imp import source_from_cache, cache_from_source
+except ImportError:
+ # Python <= 3.1: .pyc files go next to .py
+ def source_from_cache(path):
+ basename, ext = os.path.splitext(path)
+ if ext not in ('.pyc', '.pyo'):
+ raise ValueError('Not a cached Python file extension', ext)
+ # Should we look for .pyw files?
+ return basename + '.py'
+
+ def cache_from_source(path, debug_override=None):
+ if debug_override is None:
+ debug_override = __debug__
+ basename, ext = os.path.splitext(path)
+        return basename + ('.pyc' if debug_override else '.pyo')
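+
+# Illustrative sketch: round-tripping between a source path and its cached
+# bytecode path with whichever pair of functions was bound above. The path
+# is hypothetical; on 3.2+ the cache lands in __pycache__, on the fallback
+# it is a sibling .pyc/.pyo file.
+def _example_cache_paths(py_path='pkg/mod.py'):
+    pyc = cache_from_source(py_path)
+    return pyc, source_from_cache(pyc)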
diff --git a/contrib/python/ipython/py2/IPython/utils/path.py b/contrib/python/ipython/py2/IPython/utils/path.py
index 800e0e13ee..fa850812c7 100644
--- a/contrib/python/ipython/py2/IPython/utils/path.py
+++ b/contrib/python/ipython/py2/IPython/utils/path.py
@@ -1,447 +1,447 @@
-# encoding: utf-8
-"""
-Utilities for path handling.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import os
-import sys
-import errno
-import shutil
-import random
-import glob
-from warnings import warn
-from hashlib import md5
-
-from IPython.utils.process import system
-from IPython.utils import py3compat
-from IPython.utils.decorators import undoc
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-fs_encoding = sys.getfilesystemencoding()
-
-def _writable_dir(path):
- """Whether `path` is a directory, to which the user has write access."""
- return os.path.isdir(path) and os.access(path, os.W_OK)
-
-if sys.platform == 'win32':
- def _get_long_path_name(path):
- """Get a long path name (expand ~) on Windows using ctypes.
-
- Examples
- --------
-
- >>> get_long_path_name('c:\\docume~1')
- u'c:\\\\Documents and Settings'
-
- """
- try:
- import ctypes
- except ImportError:
- raise ImportError('you need to have ctypes installed for this to work')
- _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
- _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
- ctypes.c_uint ]
-
- buf = ctypes.create_unicode_buffer(260)
- rv = _GetLongPathName(path, buf, 260)
- if rv == 0 or rv > 260:
- return path
- else:
- return buf.value
-else:
- def _get_long_path_name(path):
- """Dummy no-op."""
- return path
-
-
-
-def get_long_path_name(path):
- """Expand a path into its long form.
-
- On Windows this expands any ~ in the paths. On other platforms, it is
- a null operation.
- """
- return _get_long_path_name(path)
-
-
-def unquote_filename(name, win32=(sys.platform=='win32')):
- """ On Windows, remove leading and trailing quotes from filenames.
+# encoding: utf-8
+"""
+Utilities for path handling.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import os
+import sys
+import errno
+import shutil
+import random
+import glob
+from warnings import warn
+from hashlib import md5
+
+from IPython.utils.process import system
+from IPython.utils import py3compat
+from IPython.utils.decorators import undoc
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+fs_encoding = sys.getfilesystemencoding()
+
+def _writable_dir(path):
+ """Whether `path` is a directory, to which the user has write access."""
+ return os.path.isdir(path) and os.access(path, os.W_OK)
+
+if sys.platform == 'win32':
+ def _get_long_path_name(path):
+ """Get a long path name (expand ~) on Windows using ctypes.
+
+ Examples
+ --------
+
+ >>> get_long_path_name('c:\\docume~1')
+ u'c:\\\\Documents and Settings'
+
+ """
+ try:
+ import ctypes
+ except ImportError:
+ raise ImportError('you need to have ctypes installed for this to work')
+ _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
+ _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
+ ctypes.c_uint ]
+
+ buf = ctypes.create_unicode_buffer(260)
+ rv = _GetLongPathName(path, buf, 260)
+ if rv == 0 or rv > 260:
+ return path
+ else:
+ return buf.value
+else:
+ def _get_long_path_name(path):
+ """Dummy no-op."""
+ return path
+
+
+
+def get_long_path_name(path):
+ """Expand a path into its long form.
+
+ On Windows this expands any ~ in the paths. On other platforms, it is
+ a null operation.
+ """
+ return _get_long_path_name(path)
+
+
+def unquote_filename(name, win32=(sys.platform=='win32')):
+ """ On Windows, remove leading and trailing quotes from filenames.
This function has been deprecated and should not be used any more:
unquoting is now taken care of by :func:`IPython.utils.process.arg_split`.
- """
+ """
warn("'unquote_filename' is deprecated since IPython 5.0 and should not "
"be used anymore", DeprecationWarning, stacklevel=2)
- if win32:
- if name.startswith(("'", '"')) and name.endswith(("'", '"')):
- name = name[1:-1]
- return name
-
-
-def compress_user(path):
- """Reverse of :func:`os.path.expanduser`
+ if win32:
+ if name.startswith(("'", '"')) and name.endswith(("'", '"')):
+ name = name[1:-1]
+ return name
+
+
+def compress_user(path):
+ """Reverse of :func:`os.path.expanduser`
"""
- path = py3compat.unicode_to_str(path, sys.getfilesystemencoding())
- home = os.path.expanduser('~')
- if path.startswith(home):
- path = "~" + path[len(home):]
- return path
-
-def get_py_filename(name, force_win32=None):
- """Return a valid python filename in the current directory.
-
- If the given name is not a file, it adds '.py' and searches again.
- Raises IOError with an informative message if the file isn't found.
- """
-
- name = os.path.expanduser(name)
+ path = py3compat.unicode_to_str(path, sys.getfilesystemencoding())
+ home = os.path.expanduser('~')
+ if path.startswith(home):
+ path = "~" + path[len(home):]
+ return path
+
+def get_py_filename(name, force_win32=None):
+ """Return a valid python filename in the current directory.
+
+ If the given name is not a file, it adds '.py' and searches again.
+ Raises IOError with an informative message if the file isn't found.
+ """
+
+ name = os.path.expanduser(name)
if force_win32 is not None:
warn("The 'force_win32' argument to 'get_py_filename' is deprecated "
"since IPython 5.0 and should not be used anymore",
DeprecationWarning, stacklevel=2)
- if not os.path.isfile(name) and not name.endswith('.py'):
- name += '.py'
- if os.path.isfile(name):
- return name
- else:
- raise IOError('File `%r` not found.' % name)
-
-
-def filefind(filename, path_dirs=None):
- """Find a file by looking through a sequence of paths.
-
- This iterates through a sequence of paths looking for a file and returns
-    the full, absolute path of the first occurrence of the file. If no set of
- path dirs is given, the filename is tested as is, after running through
- :func:`expandvars` and :func:`expanduser`. Thus a simple call::
-
- filefind('myfile.txt')
-
- will find the file in the current working dir, but::
-
- filefind('~/myfile.txt')
-
-    will find the file in the user's home directory. This function does not
- automatically try any paths, such as the cwd or the user's home directory.
-
- Parameters
- ----------
- filename : str
- The filename to look for.
- path_dirs : str, None or sequence of str
- The sequence of paths to look for the file in. If None, the filename
-        needs to be absolute or be in the cwd. If a string, the string is
-        put into a sequence and then searched. If a sequence, walk through
- each element and join with ``filename``, calling :func:`expandvars`
- and :func:`expanduser` before testing for existence.
-
- Returns
- -------
- Raises :exc:`IOError` or returns absolute path to file.
- """
-
- # If paths are quoted, abspath gets confused, strip them...
- filename = filename.strip('"').strip("'")
- # If the input is an absolute path, just check it exists
- if os.path.isabs(filename) and os.path.isfile(filename):
- return filename
-
- if path_dirs is None:
- path_dirs = ("",)
- elif isinstance(path_dirs, py3compat.string_types):
- path_dirs = (path_dirs,)
-
- for path in path_dirs:
- if path == '.': path = py3compat.getcwd()
- testname = expand_path(os.path.join(path, filename))
- if os.path.isfile(testname):
- return os.path.abspath(testname)
-
- raise IOError("File %r does not exist in any of the search paths: %r" %
- (filename, path_dirs) )
-
-
-class HomeDirError(Exception):
- pass
-
-
-def get_home_dir(require_writable=False):
- """Return the 'home' directory, as a unicode string.
-
- Uses os.path.expanduser('~'), and checks for writability.
-
- See stdlib docs for how this is determined.
- $HOME is first priority on *ALL* platforms.
-
- Parameters
- ----------
-
- require_writable : bool [default: False]
- if True:
- guarantees the return value is a writable directory, otherwise
- raises HomeDirError
- if False:
- The path is resolved, but it is not guaranteed to exist or be writable.
- """
-
- homedir = os.path.expanduser('~')
- # Next line will make things work even when /home/ is a symlink to
- # /usr/home as it is on FreeBSD, for example
- homedir = os.path.realpath(homedir)
-
- if not _writable_dir(homedir) and os.name == 'nt':
- # expanduser failed, use the registry to get the 'My Documents' folder.
- try:
- try:
- import winreg as wreg # Py 3
- except ImportError:
- import _winreg as wreg # Py 2
- key = wreg.OpenKey(
- wreg.HKEY_CURRENT_USER,
-                r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
- )
- homedir = wreg.QueryValueEx(key,'Personal')[0]
- key.Close()
- except:
- pass
-
- if (not require_writable) or _writable_dir(homedir):
- return py3compat.cast_unicode(homedir, fs_encoding)
- else:
- raise HomeDirError('%s is not a writable dir, '
- 'set $HOME environment variable to override' % homedir)
-
-def get_xdg_dir():
- """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
-
-    This is only for non-OS X posix (Linux, Unix, etc.) systems.
- """
-
- env = os.environ
-
- if os.name == 'posix' and sys.platform != 'darwin':
- # Linux, Unix, AIX, etc.
- # use ~/.config if empty OR not set
- xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
- if xdg and _writable_dir(xdg):
- return py3compat.cast_unicode(xdg, fs_encoding)
-
- return None
-
-
-def get_xdg_cache_dir():
- """Return the XDG_CACHE_HOME, if it is defined and exists, else None.
-
-    This is only for non-OS X posix (Linux, Unix, etc.) systems.
- """
-
- env = os.environ
-
- if os.name == 'posix' and sys.platform != 'darwin':
- # Linux, Unix, AIX, etc.
- # use ~/.cache if empty OR not set
- xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
- if xdg and _writable_dir(xdg):
- return py3compat.cast_unicode(xdg, fs_encoding)
-
- return None
-
-
-@undoc
-def get_ipython_dir():
+ if not os.path.isfile(name) and not name.endswith('.py'):
+ name += '.py'
+ if os.path.isfile(name):
+ return name
+ else:
+ raise IOError('File `%r` not found.' % name)
+
+
+def filefind(filename, path_dirs=None):
+ """Find a file by looking through a sequence of paths.
+
+ This iterates through a sequence of paths looking for a file and returns
+    the full, absolute path of the first occurrence of the file. If no set of
+ path dirs is given, the filename is tested as is, after running through
+ :func:`expandvars` and :func:`expanduser`. Thus a simple call::
+
+ filefind('myfile.txt')
+
+ will find the file in the current working dir, but::
+
+ filefind('~/myfile.txt')
+
+    will find the file in the user's home directory. This function does not
+ automatically try any paths, such as the cwd or the user's home directory.
+
+ Parameters
+ ----------
+ filename : str
+ The filename to look for.
+ path_dirs : str, None or sequence of str
+ The sequence of paths to look for the file in. If None, the filename
+        needs to be absolute or be in the cwd. If a string, the string is
+        put into a sequence and then searched. If a sequence, walk through
+ each element and join with ``filename``, calling :func:`expandvars`
+ and :func:`expanduser` before testing for existence.
+
+ Returns
+ -------
+ Raises :exc:`IOError` or returns absolute path to file.
+ """
+
+ # If paths are quoted, abspath gets confused, strip them...
+ filename = filename.strip('"').strip("'")
+ # If the input is an absolute path, just check it exists
+ if os.path.isabs(filename) and os.path.isfile(filename):
+ return filename
+
+ if path_dirs is None:
+ path_dirs = ("",)
+ elif isinstance(path_dirs, py3compat.string_types):
+ path_dirs = (path_dirs,)
+
+ for path in path_dirs:
+ if path == '.': path = py3compat.getcwd()
+ testname = expand_path(os.path.join(path, filename))
+ if os.path.isfile(testname):
+ return os.path.abspath(testname)
+
+ raise IOError("File %r does not exist in any of the search paths: %r" %
+ (filename, path_dirs) )
+
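+# Illustrative sketch: searching several directories for a config file;
+# filefind() raises IOError when nothing matches. Names are hypothetical.
+def _example_filefind():
+    return filefind('ipython_config.py', ['.', '~/.ipython'])
+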
+
+class HomeDirError(Exception):
+ pass
+
+
+def get_home_dir(require_writable=False):
+ """Return the 'home' directory, as a unicode string.
+
+ Uses os.path.expanduser('~'), and checks for writability.
+
+ See stdlib docs for how this is determined.
+ $HOME is first priority on *ALL* platforms.
+
+ Parameters
+ ----------
+
+ require_writable : bool [default: False]
+ if True:
+ guarantees the return value is a writable directory, otherwise
+ raises HomeDirError
+ if False:
+ The path is resolved, but it is not guaranteed to exist or be writable.
+ """
+
+ homedir = os.path.expanduser('~')
+ # Next line will make things work even when /home/ is a symlink to
+ # /usr/home as it is on FreeBSD, for example
+ homedir = os.path.realpath(homedir)
+
+ if not _writable_dir(homedir) and os.name == 'nt':
+ # expanduser failed, use the registry to get the 'My Documents' folder.
+ try:
+ try:
+ import winreg as wreg # Py 3
+ except ImportError:
+ import _winreg as wreg # Py 2
+ key = wreg.OpenKey(
+ wreg.HKEY_CURRENT_USER,
+                r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ )
+ homedir = wreg.QueryValueEx(key,'Personal')[0]
+ key.Close()
+ except:
+ pass
+
+ if (not require_writable) or _writable_dir(homedir):
+ return py3compat.cast_unicode(homedir, fs_encoding)
+ else:
+ raise HomeDirError('%s is not a writable dir, '
+ 'set $HOME environment variable to override' % homedir)
+
+def get_xdg_dir():
+ """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
+
+    This is only for non-OS X posix (Linux, Unix, etc.) systems.
+ """
+
+ env = os.environ
+
+ if os.name == 'posix' and sys.platform != 'darwin':
+ # Linux, Unix, AIX, etc.
+ # use ~/.config if empty OR not set
+ xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
+ if xdg and _writable_dir(xdg):
+ return py3compat.cast_unicode(xdg, fs_encoding)
+
+ return None
+
+
+def get_xdg_cache_dir():
+ """Return the XDG_CACHE_HOME, if it is defined and exists, else None.
+
+    This is only for non-OS X posix (Linux, Unix, etc.) systems.
+ """
+
+ env = os.environ
+
+ if os.name == 'posix' and sys.platform != 'darwin':
+ # Linux, Unix, AIX, etc.
+ # use ~/.cache if empty OR not set
+ xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
+ if xdg and _writable_dir(xdg):
+ return py3compat.cast_unicode(xdg, fs_encoding)
+
+ return None
+
+
+@undoc
+def get_ipython_dir():
warn("get_ipython_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
- from IPython.paths import get_ipython_dir
- return get_ipython_dir()
-
-@undoc
-def get_ipython_cache_dir():
+ from IPython.paths import get_ipython_dir
+ return get_ipython_dir()
+
+@undoc
+def get_ipython_cache_dir():
warn("get_ipython_cache_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
- from IPython.paths import get_ipython_cache_dir
- return get_ipython_cache_dir()
-
-@undoc
-def get_ipython_package_dir():
+ from IPython.paths import get_ipython_cache_dir
+ return get_ipython_cache_dir()
+
+@undoc
+def get_ipython_package_dir():
warn("get_ipython_package_dir has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
- from IPython.paths import get_ipython_package_dir
- return get_ipython_package_dir()
-
-@undoc
-def get_ipython_module_path(module_str):
+ from IPython.paths import get_ipython_package_dir
+ return get_ipython_package_dir()
+
+@undoc
+def get_ipython_module_path(module_str):
warn("get_ipython_module_path has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
- from IPython.paths import get_ipython_module_path
- return get_ipython_module_path(module_str)
-
-@undoc
-def locate_profile(profile='default'):
+ from IPython.paths import get_ipython_module_path
+ return get_ipython_module_path(module_str)
+
+@undoc
+def locate_profile(profile='default'):
warn("locate_profile has moved to the IPython.paths module since IPython 4.0.", stacklevel=2)
- from IPython.paths import locate_profile
- return locate_profile(profile=profile)
-
-def expand_path(s):
- """Expand $VARS and ~names in a string, like a shell
-
- :Examples:
-
- In [2]: os.environ['FOO']='test'
-
- In [3]: expand_path('variable FOO is $FOO')
- Out[3]: 'variable FOO is test'
- """
-    # This is a pretty subtle hack. When expanduser is given a UNC path
-    # on Windows (\\server\share$\%username%), os.path.expandvars removes
-    # the $ to get (\\server\share\%username%); it seems to treat a lone $
-    # as an empty variable. But we need the $ to remain there (it indicates
-    # a hidden share).
- if os.name=='nt':
- s = s.replace('$\\', 'IPYTHON_TEMP')
- s = os.path.expandvars(os.path.expanduser(s))
- if os.name=='nt':
- s = s.replace('IPYTHON_TEMP', '$\\')
- return s
-
-
-def unescape_glob(string):
- """Unescape glob pattern in `string`."""
- def unescape(s):
- for pattern in '*[]!?':
- s = s.replace(r'\{0}'.format(pattern), pattern)
- return s
- return '\\'.join(map(unescape, string.split('\\\\')))
-
-
-def shellglob(args):
- """
- Do glob expansion for each element in `args` and return a flattened list.
-
- Unmatched glob pattern will remain as-is in the returned list.
-
- """
- expanded = []
- # Do not unescape backslash in Windows as it is interpreted as
- # path separator:
- unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
- for a in args:
- expanded.extend(glob.glob(a) or [unescape(a)])
- return expanded
-
-
-def target_outdated(target,deps):
- """Determine whether a target is out of date.
-
- target_outdated(target,deps) -> 1/0
-
- deps: list of filenames which MUST exist.
- target: single filename which may or may not exist.
-
- If target doesn't exist or is older than any file listed in deps, return
- true, otherwise return false.
- """
- try:
- target_time = os.path.getmtime(target)
- except os.error:
- return 1
- for dep in deps:
- dep_time = os.path.getmtime(dep)
- if dep_time > target_time:
- #print "For target",target,"Dep failed:",dep # dbg
- #print "times (dep,tar):",dep_time,target_time # dbg
- return 1
- return 0
-
-
-def target_update(target,deps,cmd):
- """Update a target with a given command given a list of dependencies.
-
- target_update(target,deps,cmd) -> runs cmd if target is outdated.
-
- This is just a wrapper around target_outdated() which calls the given
- command if target is outdated."""
-
- if target_outdated(target,deps):
- system(cmd)
-
-@undoc
-def filehash(path):
- """Make an MD5 hash of a file, ignoring any differences in line
- ending characters."""
+ from IPython.paths import locate_profile
+ return locate_profile(profile=profile)
+
+def expand_path(s):
+ """Expand $VARS and ~names in a string, like a shell
+
+ :Examples:
+
+ In [2]: os.environ['FOO']='test'
+
+ In [3]: expand_path('variable FOO is $FOO')
+ Out[3]: 'variable FOO is test'
+ """
+    # This is a pretty subtle hack. When expanduser is given a UNC path
+    # on Windows (\\server\share$\%username%), os.path.expandvars removes
+    # the $ to get (\\server\share\%username%); it seems to treat a lone $
+    # as an empty variable. But we need the $ to remain there (it indicates
+    # a hidden share).
+ if os.name=='nt':
+ s = s.replace('$\\', 'IPYTHON_TEMP')
+ s = os.path.expandvars(os.path.expanduser(s))
+ if os.name=='nt':
+ s = s.replace('IPYTHON_TEMP', '$\\')
+ return s
+
+
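+# Illustrative sketch: shell-style expansion of both $VARS and ~ in one
+# string. The environment variable is hypothetical.
+def _example_expand_path():
+    os.environ['MYVAR'] = 'demo'
+    return expand_path('~/projects/$MYVAR')
+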
+def unescape_glob(string):
+ """Unescape glob pattern in `string`."""
+ def unescape(s):
+ for pattern in '*[]!?':
+ s = s.replace(r'\{0}'.format(pattern), pattern)
+ return s
+ return '\\'.join(map(unescape, string.split('\\\\')))
+
+
+def shellglob(args):
+ """
+ Do glob expansion for each element in `args` and return a flattened list.
+
+ Unmatched glob pattern will remain as-is in the returned list.
+
+ """
+ expanded = []
+ # Do not unescape backslash in Windows as it is interpreted as
+ # path separator:
+ unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
+ for a in args:
+ expanded.extend(glob.glob(a) or [unescape(a)])
+ return expanded
+
+
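+# Illustrative sketch: expanding a mix of glob patterns; a pattern with no
+# matches is kept as-is (unescaped on POSIX). Patterns are hypothetical.
+def _example_shellglob():
+    return shellglob(['*.py', 'no_such_file_*.txt'])
+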
+def target_outdated(target,deps):
+ """Determine whether a target is out of date.
+
+ target_outdated(target,deps) -> 1/0
+
+ deps: list of filenames which MUST exist.
+ target: single filename which may or may not exist.
+
+ If target doesn't exist or is older than any file listed in deps, return
+ true, otherwise return false.
+ """
+ try:
+ target_time = os.path.getmtime(target)
+ except os.error:
+ return 1
+ for dep in deps:
+ dep_time = os.path.getmtime(dep)
+ if dep_time > target_time:
+ #print "For target",target,"Dep failed:",dep # dbg
+ #print "times (dep,tar):",dep_time,target_time # dbg
+ return 1
+ return 0
+
+
+def target_update(target,deps,cmd):
+ """Update a target with a given command given a list of dependencies.
+
+ target_update(target,deps,cmd) -> runs cmd if target is outdated.
+
+ This is just a wrapper around target_outdated() which calls the given
+ command if target is outdated."""
+
+ if target_outdated(target,deps):
+ system(cmd)
+
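+# Illustrative sketch: a make-style freshness check. Paths and command are
+# hypothetical; the command runs only when the target is missing or older
+# than a dependency.
+def _example_target_update():
+    target_update('docs/index.html', ['docs/index.rst'],
+                  'rst2html docs/index.rst docs/index.html')
+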
+@undoc
+def filehash(path):
+ """Make an MD5 hash of a file, ignoring any differences in line
+ ending characters."""
warn("filehash() is deprecated since IPython 4.0", DeprecationWarning, stacklevel=2)
- with open(path, "rU") as f:
- return md5(py3compat.str_to_bytes(f.read())).hexdigest()
-
-ENOLINK = 1998
-
-def link(src, dst):
- """Hard links ``src`` to ``dst``, returning 0 or errno.
-
- Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
- supported by the operating system.
- """
-
- if not hasattr(os, "link"):
- return ENOLINK
- link_errno = 0
- try:
- os.link(src, dst)
- except OSError as e:
- link_errno = e.errno
- return link_errno
-
-
-def link_or_copy(src, dst):
- """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
-
- Attempts to maintain the semantics of ``shutil.copy``.
-
- Because ``os.link`` does not overwrite files, a unique temporary file
- will be used if the target already exists, then that file will be moved
- into place.
- """
-
- if os.path.isdir(dst):
- dst = os.path.join(dst, os.path.basename(src))
-
- link_errno = link(src, dst)
- if link_errno == errno.EEXIST:
- if os.stat(src).st_ino == os.stat(dst).st_ino:
- # dst is already a hard link to the correct file, so we don't need
- # to do anything else. If we try to link and rename the file
- # anyway, we get duplicate files - see http://bugs.python.org/issue21876
- return
-
- new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
- try:
- link_or_copy(src, new_dst)
- except:
- try:
- os.remove(new_dst)
- except OSError:
- pass
- raise
- os.rename(new_dst, dst)
- elif link_errno != 0:
- # Either link isn't supported, or the filesystem doesn't support
- # linking, or 'src' and 'dst' are on different filesystems.
- shutil.copy(src, dst)
-
-def ensure_dir_exists(path, mode=0o755):
- """ensure that a directory exists
-
- If it doesn't exist, try to create it and protect against a race condition
- if another process is doing the same.
-
- The default permissions are 755, which differ from os.makedirs default of 777.
- """
- if not os.path.exists(path):
- try:
- os.makedirs(path, mode=mode)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- elif not os.path.isdir(path):
- raise IOError("%r exists but is not a directory" % path)
+ with open(path, "rU") as f:
+ return md5(py3compat.str_to_bytes(f.read())).hexdigest()
+
+ENOLINK = 1998
+
+def link(src, dst):
+ """Hard links ``src`` to ``dst``, returning 0 or errno.
+
+ Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
+ supported by the operating system.
+ """
+
+ if not hasattr(os, "link"):
+ return ENOLINK
+ link_errno = 0
+ try:
+ os.link(src, dst)
+ except OSError as e:
+ link_errno = e.errno
+ return link_errno
+
+
+def link_or_copy(src, dst):
+ """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
+
+ Attempts to maintain the semantics of ``shutil.copy``.
+
+ Because ``os.link`` does not overwrite files, a unique temporary file
+ will be used if the target already exists, then that file will be moved
+ into place.
+ """
+
+ if os.path.isdir(dst):
+ dst = os.path.join(dst, os.path.basename(src))
+
+ link_errno = link(src, dst)
+ if link_errno == errno.EEXIST:
+ if os.stat(src).st_ino == os.stat(dst).st_ino:
+ # dst is already a hard link to the correct file, so we don't need
+ # to do anything else. If we try to link and rename the file
+ # anyway, we get duplicate files - see http://bugs.python.org/issue21876
+ return
+
+ new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
+ try:
+ link_or_copy(src, new_dst)
+ except:
+ try:
+ os.remove(new_dst)
+ except OSError:
+ pass
+ raise
+ os.rename(new_dst, dst)
+ elif link_errno != 0:
+ # Either link isn't supported, or the filesystem doesn't support
+ # linking, or 'src' and 'dst' are on different filesystems.
+ shutil.copy(src, dst)
+
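+# Illustrative sketch: hardlink when the filesystem supports it, silently
+# fall back to a copy otherwise. The paths are hypothetical.
+def _example_link_or_copy():
+    link_or_copy('data/source.csv', 'backup/source.csv')
+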
+def ensure_dir_exists(path, mode=0o755):
+ """ensure that a directory exists
+
+ If it doesn't exist, try to create it and protect against a race condition
+ if another process is doing the same.
+
+ The default permissions are 755, which differ from os.makedirs default of 777.
+ """
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path, mode=mode)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ elif not os.path.isdir(path):
+ raise IOError("%r exists but is not a directory" % path)
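+
+# Illustrative sketch: create a private cache directory, tolerating a
+# concurrent process creating it first. The path is hypothetical.
+def _example_ensure_dir_exists():
+    ensure_dir_exists(os.path.expanduser('~/.cache/myapp'), mode=0o700)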
diff --git a/contrib/python/ipython/py2/IPython/utils/pickleutil.py b/contrib/python/ipython/py2/IPython/utils/pickleutil.py
index 9111ad73c0..665ff09f2d 100644
--- a/contrib/python/ipython/py2/IPython/utils/pickleutil.py
+++ b/contrib/python/ipython/py2/IPython/utils/pickleutil.py
@@ -1,5 +1,5 @@
-from warnings import warn
-
-warn("IPython.utils.pickleutil has moved to ipykernel.pickleutil")
-
-from ipykernel.pickleutil import *
+from warnings import warn
+
+warn("IPython.utils.pickleutil has moved to ipykernel.pickleutil")
+
+from ipykernel.pickleutil import *
diff --git a/contrib/python/ipython/py2/IPython/utils/process.py b/contrib/python/ipython/py2/IPython/utils/process.py
index ca85a03b5c..a274f43f3a 100644
--- a/contrib/python/ipython/py2/IPython/utils/process.py
+++ b/contrib/python/ipython/py2/IPython/utils/process.py
@@ -1,106 +1,106 @@
-# encoding: utf-8
-"""
-Utilities for working with external processes.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import os
-import sys
-
-if sys.platform == 'win32':
- from ._process_win32 import system, getoutput, arg_split, check_pid
-elif sys.platform == 'cli':
- from ._process_cli import system, getoutput, arg_split, check_pid
-else:
- from ._process_posix import system, getoutput, arg_split, check_pid
-
-from ._process_common import getoutputerror, get_output_error_code, process_handler
-from . import py3compat
-
-
-class FindCmdError(Exception):
- pass
-
-
-def find_cmd(cmd):
- """Find absolute path to executable cmd in a cross platform manner.
-
- This function tries to determine the full path to a command line program
- using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
-    time it will use the version that is first on the user's `PATH`.
-
- Warning, don't use this to find IPython command line programs as there
- is a risk you will find the wrong one. Instead find those using the
- following code and looking for the application itself::
-
- from IPython.utils.path import get_ipython_module_path
- from IPython.utils.process import pycmd2argv
- argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
-
- Parameters
- ----------
- cmd : str
- The command line program to look for.
- """
- path = py3compat.which(cmd)
- if path is None:
- raise FindCmdError('command could not be found: %s' % cmd)
- return path
-
-
-def is_cmd_found(cmd):
- """Check whether executable `cmd` exists or not and return a bool."""
- try:
- find_cmd(cmd)
- return True
- except FindCmdError:
- return False
-
-
-def pycmd2argv(cmd):
- r"""Take the path of a python command and return a list (argv-style).
-
- This only works on Python based command line programs and will find the
- location of the ``python`` executable using ``sys.executable`` to make
- sure the right version is used.
-
- For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
-    .com or .bat, and [sys.executable, cmd] otherwise.
-
- Parameters
- ----------
- cmd : string
- The path of the command.
-
- Returns
- -------
- argv-style list.
- """
- ext = os.path.splitext(cmd)[1]
- if ext in ['.exe', '.com', '.bat']:
- return [cmd]
- else:
- return [sys.executable, cmd]
-
-
-def abbrev_cwd():
- """ Return abbreviated version of cwd, e.g. d:mydir """
- cwd = py3compat.getcwd().replace('\\','/')
- drivepart = ''
- tail = cwd
- if sys.platform == 'win32':
- if len(cwd) < 4:
- return cwd
- drivepart,tail = os.path.splitdrive(cwd)
-
-
- parts = tail.split('/')
- if len(parts) > 2:
- tail = '/'.join(parts[-2:])
-
-    return drivepart + ('/' if cwd == '/' else tail)
+# encoding: utf-8
+"""
+Utilities for working with external processes.
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ from ._process_win32 import system, getoutput, arg_split, check_pid
+elif sys.platform == 'cli':
+ from ._process_cli import system, getoutput, arg_split, check_pid
+else:
+ from ._process_posix import system, getoutput, arg_split, check_pid
+
+from ._process_common import getoutputerror, get_output_error_code, process_handler
+from . import py3compat
+
+
+class FindCmdError(Exception):
+ pass
+
+
+def find_cmd(cmd):
+ """Find absolute path to executable cmd in a cross platform manner.
+
+ This function tries to determine the full path to a command line program
+ using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
+    time it will use the version that is first on the user's `PATH`.
+
+ Warning, don't use this to find IPython command line programs as there
+ is a risk you will find the wrong one. Instead find those using the
+ following code and looking for the application itself::
+
+ from IPython.utils.path import get_ipython_module_path
+ from IPython.utils.process import pycmd2argv
+ argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
+
+ Parameters
+ ----------
+ cmd : str
+ The command line program to look for.
+ """
+ path = py3compat.which(cmd)
+ if path is None:
+ raise FindCmdError('command could not be found: %s' % cmd)
+ return path
+
+
+def is_cmd_found(cmd):
+ """Check whether executable `cmd` exists or not and return a bool."""
+ try:
+ find_cmd(cmd)
+ return True
+ except FindCmdError:
+ return False
+
+
+def pycmd2argv(cmd):
+ r"""Take the path of a python command and return a list (argv-style).
+
+ This only works on Python based command line programs and will find the
+ location of the ``python`` executable using ``sys.executable`` to make
+ sure the right version is used.
+
+ For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
+    .com or .bat, and [sys.executable, cmd] otherwise.
+
+ Parameters
+ ----------
+ cmd : string
+ The path of the command.
+
+ Returns
+ -------
+ argv-style list.
+ """
+ ext = os.path.splitext(cmd)[1]
+ if ext in ['.exe', '.com', '.bat']:
+ return [cmd]
+ else:
+ return [sys.executable, cmd]
+
+
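+# Illustrative sketch: building an argv list that runs a script under the
+# same interpreter as the current process. The script path is hypothetical.
+def _example_pycmd2argv():
+    return pycmd2argv('scripts/ipapp.py')  # [sys.executable, 'scripts/ipapp.py']
+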
+def abbrev_cwd():
+ """ Return abbreviated version of cwd, e.g. d:mydir """
+ cwd = py3compat.getcwd().replace('\\','/')
+ drivepart = ''
+ tail = cwd
+ if sys.platform == 'win32':
+ if len(cwd) < 4:
+ return cwd
+ drivepart,tail = os.path.splitdrive(cwd)
+
+
+ parts = tail.split('/')
+ if len(parts) > 2:
+ tail = '/'.join(parts[-2:])
+
+    return drivepart + ('/' if cwd == '/' else tail)
diff --git a/contrib/python/ipython/py2/IPython/utils/py3compat.py b/contrib/python/ipython/py2/IPython/utils/py3compat.py
index adaac362a7..88602e5342 100644
--- a/contrib/python/ipython/py2/IPython/utils/py3compat.py
+++ b/contrib/python/ipython/py2/IPython/utils/py3compat.py
@@ -1,336 +1,336 @@
-# coding: utf-8
-"""Compatibility tricks for Python 3. Mainly to do with unicode."""
-import functools
-import os
-import sys
-import re
-import shutil
-import types
+# coding: utf-8
+"""Compatibility tricks for Python 3. Mainly to do with unicode."""
+import functools
+import os
+import sys
+import re
+import shutil
+import types
import platform
-
-from .encoding import DEFAULT_ENCODING
-
-def no_code(x, encoding=None):
- return x
-
-def decode(s, encoding=None):
- encoding = encoding or DEFAULT_ENCODING
- return s.decode(encoding, "replace")
-
-def encode(u, encoding=None):
- encoding = encoding or DEFAULT_ENCODING
- return u.encode(encoding, "replace")
-
-
-def cast_unicode(s, encoding=None):
- if isinstance(s, bytes):
- return decode(s, encoding)
- return s
-
-def cast_bytes(s, encoding=None):
- if not isinstance(s, bytes):
- return encode(s, encoding)
- return s
-
-def buffer_to_bytes(buf):
- """Cast a buffer object to bytes"""
- if not isinstance(buf, bytes):
- buf = bytes(buf)
- return buf
-
-def _modify_str_or_docstring(str_change_func):
- @functools.wraps(str_change_func)
- def wrapper(func_or_str):
- if isinstance(func_or_str, string_types):
- func = None
- doc = func_or_str
- else:
- func = func_or_str
- doc = func.__doc__
-
- # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly
- if doc is not None:
- doc = str_change_func(doc)
-
- if func:
- func.__doc__ = doc
- return func
- return doc
- return wrapper
-
-def safe_unicode(e):
- """unicode(e) with various fallbacks. Used for exceptions, which may not be
- safe to call unicode() on.
- """
- try:
- return unicode_type(e)
- except UnicodeError:
- pass
-
- try:
- return str_to_unicode(str(e))
- except UnicodeError:
- pass
-
- try:
- return str_to_unicode(repr(e))
- except UnicodeError:
- pass
-
- return u'Unrecoverably corrupt evalue'
-
-# shutil.which from Python 3.4
-def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
- """Given a command, mode, and a PATH string, return the path which
- conforms to the given mode on the PATH, or None if there is no such
- file.
-
- `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
- of os.environ.get("PATH"), or can be overridden with a custom search
- path.
-
- This is a backport of shutil.which from Python 3.4
- """
- # Check that a given file can be accessed with the correct mode.
- # Additionally check that `file` is not a directory, as on Windows
- # directories pass the os.access check.
- def _access_check(fn, mode):
- return (os.path.exists(fn) and os.access(fn, mode)
- and not os.path.isdir(fn))
-
- # If we're given a path with a directory part, look it up directly rather
- # than referring to PATH directories. This includes checking relative to the
- # current directory, e.g. ./script
- if os.path.dirname(cmd):
- if _access_check(cmd, mode):
- return cmd
- return None
-
- if path is None:
- path = os.environ.get("PATH", os.defpath)
- if not path:
- return None
- path = path.split(os.pathsep)
-
- if sys.platform == "win32":
- # The current directory takes precedence on Windows.
- if not os.curdir in path:
- path.insert(0, os.curdir)
-
- # PATHEXT is necessary to check on Windows.
- pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
- # See if the given file matches any of the expected path extensions.
- # This will allow us to short circuit when given "python.exe".
- # If it does match, only test that one, otherwise we have to try
- # others.
- if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
- files = [cmd]
- else:
- files = [cmd + ext for ext in pathext]
- else:
- # On other platforms you don't have things like PATHEXT to tell you
- # what file suffixes are executable, so just pass on cmd as-is.
- files = [cmd]
-
- seen = set()
- for dir in path:
- normdir = os.path.normcase(dir)
- if not normdir in seen:
- seen.add(normdir)
- for thefile in files:
- name = os.path.join(dir, thefile)
- if _access_check(name, mode):
- return name
- return None
-
-if sys.version_info[0] >= 3:
- PY3 = True
-
- # keep reference to builtin_mod because the kernel overrides that value
- # to forward requests to a frontend.
- def input(prompt=''):
- return builtin_mod.input(prompt)
-
- builtin_mod_name = "builtins"
- import builtins as builtin_mod
-
- str_to_unicode = no_code
- unicode_to_str = no_code
- str_to_bytes = encode
- bytes_to_str = decode
- cast_bytes_py2 = no_code
- cast_unicode_py2 = no_code
- buffer_to_bytes_py2 = no_code
-
- string_types = (str,)
- unicode_type = str
-
- which = shutil.which
-
- def isidentifier(s, dotted=False):
- if dotted:
- return all(isidentifier(a) for a in s.split("."))
- return s.isidentifier()
-
- xrange = range
- def iteritems(d): return iter(d.items())
- def itervalues(d): return iter(d.values())
- getcwd = os.getcwd
-
- MethodType = types.MethodType
-
- def execfile(fname, glob, loc=None, compiler=None):
- loc = loc if (loc is not None) else glob
- with open(fname, 'rb') as f:
- compiler = compiler or compile
- exec(compiler(f.read(), fname, 'exec'), glob, loc)
-
- # Refactor print statements in doctests.
- _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE)
- def _print_statement_sub(match):
-        expr = match.group('expr')
- return "print(%s)" % expr
-
- @_modify_str_or_docstring
- def doctest_refactor_print(doc):
- """Refactor 'print x' statements in a doctest to print(x) style. 2to3
- unfortunately doesn't pick up on our doctests.
-
- Can accept a string or a function, so it can be used as a decorator."""
- return _print_statement_re.sub(_print_statement_sub, doc)
-
- # Abstract u'abc' syntax:
- @_modify_str_or_docstring
- def u_format(s):
- """"{u}'abc'" --> "'abc'" (Python 3)
-
- Accepts a string or a function, so it can be used as a decorator."""
- return s.format(u='')
-
- def get_closure(f):
- """Get a function's closure attribute"""
- return f.__closure__
-
-else:
- PY3 = False
-
- # keep reference to builtin_mod because the kernel overrides that value
- # to forward requests to a frontend.
- def input(prompt=''):
- return builtin_mod.raw_input(prompt)
-
- builtin_mod_name = "__builtin__"
- import __builtin__ as builtin_mod
-
- str_to_unicode = decode
- unicode_to_str = encode
- str_to_bytes = no_code
- bytes_to_str = no_code
- cast_bytes_py2 = cast_bytes
- cast_unicode_py2 = cast_unicode
- buffer_to_bytes_py2 = buffer_to_bytes
-
- string_types = (str, unicode)
- unicode_type = unicode
-
- import re
- _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
- def isidentifier(s, dotted=False):
- if dotted:
- return all(isidentifier(a) for a in s.split("."))
- return bool(_name_re.match(s))
-
- xrange = xrange
- def iteritems(d): return d.iteritems()
- def itervalues(d): return d.itervalues()
- getcwd = os.getcwdu
-
- def MethodType(func, instance):
- return types.MethodType(func, instance, type(instance))
-
- def doctest_refactor_print(func_or_str):
- return func_or_str
-
- def get_closure(f):
- """Get a function's closure attribute"""
- return f.func_closure
-
- which = _shutil_which
-
- # Abstract u'abc' syntax:
- @_modify_str_or_docstring
- def u_format(s):
- """"{u}'abc'" --> "u'abc'" (Python 2)
-
- Accepts a string or a function, so it can be used as a decorator."""
- return s.format(u='u')
-
- if sys.platform == 'win32':
- def execfile(fname, glob=None, loc=None, compiler=None):
- loc = loc if (loc is not None) else glob
- scripttext = builtin_mod.open(fname).read()+ '\n'
- # compile converts unicode filename to str assuming
- # ascii. Let's do the conversion before calling compile
- if isinstance(fname, unicode):
- filename = unicode_to_str(fname)
- else:
- filename = fname
- compiler = compiler or compile
- exec(compiler(scripttext, filename, 'exec'), glob, loc)
-
- else:
- def execfile(fname, glob=None, loc=None, compiler=None):
- if isinstance(fname, unicode):
- filename = fname.encode(sys.getfilesystemencoding())
- else:
- filename = fname
- where = [ns for ns in [glob, loc] if ns is not None]
- if compiler is None:
- builtin_mod.execfile(filename, *where)
- else:
- scripttext = builtin_mod.open(fname).read().rstrip() + '\n'
- exec(compiler(scripttext, filename, 'exec'), glob, loc)
-
-
-PY2 = not PY3
+
+from .encoding import DEFAULT_ENCODING
+
+def no_code(x, encoding=None):
+ return x
+
+def decode(s, encoding=None):
+ encoding = encoding or DEFAULT_ENCODING
+ return s.decode(encoding, "replace")
+
+def encode(u, encoding=None):
+ encoding = encoding or DEFAULT_ENCODING
+ return u.encode(encoding, "replace")
+
+
+def cast_unicode(s, encoding=None):
+ if isinstance(s, bytes):
+ return decode(s, encoding)
+ return s
+
+def cast_bytes(s, encoding=None):
+ if not isinstance(s, bytes):
+ return encode(s, encoding)
+ return s
+
+def buffer_to_bytes(buf):
+ """Cast a buffer object to bytes"""
+ if not isinstance(buf, bytes):
+ buf = bytes(buf)
+ return buf
+
+def _modify_str_or_docstring(str_change_func):
+ @functools.wraps(str_change_func)
+ def wrapper(func_or_str):
+ if isinstance(func_or_str, string_types):
+ func = None
+ doc = func_or_str
+ else:
+ func = func_or_str
+ doc = func.__doc__
+
+ # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly
+ if doc is not None:
+ doc = str_change_func(doc)
+
+ if func:
+ func.__doc__ = doc
+ return func
+ return doc
+ return wrapper
+
+def safe_unicode(e):
+ """unicode(e) with various fallbacks. Used for exceptions, which may not be
+ safe to call unicode() on.
+ """
+ try:
+ return unicode_type(e)
+ except UnicodeError:
+ pass
+
+ try:
+ return str_to_unicode(str(e))
+ except UnicodeError:
+ pass
+
+ try:
+ return str_to_unicode(repr(e))
+ except UnicodeError:
+ pass
+
+ return u'Unrecoverably corrupt evalue'
+
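+# Illustrative sketch: safe_unicode() never raises, even for exception
+# objects whose string conversion chokes on non-ASCII bytes.
+def _example_safe_unicode():
+    try:
+        raise ValueError(b'\xff\xfe bad bytes')
+    except ValueError as e:
+        return safe_unicode(e)
+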
+# shutil.which from Python 3.4
+def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+
+ This is a backport of shutil.which from Python 3.4
+ """
+ # Check that a given file can be accessed with the correct mode.
+ # Additionally check that `file` is not a directory, as on Windows
+ # directories pass the os.access check.
+ def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode)
+ and not os.path.isdir(fn))
+
+ # If we're given a path with a directory part, look it up directly rather
+ # than referring to PATH directories. This includes checking relative to the
+ # current directory, e.g. ./script
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ if path is None:
+ path = os.environ.get("PATH", os.defpath)
+ if not path:
+ return None
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ # The current directory takes precedence on Windows.
+ if not os.curdir in path:
+ path.insert(0, os.curdir)
+
+ # PATHEXT is necessary to check on Windows.
+ pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ # See if the given file matches any of the expected path extensions.
+ # This will allow us to short circuit when given "python.exe".
+ # If it does match, only test that one, otherwise we have to try
+ # others.
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ # On other platforms you don't have things like PATHEXT to tell you
+ # what file suffixes are executable, so just pass on cmd as-is.
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if not normdir in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
+ return None
+
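+# Illustrative sketch: the backport behaves like Python 3.4's shutil.which,
+# returning a path string or None. The command name is hypothetical.
+def _example_which():
+    return _shutil_which('git')
+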
+if sys.version_info[0] >= 3:
+ PY3 = True
+
+ # keep reference to builtin_mod because the kernel overrides that value
+ # to forward requests to a frontend.
+ def input(prompt=''):
+ return builtin_mod.input(prompt)
+
+ builtin_mod_name = "builtins"
+ import builtins as builtin_mod
+
+ str_to_unicode = no_code
+ unicode_to_str = no_code
+ str_to_bytes = encode
+ bytes_to_str = decode
+ cast_bytes_py2 = no_code
+ cast_unicode_py2 = no_code
+ buffer_to_bytes_py2 = no_code
+
+ string_types = (str,)
+ unicode_type = str
+
+ which = shutil.which
+
+ def isidentifier(s, dotted=False):
+ if dotted:
+ return all(isidentifier(a) for a in s.split("."))
+ return s.isidentifier()
+
+ xrange = range
+ def iteritems(d): return iter(d.items())
+ def itervalues(d): return iter(d.values())
+ getcwd = os.getcwd
+
+ MethodType = types.MethodType
+
+ def execfile(fname, glob, loc=None, compiler=None):
+ loc = loc if (loc is not None) else glob
+ with open(fname, 'rb') as f:
+ compiler = compiler or compile
+ exec(compiler(f.read(), fname, 'exec'), glob, loc)
+
+ # Refactor print statements in doctests.
+ _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE)
+ def _print_statement_sub(match):
+        expr = match.group('expr')
+ return "print(%s)" % expr
+
+ @_modify_str_or_docstring
+ def doctest_refactor_print(doc):
+ """Refactor 'print x' statements in a doctest to print(x) style. 2to3
+ unfortunately doesn't pick up on our doctests.
+
+ Can accept a string or a function, so it can be used as a decorator."""
+ return _print_statement_re.sub(_print_statement_sub, doc)
+
+ # Abstract u'abc' syntax:
+ @_modify_str_or_docstring
+ def u_format(s):
+ """"{u}'abc'" --> "'abc'" (Python 3)
+
+ Accepts a string or a function, so it can be used as a decorator."""
+ return s.format(u='')
+
+ def get_closure(f):
+ """Get a function's closure attribute"""
+ return f.__closure__
+
+else:
+ PY3 = False
+
+ # keep reference to builtin_mod because the kernel overrides that value
+ # to forward requests to a frontend.
+ def input(prompt=''):
+ return builtin_mod.raw_input(prompt)
+
+ builtin_mod_name = "__builtin__"
+ import __builtin__ as builtin_mod
+
+ str_to_unicode = decode
+ unicode_to_str = encode
+ str_to_bytes = no_code
+ bytes_to_str = no_code
+ cast_bytes_py2 = cast_bytes
+ cast_unicode_py2 = cast_unicode
+ buffer_to_bytes_py2 = buffer_to_bytes
+
+ string_types = (str, unicode)
+ unicode_type = unicode
+
+ import re
+ _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
+ def isidentifier(s, dotted=False):
+ if dotted:
+ return all(isidentifier(a) for a in s.split("."))
+ return bool(_name_re.match(s))
+
+ xrange = xrange
+ def iteritems(d): return d.iteritems()
+ def itervalues(d): return d.itervalues()
+ getcwd = os.getcwdu
+
+ def MethodType(func, instance):
+ return types.MethodType(func, instance, type(instance))
+
+ def doctest_refactor_print(func_or_str):
+ return func_or_str
+
+ def get_closure(f):
+ """Get a function's closure attribute"""
+ return f.func_closure
+
+ which = _shutil_which
+
+ # Abstract u'abc' syntax:
+ @_modify_str_or_docstring
+ def u_format(s):
+ """"{u}'abc'" --> "u'abc'" (Python 2)
+
+ Accepts a string or a function, so it can be used as a decorator."""
+ return s.format(u='u')
+
+ if sys.platform == 'win32':
+ def execfile(fname, glob=None, loc=None, compiler=None):
+ loc = loc if (loc is not None) else glob
+ scripttext = builtin_mod.open(fname).read() + '\n'
+ # compile converts unicode filename to str assuming
+ # ascii. Let's do the conversion before calling compile
+ if isinstance(fname, unicode):
+ filename = unicode_to_str(fname)
+ else:
+ filename = fname
+ compiler = compiler or compile
+ exec(compiler(scripttext, filename, 'exec'), glob, loc)
+
+ else:
+ def execfile(fname, glob=None, loc=None, compiler=None):
+ if isinstance(fname, unicode):
+ filename = fname.encode(sys.getfilesystemencoding())
+ else:
+ filename = fname
+ where = [ns for ns in [glob, loc] if ns is not None]
+ if compiler is None:
+ builtin_mod.execfile(filename, *where)
+ else:
+ scripttext = builtin_mod.open(fname).read().rstrip() + '\n'
+ exec(compiler(scripttext, filename, 'exec'), glob, loc)
+
+
+PY2 = not PY3
PYPY = platform.python_implementation() == "PyPy"
-
-
-def annotate(**kwargs):
- """Python 3 compatible function annotation for Python 2."""
- if not kwargs:
- raise ValueError('annotations must be provided as keyword arguments')
- def dec(f):
- if hasattr(f, '__annotations__'):
- for k, v in kwargs.items():
- f.__annotations__[k] = v
- else:
- f.__annotations__ = kwargs
- return f
- return dec
-
-
-# Parts below taken from six:
-# Copyright (c) 2010-2013 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- return meta("_NewBase", bases, {})
+
+
+def annotate(**kwargs):
+ """Python 3 compatible function annotation for Python 2."""
+ if not kwargs:
+ raise ValueError('annotations must be provided as keyword arguments')
+ def dec(f):
+ if hasattr(f, '__annotations__'):
+ for k, v in kwargs.items():
+ f.__annotations__[k] = v
+ else:
+ f.__annotations__ = kwargs
+ return f
+ return dec
+
+
+# Parts below taken from six:
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ return meta("_NewBase", bases, {})
diff --git a/contrib/python/ipython/py2/IPython/utils/rlineimpl.py b/contrib/python/ipython/py2/IPython/utils/rlineimpl.py
index f8c4e84334..e1cf03942c 100644
--- a/contrib/python/ipython/py2/IPython/utils/rlineimpl.py
+++ b/contrib/python/ipython/py2/IPython/utils/rlineimpl.py
@@ -1,74 +1,74 @@
-# -*- coding: utf-8 -*-
-""" Imports and provides the 'correct' version of readline for the platform.
-
-Readline is used throughout IPython as::
-
- import IPython.utils.rlineimpl as readline
-
-In addition to normal readline stuff, this module provides have_readline
-boolean and _outputfile variable used in IPython.utils.
-"""
-
-import sys
-import warnings
-
-_rlmod_names = ['gnureadline', 'readline']
-
-have_readline = False
-for _rlmod_name in _rlmod_names:
- try:
- # import readline as _rl
- _rl = __import__(_rlmod_name)
- # from readline import *
- globals().update({k:v for k,v in _rl.__dict__.items() if not k.startswith('_')})
- except ImportError:
- pass
- else:
- have_readline = True
- break
-
-if have_readline and (sys.platform == 'win32' or sys.platform == 'cli'):
- try:
- _outputfile=_rl.GetOutputFile()
- except AttributeError:
- warnings.warn("Failed GetOutputFile")
- have_readline = False
-
-# Test to see if libedit is being used instead of GNU readline.
-# Thanks to Boyd Waters for the original patch.
-uses_libedit = False
-
-if have_readline:
- # Official Python docs state that 'libedit' is in the docstring for libedit readline:
- uses_libedit = _rl.__doc__ and 'libedit' in _rl.__doc__
- # Note that many non-System Pythons also do not use proper readline,
- # but do not report libedit at all, nor are they linked dynamically against libedit.
- # known culprits of this include: EPD, Fink
- # There is not much we can do to detect this, until we find a specific failure
- # case, rather than relying on the readline module to self-identify as broken.
-
-if uses_libedit and sys.platform == 'darwin':
- _rl.parse_and_bind("bind ^I rl_complete")
- warnings.warn('\n'.join(['', "*"*78,
- "libedit detected - readline will not be well behaved, including but not limited to:",
- " * crashes on tab completion",
- " * incorrect history navigation",
- " * corrupting long-lines",
- " * failure to wrap or indent lines properly",
- "It is highly recommended that you install gnureadline, which is installable with:",
- " pip install gnureadline",
- "*"*78]),
- RuntimeWarning)
-
-# the clear_history() function was only introduced in Python 2.4 and is
-# actually optional in the readline API, so we must explicitly check for its
-# existence. Some known platforms actually don't have it. This thread:
-# http://mail.python.org/pipermail/python-dev/2003-August/037845.html
-# has the original discussion.
-
-if have_readline:
- try:
- _rl.clear_history
- except AttributeError:
- def clear_history(): pass
- _rl.clear_history = clear_history
+# -*- coding: utf-8 -*-
+""" Imports and provides the 'correct' version of readline for the platform.
+
+Readline is used throughout IPython as::
+
+ import IPython.utils.rlineimpl as readline
+
+In addition to the normal readline API, this module provides the
+have_readline boolean and the _outputfile variable used in IPython.utils.
+"""
+
+import sys
+import warnings
+
+_rlmod_names = ['gnureadline', 'readline']
+
+have_readline = False
+for _rlmod_name in _rlmod_names:
+ try:
+ # import readline as _rl
+ _rl = __import__(_rlmod_name)
+ # from readline import *
+ globals().update({k:v for k,v in _rl.__dict__.items() if not k.startswith('_')})
+ except ImportError:
+ pass
+ else:
+ have_readline = True
+ break
+
+if have_readline and (sys.platform == 'win32' or sys.platform == 'cli'):
+ try:
+ _outputfile = _rl.GetOutputFile()
+ except AttributeError:
+ warnings.warn("Failed GetOutputFile")
+ have_readline = False
+
+# Test to see if libedit is being used instead of GNU readline.
+# Thanks to Boyd Waters for the original patch.
+uses_libedit = False
+
+if have_readline:
+ # Official Python docs state that 'libedit' is in the docstring for libedit readline:
+ uses_libedit = _rl.__doc__ and 'libedit' in _rl.__doc__
+ # Note that many non-System Pythons also do not use proper readline,
+ # but do not report libedit at all, nor are they linked dynamically against libedit.
+ # known culprits of this include: EPD, Fink
+ # There is not much we can do to detect this, until we find a specific failure
+ # case, rather than relying on the readline module to self-identify as broken.
+
+if uses_libedit and sys.platform == 'darwin':
+ _rl.parse_and_bind("bind ^I rl_complete")
+ warnings.warn('\n'.join(['', "*"*78,
+ "libedit detected - readline will not be well behaved, including but not limited to:",
+ " * crashes on tab completion",
+ " * incorrect history navigation",
+ " * corrupting long-lines",
+ " * failure to wrap or indent lines properly",
+ "It is highly recommended that you install gnureadline, which is installable with:",
+ " pip install gnureadline",
+ "*"*78]),
+ RuntimeWarning)
+
+# the clear_history() function was only introduced in Python 2.4 and is
+# actually optional in the readline API, so we must explicitly check for its
+# existence. Some known platforms actually don't have it. This thread:
+# http://mail.python.org/pipermail/python-dev/2003-August/037845.html
+# has the original discussion.
+
+if have_readline:
+ try:
+ _rl.clear_history
+ except AttributeError:
+ def clear_history(): pass
+ _rl.clear_history = clear_history
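A hedged sketch of how consumers use this shim, following the import shown in the module docstring (the key binding is illustrative, and parse_and_bind is only available when the underlying readline module exports it):

    # Sketch: consuming the readline shim as its docstring suggests.
    import IPython.utils.rlineimpl as readline

    if readline.have_readline:
        readline.parse_and_bind('tab: complete')  # re-exported from the real module
        readline.clear_history()                  # always present, even if a no-op
    else:
        print('no readline available; falling back to plain input')
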
diff --git a/contrib/python/ipython/py2/IPython/utils/sentinel.py b/contrib/python/ipython/py2/IPython/utils/sentinel.py
index 7af2558c1a..dc57a2591c 100644
--- a/contrib/python/ipython/py2/IPython/utils/sentinel.py
+++ b/contrib/python/ipython/py2/IPython/utils/sentinel.py
@@ -1,17 +1,17 @@
-"""Sentinel class for constants with useful reprs"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-class Sentinel(object):
-
- def __init__(self, name, module, docstring=None):
- self.name = name
- self.module = module
- if docstring:
- self.__doc__ = docstring
-
-
- def __repr__(self):
- return str(self.module)+'.'+self.name
-
+"""Sentinel class for constants with useful reprs"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+class Sentinel(object):
+
+ def __init__(self, name, module, docstring=None):
+ self.name = name
+ self.module = module
+ if docstring:
+ self.__doc__ = docstring
+
+
+ def __repr__(self):
+ return str(self.module)+'.'+self.name
+
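A minimal sketch of the intended use, assuming a module-level constant (the name Unspecified and the timeout default are invented):

    # Sketch: a unique default value that is distinguishable from None.
    from IPython.utils.sentinel import Sentinel

    Unspecified = Sentinel('Unspecified', __name__,
                           "Marker for arguments the caller did not supply.")

    def connect(timeout=Unspecified):
        if timeout is Unspecified:   # identity check: the whole point of a sentinel
            timeout = 30
        return timeout

    print(repr(Unspecified))         # e.g. "__main__.Unspecified"
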
diff --git a/contrib/python/ipython/py2/IPython/utils/shimmodule.py b/contrib/python/ipython/py2/IPython/utils/shimmodule.py
index c2cf6c6de7..8b74f5011a 100644
--- a/contrib/python/ipython/py2/IPython/utils/shimmodule.py
+++ b/contrib/python/ipython/py2/IPython/utils/shimmodule.py
@@ -1,92 +1,92 @@
-"""A shim module for deprecated imports
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-import types
-
-from .importstring import import_item
-
-class ShimWarning(Warning):
- """A warning to show when a module has moved, and a shim is in its place."""
-
-class ShimImporter(object):
- """Import hook for a shim.
-
- This ensures that submodule imports return the real target module,
- not a clone that will confuse `is` and `isinstance` checks.
- """
- def __init__(self, src, mirror):
- self.src = src
- self.mirror = mirror
-
- def _mirror_name(self, fullname):
- """get the name of the mirrored module"""
-
- return self.mirror + fullname[len(self.src):]
-
- def find_module(self, fullname, path=None):
- """Return self if we should be used to import the module."""
- if fullname.startswith(self.src + '.'):
- mirror_name = self._mirror_name(fullname)
- try:
- mod = import_item(mirror_name)
- except ImportError:
- return
- else:
- if not isinstance(mod, types.ModuleType):
- # not a module
- return None
- return self
-
- def load_module(self, fullname):
- """Import the mirrored module, and insert it into sys.modules"""
- mirror_name = self._mirror_name(fullname)
- mod = import_item(mirror_name)
- sys.modules[fullname] = mod
- return mod
-
-
-class ShimModule(types.ModuleType):
-
- def __init__(self, *args, **kwargs):
- self._mirror = kwargs.pop("mirror")
- src = kwargs.pop("src", None)
- if src:
- kwargs['name'] = src.rsplit('.', 1)[-1]
- super(ShimModule, self).__init__(*args, **kwargs)
- # add import hook for descendent modules
- if src:
- sys.meta_path.append(
- ShimImporter(src=src, mirror=self._mirror)
- )
-
- @property
- def __path__(self):
- return []
-
- @property
- def __spec__(self):
- """Don't produce __spec__ until requested"""
- return __import__(self._mirror).__spec__
-
- def __dir__(self):
- return dir(__import__(self._mirror))
-
- @property
- def __all__(self):
- """Ensure __all__ is always defined"""
- mod = __import__(self._mirror)
- try:
- return mod.__all__
- except AttributeError:
- return [name for name in dir(mod) if not name.startswith('_')]
-
- def __getattr__(self, key):
- # Use the equivalent of import_item(name), see below
- name = "%s.%s" % (self._mirror, key)
- try:
- return import_item(name)
- except ImportError:
- raise AttributeError(key)
+"""A shim module for deprecated imports
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import sys
+import types
+
+from .importstring import import_item
+
+class ShimWarning(Warning):
+ """A warning to show when a module has moved, and a shim is in its place."""
+
+class ShimImporter(object):
+ """Import hook for a shim.
+
+ This ensures that submodule imports return the real target module,
+ not a clone that will confuse `is` and `isinstance` checks.
+ """
+ def __init__(self, src, mirror):
+ self.src = src
+ self.mirror = mirror
+
+ def _mirror_name(self, fullname):
+ """get the name of the mirrored module"""
+
+ return self.mirror + fullname[len(self.src):]
+
+ def find_module(self, fullname, path=None):
+ """Return self if we should be used to import the module."""
+ if fullname.startswith(self.src + '.'):
+ mirror_name = self._mirror_name(fullname)
+ try:
+ mod = import_item(mirror_name)
+ except ImportError:
+ return
+ else:
+ if not isinstance(mod, types.ModuleType):
+ # not a module
+ return None
+ return self
+
+ def load_module(self, fullname):
+ """Import the mirrored module, and insert it into sys.modules"""
+ mirror_name = self._mirror_name(fullname)
+ mod = import_item(mirror_name)
+ sys.modules[fullname] = mod
+ return mod
+
+
+class ShimModule(types.ModuleType):
+
+ def __init__(self, *args, **kwargs):
+ self._mirror = kwargs.pop("mirror")
+ src = kwargs.pop("src", None)
+ if src:
+ kwargs['name'] = src.rsplit('.', 1)[-1]
+ super(ShimModule, self).__init__(*args, **kwargs)
+ # add import hook for descendent modules
+ if src:
+ sys.meta_path.append(
+ ShimImporter(src=src, mirror=self._mirror)
+ )
+
+ @property
+ def __path__(self):
+ return []
+
+ @property
+ def __spec__(self):
+ """Don't produce __spec__ until requested"""
+ return __import__(self._mirror).__spec__
+
+ def __dir__(self):
+ return dir(__import__(self._mirror))
+
+ @property
+ def __all__(self):
+ """Ensure __all__ is always defined"""
+ mod = __import__(self._mirror)
+ try:
+ return mod.__all__
+ except AttributeError:
+ return [name for name in dir(mod) if not name.startswith('_')]
+
+ def __getattr__(self, key):
+ # Use the equivalent of import_item(name), see below
+ name = "%s.%s" % (self._mirror, key)
+ try:
+ return import_item(name)
+ except ImportError:
+ raise AttributeError(key)
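This is the machinery behind IPython's deprecated-import shims; a sketch of how a package wires it up (the src/mirror pair echoes the real IPython.html -> notebook redirect, used here for illustration):

    # Sketch: redirect a deprecated package to its new home.
    import sys
    import warnings
    from IPython.utils.shimmodule import ShimModule, ShimWarning

    warnings.warn("The `IPython.html` package has been deprecated. "
                  "You should import from `notebook` instead.", ShimWarning)

    # Attribute access and submodule imports are forwarded to the mirror:
    sys.modules['IPython.html'] = ShimModule(src='IPython.html', mirror='notebook')
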
diff --git a/contrib/python/ipython/py2/IPython/utils/signatures.py b/contrib/python/ipython/py2/IPython/utils/signatures.py
index 4d0eb74a7e..dedc51cfda 100644
--- a/contrib/python/ipython/py2/IPython/utils/signatures.py
+++ b/contrib/python/ipython/py2/IPython/utils/signatures.py
@@ -1,11 +1,11 @@
-"""Function signature objects for callables.
-
-Use the standard library version if available, as it is more up to date.
-Fallback on backport otherwise.
-"""
-
-
-try:
- from inspect import BoundArguments, Parameter, Signature, signature
-except ImportError:
- from ._signatures import BoundArguments, Parameter, Signature, signature
+"""Function signature objects for callables.
+
+Use the standard library version if available, as it is more up to date.
+Fall back to the backport otherwise.
+"""
+
+
+try:
+ from inspect import BoundArguments, Parameter, Signature, signature
+except ImportError:
+ from ._signatures import BoundArguments, Parameter, Signature, signature
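Either branch yields the same API; a minimal sketch (the function below is invented):

    # Sketch: introspecting a callable with whichever signature() was imported.
    from IPython.utils.signatures import signature

    def greet(name, punctuation='!'):
        return name + punctuation

    sig = signature(greet)
    print(str(sig))              # (name, punctuation='!')
    print(list(sig.parameters))  # ['name', 'punctuation']
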
diff --git a/contrib/python/ipython/py2/IPython/utils/strdispatch.py b/contrib/python/ipython/py2/IPython/utils/strdispatch.py
index a6183404e7..d6bf510535 100644
--- a/contrib/python/ipython/py2/IPython/utils/strdispatch.py
+++ b/contrib/python/ipython/py2/IPython/utils/strdispatch.py
@@ -1,68 +1,68 @@
-"""String dispatch class to match regexps and dispatch commands.
-"""
-
-# Stdlib imports
-import re
-
-# Our own modules
-from IPython.core.hooks import CommandChainDispatcher
-
-# Code begins
-class StrDispatch(object):
- """Dispatch (lookup) a set of strings / regexps for match.
-
- Example:
-
- >>> dis = StrDispatch()
- >>> dis.add_s('hei',34, priority = 4)
- >>> dis.add_s('hei',123, priority = 2)
- >>> dis.add_re('h.i', 686)
- >>> print(list(dis.flat_matches('hei')))
- [123, 34, 686]
- """
-
- def __init__(self):
- self.strs = {}
- self.regexs = {}
-
- def add_s(self, s, obj, priority= 0 ):
- """ Adds a target 'string' for dispatching """
-
- chain = self.strs.get(s, CommandChainDispatcher())
- chain.add(obj,priority)
- self.strs[s] = chain
-
- def add_re(self, regex, obj, priority= 0 ):
- """ Adds a target regexp for dispatching """
-
- chain = self.regexs.get(regex, CommandChainDispatcher())
- chain.add(obj,priority)
- self.regexs[regex] = chain
-
- def dispatch(self, key):
- """ Get a seq of Commandchain objects that match key """
- if key in self.strs:
- yield self.strs[key]
-
- for r, obj in self.regexs.items():
- if re.match(r, key):
- yield obj
- else:
- #print "nomatch",key # dbg
- pass
-
- def __repr__(self):
- return "<Strdispatch %s, %s>" % (self.strs, self.regexs)
-
- def s_matches(self, key):
- if key not in self.strs:
- return
- for el in self.strs[key]:
- yield el[1]
-
- def flat_matches(self, key):
- """ Yield all 'value' targets, without priority """
- for val in self.dispatch(key):
- for el in val:
- yield el[1] # only value, no priority
- return
+"""String dispatch class to match regexps and dispatch commands.
+"""
+
+# Stdlib imports
+import re
+
+# Our own modules
+from IPython.core.hooks import CommandChainDispatcher
+
+# Code begins
+class StrDispatch(object):
+ """Dispatch (lookup) a set of strings / regexps for match.
+
+ Example:
+
+ >>> dis = StrDispatch()
+ >>> dis.add_s('hei',34, priority = 4)
+ >>> dis.add_s('hei',123, priority = 2)
+ >>> dis.add_re('h.i', 686)
+ >>> print(list(dis.flat_matches('hei')))
+ [123, 34, 686]
+ """
+
+ def __init__(self):
+ self.strs = {}
+ self.regexs = {}
+
+ def add_s(self, s, obj, priority=0):
+ """ Adds a target 'string' for dispatching """
+
+ chain = self.strs.get(s, CommandChainDispatcher())
+ chain.add(obj, priority)
+ self.strs[s] = chain
+
+ def add_re(self, regex, obj, priority=0):
+ """ Adds a target regexp for dispatching """
+
+ chain = self.regexs.get(regex, CommandChainDispatcher())
+ chain.add(obj, priority)
+ self.regexs[regex] = chain
+
+ def dispatch(self, key):
+ """ Get a seq of Commandchain objects that match key """
+ if key in self.strs:
+ yield self.strs[key]
+
+ for r, obj in self.regexs.items():
+ if re.match(r, key):
+ yield obj
+ else:
+ #print "nomatch",key # dbg
+ pass
+
+ def __repr__(self):
+ return "<Strdispatch %s, %s>" % (self.strs, self.regexs)
+
+ def s_matches(self, key):
+ if key not in self.strs:
+ return
+ for el in self.strs[key]:
+ yield el[1]
+
+ def flat_matches(self, key):
+ """ Yield all 'value' targets, without priority """
+ for val in self.dispatch(key):
+ for el in val:
+ yield el[1] # only value, no priority
+ return
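Beyond the doctest in the class above, the regex side can be exercised the same way; a sketch with invented handlers:

    # Sketch: exact-string entries go through a priority-ordered
    # CommandChainDispatcher; regex entries match via re.match.
    from IPython.utils.strdispatch import StrDispatch

    dis = StrDispatch()
    dis.add_s('%magic', lambda: 'exact', priority=1)
    dis.add_re(r'%ma.*', lambda: 'regex')

    for handler in dis.flat_matches('%magic'):
        print(handler())   # the exact handler first, then the regex match
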
diff --git a/contrib/python/ipython/py2/IPython/utils/sysinfo.py b/contrib/python/ipython/py2/IPython/utils/sysinfo.py
index 51ca68d9cf..db7f2914d4 100644
--- a/contrib/python/ipython/py2/IPython/utils/sysinfo.py
+++ b/contrib/python/ipython/py2/IPython/utils/sysinfo.py
@@ -1,167 +1,167 @@
-# encoding: utf-8
-"""
-Utilities for getting information about IPython and the system it's running in.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import os
-import platform
-import pprint
-import sys
-import subprocess
-
-from IPython.core import release
-from IPython.utils import py3compat, _sysinfo, encoding
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def pkg_commit_hash(pkg_path):
- """Get short form of commit hash given directory `pkg_path`
-
- We get the commit hash from (in order of preference):
-
- * IPython.utils._sysinfo.commit
- * git output, if we are in a git repository
-
- If these fail, we return a not-found placeholder tuple
-
- Parameters
- ----------
- pkg_path : str
- directory containing package
- only used for getting commit from active repo
-
- Returns
- -------
- hash_from : str
- Where we got the hash from - description
- hash_str : str
- short form of hash
- """
- # Try and get commit from written commit text file
- if _sysinfo.commit:
- return "installation", _sysinfo.commit
-
- # maybe we are in a repository
- proc = subprocess.Popen('git rev-parse --short HEAD',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- cwd=pkg_path, shell=True)
- repo_commit, _ = proc.communicate()
- if repo_commit:
- return 'repository', repo_commit.strip().decode('ascii')
- return '(none found)', u'<not found>'
-
-
-def pkg_info(pkg_path):
- """Return dict describing the context of this package
-
- Parameters
- ----------
- pkg_path : str
- path containing __init__.py for package
-
- Returns
- -------
- context : dict
- with named parameters of interest
- """
- src, hsh = pkg_commit_hash(pkg_path)
- return dict(
- ipython_version=release.version,
- ipython_path=pkg_path,
- commit_source=src,
- commit_hash=hsh,
- sys_version=sys.version,
- sys_executable=sys.executable,
- sys_platform=sys.platform,
- platform=platform.platform(),
- os_name=os.name,
- default_encoding=encoding.DEFAULT_ENCODING,
- )
-
-def get_sys_info():
- """Return useful information about IPython and the system, as a dict."""
- p = os.path
- path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
- return pkg_info(path)
-
-@py3compat.doctest_refactor_print
-def sys_info():
- """Return useful information about IPython and the system, as a string.
-
- Examples
- --------
- ::
-
- In [2]: print sys_info()
- {'commit_hash': '144fdae', # random
- 'commit_source': 'repository',
- 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
- 'ipython_version': '0.11.dev',
- 'os_name': 'posix',
- 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
- 'sys_executable': '/usr/bin/python',
- 'sys_platform': 'linux2',
- 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
- """
- return pprint.pformat(get_sys_info())
-
-def _num_cpus_unix():
- """Return the number of active CPUs on a Unix system."""
- return os.sysconf("SC_NPROCESSORS_ONLN")
-
-
-def _num_cpus_darwin():
- """Return the number of active CPUs on a Darwin system."""
- p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)
- return p.stdout.read()
-
-
-def _num_cpus_windows():
- """Return the number of active CPUs on a Windows system."""
- return os.environ.get("NUMBER_OF_PROCESSORS")
-
-
-def num_cpus():
- """Return the effective number of CPUs in the system as an integer.
-
- This cross-platform function makes an attempt at finding the total number of
- available CPUs in the system, as returned by various underlying system and
- python calls.
-
- If it can't find a sensible answer, it returns 1 (though an error *may* make
- it return a large positive number that's actually incorrect).
- """
-
- # Many thanks to the Parallel Python project (http://www.parallelpython.com)
- # for the names of the keys we needed to look up for this function. This
- # code was inspired by their equivalent function.
-
- ncpufuncs = {'Linux':_num_cpus_unix,
- 'Darwin':_num_cpus_darwin,
- 'Windows':_num_cpus_windows
- }
-
- ncpufunc = ncpufuncs.get(platform.system(),
- # default to unix version (Solaris, AIX, etc)
- _num_cpus_unix)
-
- try:
- ncpus = max(1,int(ncpufunc()))
- except:
- ncpus = 1
- return ncpus
-
+# encoding: utf-8
+"""
+Utilities for getting information about IPython and the system it's running in.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import os
+import platform
+import pprint
+import sys
+import subprocess
+
+from IPython.core import release
+from IPython.utils import py3compat, _sysinfo, encoding
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def pkg_commit_hash(pkg_path):
+ """Get short form of commit hash given directory `pkg_path`
+
+ We get the commit hash from (in order of preference):
+
+ * IPython.utils._sysinfo.commit
+ * git output, if we are in a git repository
+
+ If these fail, we return a not-found placeholder tuple
+
+ Parameters
+ ----------
+ pkg_path : str
+ directory containing package
+ only used for getting commit from active repo
+
+ Returns
+ -------
+ hash_from : str
+ Where we got the hash from - description
+ hash_str : str
+ short form of hash
+ """
+ # Try and get commit from written commit text file
+ if _sysinfo.commit:
+ return "installation", _sysinfo.commit
+
+ # maybe we are in a repository
+ proc = subprocess.Popen('git rev-parse --short HEAD',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=pkg_path, shell=True)
+ repo_commit, _ = proc.communicate()
+ if repo_commit:
+ return 'repository', repo_commit.strip().decode('ascii')
+ return '(none found)', u'<not found>'
+
+
+def pkg_info(pkg_path):
+ """Return dict describing the context of this package
+
+ Parameters
+ ----------
+ pkg_path : str
+ path containing __init__.py for package
+
+ Returns
+ -------
+ context : dict
+ with named parameters of interest
+ """
+ src, hsh = pkg_commit_hash(pkg_path)
+ return dict(
+ ipython_version=release.version,
+ ipython_path=pkg_path,
+ commit_source=src,
+ commit_hash=hsh,
+ sys_version=sys.version,
+ sys_executable=sys.executable,
+ sys_platform=sys.platform,
+ platform=platform.platform(),
+ os_name=os.name,
+ default_encoding=encoding.DEFAULT_ENCODING,
+ )
+
+def get_sys_info():
+ """Return useful information about IPython and the system, as a dict."""
+ p = os.path
+ path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
+ return pkg_info(path)
+
+@py3compat.doctest_refactor_print
+def sys_info():
+ """Return useful information about IPython and the system, as a string.
+
+ Examples
+ --------
+ ::
+
+ In [2]: print sys_info()
+ {'commit_hash': '144fdae', # random
+ 'commit_source': 'repository',
+ 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
+ 'ipython_version': '0.11.dev',
+ 'os_name': 'posix',
+ 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
+ 'sys_executable': '/usr/bin/python',
+ 'sys_platform': 'linux2',
+ 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
+ """
+ return pprint.pformat(get_sys_info())
+
+def _num_cpus_unix():
+ """Return the number of active CPUs on a Unix system."""
+ return os.sysconf("SC_NPROCESSORS_ONLN")
+
+
+def _num_cpus_darwin():
+ """Return the number of active CPUs on a Darwin system."""
+ p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)
+ return p.stdout.read()
+
+
+def _num_cpus_windows():
+ """Return the number of active CPUs on a Windows system."""
+ return os.environ.get("NUMBER_OF_PROCESSORS")
+
+
+def num_cpus():
+ """Return the effective number of CPUs in the system as an integer.
+
+ This cross-platform function makes an attempt at finding the total number of
+ available CPUs in the system, as returned by various underlying system and
+ python calls.
+
+ If it can't find a sensible answer, it returns 1 (though an error *may* make
+ it return a large positive number that's actually incorrect).
+ """
+
+ # Many thanks to the Parallel Python project (http://www.parallelpython.com)
+ # for the names of the keys we needed to look up for this function. This
+ # code was inspired by their equivalent function.
+
+ ncpufuncs = {'Linux':_num_cpus_unix,
+ 'Darwin':_num_cpus_darwin,
+ 'Windows':_num_cpus_windows
+ }
+
+ ncpufunc = ncpufuncs.get(platform.system(),
+ # default to unix version (Solaris, AIX, etc)
+ _num_cpus_unix)
+
+ try:
+ ncpus = max(1,int(ncpufunc()))
+ except:
+ ncpus = 1
+ return ncpus
+
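A minimal sketch of the public entry points defined above:

    # Sketch: querying the helpers from sysinfo.py.
    from IPython.utils.sysinfo import get_sys_info, num_cpus, sys_info

    info = get_sys_info()
    print(info['commit_source'], info['commit_hash'])  # e.g. "installation abc1234"
    print('effective CPUs:', num_cpus())               # always an int >= 1
    print(sys_info())                                  # the same dict, pretty-printed
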
diff --git a/contrib/python/ipython/py2/IPython/utils/syspathcontext.py b/contrib/python/ipython/py2/IPython/utils/syspathcontext.py
index fdcfbbee35..89612038ff 100644
--- a/contrib/python/ipython/py2/IPython/utils/syspathcontext.py
+++ b/contrib/python/ipython/py2/IPython/utils/syspathcontext.py
@@ -1,71 +1,71 @@
-# encoding: utf-8
-"""
-Context managers for adding things to sys.path temporarily.
-
-Authors:
-
-* Brian Granger
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-
-from IPython.utils.py3compat import cast_bytes_py2
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-class appended_to_syspath(object):
- """A context for appending a directory to sys.path for a second."""
-
- def __init__(self, dir):
- self.dir = cast_bytes_py2(dir, sys.getdefaultencoding())
-
- def __enter__(self):
- if self.dir not in sys.path:
- sys.path.append(self.dir)
- self.added = True
- else:
- self.added = False
-
- def __exit__(self, type, value, traceback):
- if self.added:
- try:
- sys.path.remove(self.dir)
- except ValueError:
- pass
- # Returning False causes any exceptions to be re-raised.
- return False
-
-class prepended_to_syspath(object):
- """A context for prepending a directory to sys.path for a second."""
-
- def __init__(self, dir):
- self.dir = cast_bytes_py2(dir, sys.getdefaultencoding())
-
- def __enter__(self):
- if self.dir not in sys.path:
- sys.path.insert(0,self.dir)
- self.added = True
- else:
- self.added = False
-
- def __exit__(self, type, value, traceback):
- if self.added:
- try:
- sys.path.remove(self.dir)
- except ValueError:
- pass
- # Returning False causes any exceptions to be re-raised.
- return False
+# encoding: utf-8
+"""
+Context managers for adding things to sys.path temporarily.
+
+Authors:
+
+* Brian Granger
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import sys
+
+from IPython.utils.py3compat import cast_bytes_py2
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+class appended_to_syspath(object):
+ """A context for appending a directory to sys.path for a second."""
+
+ def __init__(self, dir):
+ self.dir = cast_bytes_py2(dir, sys.getdefaultencoding())
+
+ def __enter__(self):
+ if self.dir not in sys.path:
+ sys.path.append(self.dir)
+ self.added = True
+ else:
+ self.added = False
+
+ def __exit__(self, type, value, traceback):
+ if self.added:
+ try:
+ sys.path.remove(self.dir)
+ except ValueError:
+ pass
+ # Returning False causes any exceptions to be re-raised.
+ return False
+
+class prepended_to_syspath(object):
+ """A context for prepending a directory to sys.path for a second."""
+
+ def __init__(self, dir):
+ self.dir = cast_bytes_py2(dir, sys.getdefaultencoding())
+
+ def __enter__(self):
+ if self.dir not in sys.path:
+ sys.path.insert(0,self.dir)
+ self.added = True
+ else:
+ self.added = False
+
+ def __exit__(self, type, value, traceback):
+ if self.added:
+ try:
+ sys.path.remove(self.dir)
+ except ValueError:
+ pass
+ # Returning False causes any exceptions to be re-raised.
+ return False
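A minimal sketch, with an invented directory and module name:

    # Sketch: temporarily make modules under ./plugins importable.
    from IPython.utils.syspathcontext import prepended_to_syspath

    with prepended_to_syspath('./plugins'):   # hypothetical directory
        import my_plugin                      # hypothetical module
    # sys.path is restored here; the import itself stays cached in sys.modules.
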
diff --git a/contrib/python/ipython/py2/IPython/utils/tempdir.py b/contrib/python/ipython/py2/IPython/utils/tempdir.py
index 909205e192..951abd65c9 100644
--- a/contrib/python/ipython/py2/IPython/utils/tempdir.py
+++ b/contrib/python/ipython/py2/IPython/utils/tempdir.py
@@ -1,145 +1,145 @@
-"""TemporaryDirectory class, copied from Python 3.2.
-
-This is copied from the stdlib and will be standard in Python 3.2 and onwards.
-"""
-from __future__ import print_function
-
-import os as _os
-import warnings as _warnings
-import sys as _sys
-
-# This code should only be used in Python versions < 3.2, since after that we
-# can rely on the stdlib itself.
-try:
- from tempfile import TemporaryDirectory
-
-except ImportError:
- from tempfile import mkdtemp, template
-
- class TemporaryDirectory(object):
- """Create and return a temporary directory. This has the same
- behavior as mkdtemp but can be used as a context manager. For
- example:
-
- with TemporaryDirectory() as tmpdir:
- ...
-
- Upon exiting the context, the directory and everthing contained
- in it are removed.
- """
-
- def __init__(self, suffix="", prefix=template, dir=None):
- self.name = mkdtemp(suffix, prefix, dir)
- self._closed = False
-
- def __enter__(self):
- return self.name
-
- def cleanup(self, _warn=False):
- if self.name and not self._closed:
- try:
- self._rmtree(self.name)
- except (TypeError, AttributeError) as ex:
- # Issue #10188: Emit a warning on stderr
- # if the directory could not be cleaned
- # up due to missing globals
- if "None" not in str(ex):
- raise
- print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
- file=_sys.stderr)
- return
- self._closed = True
- if _warn:
- self._warn("Implicitly cleaning up {!r}".format(self),
- Warning)
-
- def __exit__(self, exc, value, tb):
- self.cleanup()
-
- def __del__(self):
- # Issue a ResourceWarning if implicit cleanup needed
- self.cleanup(_warn=True)
-
-
- # XXX (ncoghlan): The following code attempts to make
- # this class tolerant of the module nulling out process
- # that happens during CPython interpreter shutdown
- # Alas, it doesn't actually manage it. See issue #10188
- _listdir = staticmethod(_os.listdir)
- _path_join = staticmethod(_os.path.join)
- _isdir = staticmethod(_os.path.isdir)
- _remove = staticmethod(_os.remove)
- _rmdir = staticmethod(_os.rmdir)
- _os_error = _os.error
- _warn = _warnings.warn
-
- def _rmtree(self, path):
- # Essentially a stripped down version of shutil.rmtree. We can't
- # use globals because they may be None'ed out at shutdown.
- for name in self._listdir(path):
- fullname = self._path_join(path, name)
- try:
- isdir = self._isdir(fullname)
- except self._os_error:
- isdir = False
- if isdir:
- self._rmtree(fullname)
- else:
- try:
- self._remove(fullname)
- except self._os_error:
- pass
- try:
- self._rmdir(path)
- except self._os_error:
- pass
-
-
-class NamedFileInTemporaryDirectory(object):
-
- def __init__(self, filename, mode='w+b', bufsize=-1, **kwds):
- """
- Open a file named `filename` in a temporary directory.
-
- This context manager is preferred over `NamedTemporaryFile` in
- stdlib `tempfile` when one needs to reopen the file.
-
- Arguments `mode` and `bufsize` are passed to `open`.
- Rest of the arguments are passed to `TemporaryDirectory`.
-
- """
- self._tmpdir = TemporaryDirectory(**kwds)
- path = _os.path.join(self._tmpdir.name, filename)
- self.file = open(path, mode, bufsize)
-
- def cleanup(self):
- self.file.close()
- self._tmpdir.cleanup()
-
- __del__ = cleanup
-
- def __enter__(self):
- return self.file
-
- def __exit__(self, type, value, traceback):
- self.cleanup()
-
-
-class TemporaryWorkingDirectory(TemporaryDirectory):
- """
- Creates a temporary directory and sets the cwd to that directory.
- Automatically reverts to previous cwd upon cleanup.
- Usage example:
-
- with TemporaryWorkingDirectory() as tmpdir:
- ...
- """
- def __enter__(self):
- self.old_wd = _os.getcwd()
- _os.chdir(self.name)
- return super(TemporaryWorkingDirectory, self).__enter__()
-
- def __exit__(self, exc, value, tb):
- _os.chdir(self.old_wd)
- return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
-
+"""TemporaryDirectory class, copied from Python 3.2.
+
+This is copied from the stdlib and will be standard in Python 3.2 and onwards.
+"""
+from __future__ import print_function
+
+import os as _os
+import warnings as _warnings
+import sys as _sys
+
+# This code should only be used in Python versions < 3.2, since after that we
+# can rely on the stdlib itself.
+try:
+ from tempfile import TemporaryDirectory
+
+except ImportError:
+ from tempfile import mkdtemp, template
+
+ class TemporaryDirectory(object):
+ """Create and return a temporary directory. This has the same
+ behavior as mkdtemp but can be used as a context manager. For
+ example:
+
+ with TemporaryDirectory() as tmpdir:
+ ...
+
+ Upon exiting the context, the directory and everything contained
+ in it are removed.
+ """
+
+ def __init__(self, suffix="", prefix=template, dir=None):
+ self.name = mkdtemp(suffix, prefix, dir)
+ self._closed = False
+
+ def __enter__(self):
+ return self.name
+
+ def cleanup(self, _warn=False):
+ if self.name and not self._closed:
+ try:
+ self._rmtree(self.name)
+ except (TypeError, AttributeError) as ex:
+ # Issue #10188: Emit a warning on stderr
+ # if the directory could not be cleaned
+ # up due to missing globals
+ if "None" not in str(ex):
+ raise
+ print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
+ file=_sys.stderr)
+ return
+ self._closed = True
+ if _warn:
+ self._warn("Implicitly cleaning up {!r}".format(self),
+ Warning)
+
+ def __exit__(self, exc, value, tb):
+ self.cleanup()
+
+ def __del__(self):
+ # Issue a ResourceWarning if implicit cleanup needed
+ self.cleanup(_warn=True)
+
+
+ # XXX (ncoghlan): The following code attempts to make
+ # this class tolerant of the module nulling out process
+ # that happens during CPython interpreter shutdown
+ # Alas, it doesn't actually manage it. See issue #10188
+ _listdir = staticmethod(_os.listdir)
+ _path_join = staticmethod(_os.path.join)
+ _isdir = staticmethod(_os.path.isdir)
+ _remove = staticmethod(_os.remove)
+ _rmdir = staticmethod(_os.rmdir)
+ _os_error = _os.error
+ _warn = _warnings.warn
+
+ def _rmtree(self, path):
+ # Essentially a stripped down version of shutil.rmtree. We can't
+ # use globals because they may be None'ed out at shutdown.
+ for name in self._listdir(path):
+ fullname = self._path_join(path, name)
+ try:
+ isdir = self._isdir(fullname)
+ except self._os_error:
+ isdir = False
+ if isdir:
+ self._rmtree(fullname)
+ else:
+ try:
+ self._remove(fullname)
+ except self._os_error:
+ pass
+ try:
+ self._rmdir(path)
+ except self._os_error:
+ pass
+
+
+class NamedFileInTemporaryDirectory(object):
+
+ def __init__(self, filename, mode='w+b', bufsize=-1, **kwds):
+ """
+ Open a file named `filename` in a temporary directory.
+
+ This context manager is preferred over `NamedTemporaryFile` in
+ stdlib `tempfile` when one needs to reopen the file.
+
+ Arguments `mode` and `bufsize` are passed to `open`.
+ The remaining arguments are passed to `TemporaryDirectory`.
+
+ """
+ self._tmpdir = TemporaryDirectory(**kwds)
+ path = _os.path.join(self._tmpdir.name, filename)
+ self.file = open(path, mode, bufsize)
+
+ def cleanup(self):
+ self.file.close()
+ self._tmpdir.cleanup()
+
+ __del__ = cleanup
+
+ def __enter__(self):
+ return self.file
+
+ def __exit__(self, type, value, traceback):
+ self.cleanup()
+
+
+class TemporaryWorkingDirectory(TemporaryDirectory):
+ """
+ Creates a temporary directory and sets the cwd to that directory.
+ Automatically reverts to previous cwd upon cleanup.
+ Usage example:
+
+ with TemporaryWorkingDirectory() as tmpdir:
+ ...
+ """
+ def __enter__(self):
+ self.old_wd = _os.getcwd()
+ _os.chdir(self.name)
+ return super(TemporaryWorkingDirectory, self).__enter__()
+
+ def __exit__(self, exc, value, tb):
+ _os.chdir(self.old_wd)
+ return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
+
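A minimal sketch of the two main context managers (the file names are invented):

    # Sketch: a throwaway working directory, then a reopenable named temp file.
    from IPython.utils.tempdir import (NamedFileInTemporaryDirectory,
                                       TemporaryWorkingDirectory)

    with TemporaryWorkingDirectory() as tmpdir:
        with open('scratch.txt', 'w') as f:   # created inside tmpdir (the new cwd)
            f.write('temporary data')
    # the previous cwd is restored and tmpdir is deleted here

    with NamedFileInTemporaryDirectory('notes.bin') as f:
        f.write(b'payload')                   # default mode is 'w+b'
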
diff --git a/contrib/python/ipython/py2/IPython/utils/terminal.py b/contrib/python/ipython/py2/IPython/utils/terminal.py
index 833debce41..e92c410c79 100644
--- a/contrib/python/ipython/py2/IPython/utils/terminal.py
+++ b/contrib/python/ipython/py2/IPython/utils/terminal.py
@@ -1,22 +1,22 @@
-# encoding: utf-8
-"""
-Utilities for working with terminals.
-
-Authors:
-
-* Brian E. Granger
-* Fernando Perez
-* Alexander Belchenko (e-mail: bialix AT ukr.net)
-"""
-
+# encoding: utf-8
+"""
+Utilities for working with terminals.
+
+Authors:
+
+* Brian E. Granger
+* Fernando Perez
+* Alexander Belchenko (e-mail: bialix AT ukr.net)
+"""
+
from __future__ import absolute_import
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-
-import os
-import sys
-import warnings
+
+import os
+import sys
+import warnings
try:
from shutil import get_terminal_size as _get_terminal_size
except ImportError:
@@ -25,101 +25,101 @@ except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size
except ImportError:
from ._get_terminal_size import get_terminal_size as _get_terminal_size
-
-from . import py3compat
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-# This variable is part of the expected API of the module:
-ignore_termtitle = True
-
-
-
-if os.name == 'posix':
- def _term_clear():
- os.system('clear')
-elif sys.platform == 'win32':
- def _term_clear():
- os.system('cls')
-else:
- def _term_clear():
- pass
-
-
-
-def toggle_set_term_title(val):
- """Control whether set_term_title is active or not.
-
- set_term_title() allows writing to the console titlebar. In embedded
- widgets this can cause problems, so this call can be used to toggle it on
- or off as needed.
-
- The default state of the module is for the function to be disabled.
-
- Parameters
- ----------
- val : bool
- If True, set_term_title() actually writes to the terminal (using the
- appropriate platform-specific module). If False, it is a no-op.
- """
- global ignore_termtitle
- ignore_termtitle = not(val)
-
-
-def _set_term_title(*args,**kw):
- """Dummy no-op."""
- pass
-
-
-def _set_term_title_xterm(title):
- """ Change virtual terminal title in xterm-workalikes """
- sys.stdout.write('\033]0;%s\007' % title)
-
-if os.name == 'posix':
- TERM = os.environ.get('TERM','')
- if TERM.startswith('xterm'):
- _set_term_title = _set_term_title_xterm
-elif sys.platform == 'win32':
- try:
- import ctypes
-
- SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
- SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
-
- def _set_term_title(title):
- """Set terminal title using ctypes to access the Win32 APIs."""
- SetConsoleTitleW(title)
- except ImportError:
- def _set_term_title(title):
- """Set terminal title using the 'title' command."""
- global ignore_termtitle
-
- try:
- # Cannot be on network share when issuing system commands
- curr = py3compat.getcwd()
- os.chdir("C:")
- ret = os.system("title " + title)
- finally:
- os.chdir(curr)
- if ret:
- # non-zero return code signals error, don't try again
- ignore_termtitle = True
-
-
-def set_term_title(title):
- """Set terminal title using the necessary platform-dependent calls."""
- if ignore_termtitle:
- return
- _set_term_title(title)
-
-
-def freeze_term_title():
- warnings.warn("This function is deprecated, use toggle_set_term_title()")
- global ignore_termtitle
- ignore_termtitle = True
-
-
+
+from . import py3compat
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# This variable is part of the expected API of the module:
+ignore_termtitle = True
+
+
+
+if os.name == 'posix':
+ def _term_clear():
+ os.system('clear')
+elif sys.platform == 'win32':
+ def _term_clear():
+ os.system('cls')
+else:
+ def _term_clear():
+ pass
+
+
+
+def toggle_set_term_title(val):
+ """Control whether set_term_title is active or not.
+
+ set_term_title() allows writing to the console titlebar. In embedded
+ widgets this can cause problems, so this call can be used to toggle it on
+ or off as needed.
+
+ The default state of the module is for the function to be disabled.
+
+ Parameters
+ ----------
+ val : bool
+ If True, set_term_title() actually writes to the terminal (using the
+ appropriate platform-specific module). If False, it is a no-op.
+ """
+ global ignore_termtitle
+ ignore_termtitle = not val
+
+
+def _set_term_title(*args,**kw):
+ """Dummy no-op."""
+ pass
+
+
+def _set_term_title_xterm(title):
+ """ Change virtual terminal title in xterm-workalikes """
+ sys.stdout.write('\033]0;%s\007' % title)
+
+if os.name == 'posix':
+ TERM = os.environ.get('TERM','')
+ if TERM.startswith('xterm'):
+ _set_term_title = _set_term_title_xterm
+elif sys.platform == 'win32':
+ try:
+ import ctypes
+
+ SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
+ SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
+
+ def _set_term_title(title):
+ """Set terminal title using ctypes to access the Win32 APIs."""
+ SetConsoleTitleW(title)
+ except ImportError:
+ def _set_term_title(title):
+ """Set terminal title using the 'title' command."""
+ global ignore_termtitle
+
+ try:
+ # Cannot be on network share when issuing system commands
+ curr = py3compat.getcwd()
+ os.chdir("C:")
+ ret = os.system("title " + title)
+ finally:
+ os.chdir(curr)
+ if ret:
+ # non-zero return code signals error, don't try again
+ ignore_termtitle = True
+
+
+def set_term_title(title):
+ """Set terminal title using the necessary platform-dependent calls."""
+ if ignore_termtitle:
+ return
+ _set_term_title(title)
+
+
+def freeze_term_title():
+ warnings.warn("This function is deprecated, use toggle_set_term_title()")
+ global ignore_termtitle
+ ignore_termtitle = True
+
+
def get_terminal_size(defaultx=80, defaulty=25):
return _get_terminal_size((defaultx, defaulty))
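A minimal sketch of the title and size helpers above (the title string is illustrative):

    # Sketch: title-setting is disabled by default and must be toggled on.
    from IPython.utils import terminal

    terminal.toggle_set_term_title(True)
    terminal.set_term_title('ipython: /home/user')   # illustrative title
    cols, rows = terminal.get_terminal_size()
    print(cols, rows)
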
diff --git a/contrib/python/ipython/py2/IPython/utils/text.py b/contrib/python/ipython/py2/IPython/utils/text.py
index 50ff04e1fc..5ed1a845e3 100644
--- a/contrib/python/ipython/py2/IPython/utils/text.py
+++ b/contrib/python/ipython/py2/IPython/utils/text.py
@@ -1,783 +1,783 @@
-# encoding: utf-8
-"""
-Utilities for working with strings and text.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: IPython.utils.text
- :parts: 3
-"""
-from __future__ import absolute_import
-
-import os
-import re
-import sys
-import textwrap
-from string import Formatter
+# encoding: utf-8
+"""
+Utilities for working with strings and text.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: IPython.utils.text
+ :parts: 3
+"""
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+import textwrap
+from string import Formatter
try:
from pathlib import Path
except ImportError:
# Python 2 backport
from pathlib2 import Path
-
-from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest
-from IPython.utils import py3compat
-
-# datetime.strftime date format for ipython
-if sys.platform == 'win32':
- date_format = "%B %d, %Y"
-else:
- date_format = "%B %-d, %Y"
-
-class LSString(str):
- """String derivative with a special access attributes.
-
- These are normal strings, but with the special attributes:
-
- .l (or .list) : value as list (split on newlines).
- .n (or .nlstr): original value (the string itself).
- .s (or .spstr): value as whitespace-separated string.
- .p (or .paths): list of path objects (requires path.py package)
-
- Any values which require transformations are computed only once and
- cached.
-
- Such strings are very useful to efficiently interact with the shell, which
- typically only understands whitespace-separated options for commands."""
-
- def get_list(self):
- try:
- return self.__list
- except AttributeError:
- self.__list = self.split('\n')
- return self.__list
-
- l = list = property(get_list)
-
- def get_spstr(self):
- try:
- return self.__spstr
- except AttributeError:
- self.__spstr = self.replace('\n',' ')
- return self.__spstr
-
- s = spstr = property(get_spstr)
-
- def get_nlstr(self):
- return self
-
- n = nlstr = property(get_nlstr)
-
- def get_paths(self):
- try:
- return self.__paths
- except AttributeError:
+
+from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest
+from IPython.utils import py3compat
+
+# datetime.strftime date format for ipython
+if sys.platform == 'win32':
+ date_format = "%B %d, %Y"
+else:
+ date_format = "%B %-d, %Y"
+
+class LSString(str):
+ """String derivative with a special access attributes.
+
+ These are normal strings, but with the special attributes:
+
+ .l (or .list) : value as list (split on newlines).
+ .n (or .nlstr): original value (the string itself).
+ .s (or .spstr): value as whitespace-separated string.
+ .p (or .paths): list of path objects (requires path.py package)
+
+ Any values which require transformations are computed only once and
+ cached.
+
+ Such strings are very useful for interacting efficiently with the shell, which
+ typically only understands whitespace-separated options for commands."""
+
+ def get_list(self):
+ try:
+ return self.__list
+ except AttributeError:
+ self.__list = self.split('\n')
+ return self.__list
+
+ l = list = property(get_list)
+
+ def get_spstr(self):
+ try:
+ return self.__spstr
+ except AttributeError:
+ self.__spstr = self.replace('\n',' ')
+ return self.__spstr
+
+ s = spstr = property(get_spstr)
+
+ def get_nlstr(self):
+ return self
+
+ n = nlstr = property(get_nlstr)
+
+ def get_paths(self):
+ try:
+ return self.__paths
+ except AttributeError:
self.__paths = [Path(p) for p in self.split('\n') if os.path.exists(p)]
- return self.__paths
-
- p = paths = property(get_paths)
-
-# FIXME: We need to reimplement type specific displayhook and then add this
-# back as a custom printer. This should also be moved outside utils into the
-# core.
-
-# def print_lsstring(arg):
-# """ Prettier (non-repr-like) and more informative printer for LSString """
-# print "LSString (.p, .n, .l, .s available). Value:"
-# print arg
-#
-#
-# print_lsstring = result_display.when_type(LSString)(print_lsstring)
-
-
-class SList(list):
- """List derivative with a special access attributes.
-
- These are normal lists, but with the special attributes:
-
- * .l (or .list) : value as list (the list itself).
- * .n (or .nlstr): value as a string, joined on newlines.
- * .s (or .spstr): value as a string, joined on spaces.
- * .p (or .paths): list of path objects (requires path.py package)
-
- Any values which require transformations are computed only once and
- cached."""
-
- def get_list(self):
- return self
-
- l = list = property(get_list)
-
- def get_spstr(self):
- try:
- return self.__spstr
- except AttributeError:
- self.__spstr = ' '.join(self)
- return self.__spstr
-
- s = spstr = property(get_spstr)
-
- def get_nlstr(self):
- try:
- return self.__nlstr
- except AttributeError:
- self.__nlstr = '\n'.join(self)
- return self.__nlstr
-
- n = nlstr = property(get_nlstr)
-
- def get_paths(self):
- try:
- return self.__paths
- except AttributeError:
+ return self.__paths
+
+ p = paths = property(get_paths)
+
+# FIXME: We need to reimplement type specific displayhook and then add this
+# back as a custom printer. This should also be moved outside utils into the
+# core.
+
+# def print_lsstring(arg):
+# """ Prettier (non-repr-like) and more informative printer for LSString """
+# print "LSString (.p, .n, .l, .s available). Value:"
+# print arg
+#
+#
+# print_lsstring = result_display.when_type(LSString)(print_lsstring)
+
+
+class SList(list):
+ """List derivative with a special access attributes.
+
+ These are normal lists, but with the special attributes:
+
+ * .l (or .list) : value as list (the list itself).
+ * .n (or .nlstr): value as a string, joined on newlines.
+ * .s (or .spstr): value as a string, joined on spaces.
+ * .p (or .paths): list of path objects (requires path.py package)
+
+ Any values which require transformations are computed only once and
+ cached."""
+
+ def get_list(self):
+ return self
+
+ l = list = property(get_list)
+
+ def get_spstr(self):
+ try:
+ return self.__spstr
+ except AttributeError:
+ self.__spstr = ' '.join(self)
+ return self.__spstr
+
+ s = spstr = property(get_spstr)
+
+ def get_nlstr(self):
+ try:
+ return self.__nlstr
+ except AttributeError:
+ self.__nlstr = '\n'.join(self)
+ return self.__nlstr
+
+ n = nlstr = property(get_nlstr)
+
+ def get_paths(self):
+ try:
+ return self.__paths
+ except AttributeError:
self.__paths = [Path(p) for p in self if os.path.exists(p)]
- return self.__paths
-
- p = paths = property(get_paths)
-
- def grep(self, pattern, prune = False, field = None):
- """ Return all strings matching 'pattern' (a regex or callable)
-
- This is case-insensitive. If prune is true, return all items
- NOT matching the pattern.
-
- If field is specified, the match must occur in the specified
- whitespace-separated field.
-
- Examples::
-
- a.grep( lambda x: x.startswith('C') )
- a.grep('Cha.*log', prune=1)
- a.grep('chm', field=-1)
- """
-
- def match_target(s):
- if field is None:
- return s
- parts = s.split()
- try:
- tgt = parts[field]
- return tgt
- except IndexError:
- return ""
-
- if isinstance(pattern, py3compat.string_types):
- pred = lambda x : re.search(pattern, x, re.IGNORECASE)
- else:
- pred = pattern
- if not prune:
- return SList([el for el in self if pred(match_target(el))])
- else:
- return SList([el for el in self if not pred(match_target(el))])
-
- def fields(self, *fields):
- """ Collect whitespace-separated fields from string list
-
- Allows quick awk-like usage of string lists.
-
- Example data (in var a, created by 'a = !ls -l')::
-
- -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
- drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
-
- * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
- * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
- (note the joining by space).
- * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
-
- IndexErrors are ignored.
-
- Without args, fields() just split()'s the strings.
- """
- if len(fields) == 0:
- return [el.split() for el in self]
-
- res = SList()
- for el in [f.split() for f in self]:
- lineparts = []
-
- for fd in fields:
- try:
- lineparts.append(el[fd])
- except IndexError:
- pass
- if lineparts:
- res.append(" ".join(lineparts))
-
- return res
-
- def sort(self,field= None, nums = False):
- """ sort by specified fields (see fields())
-
- Example::
-
- a.sort(1, nums = True)
-
- Sorts a by second field, in numerical order (so that 21 > 3)
-
- """
-
- #decorate, sort, undecorate
- if field is not None:
- dsu = [[SList([line]).fields(field), line] for line in self]
- else:
- dsu = [[line, line] for line in self]
- if nums:
- for i in range(len(dsu)):
- numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
- try:
- n = int(numstr)
- except ValueError:
- n = 0
- dsu[i][0] = n
-
-
- dsu.sort()
- return SList([t[1] for t in dsu])
-
-
-# FIXME: We need to reimplement type specific displayhook and then add this
-# back as a custom printer. This should also be moved outside utils into the
-# core.
-
-# def print_slist(arg):
-# """ Prettier (non-repr-like) and more informative printer for SList """
-# print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
-# if hasattr(arg, 'hideonce') and arg.hideonce:
-# arg.hideonce = False
-# return
-#
-# nlprint(arg) # This was a nested list printer, now removed.
-#
-# print_slist = result_display.when_type(SList)(print_slist)
-
-
-def indent(instr,nspaces=4, ntabs=0, flatten=False):
- """Indent a string a given number of spaces or tabstops.
-
- indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
-
- Parameters
- ----------
-
- instr : basestring
- The string to be indented.
- nspaces : int (default: 4)
- The number of spaces to be indented.
- ntabs : int (default: 0)
- The number of tabs to be indented.
- flatten : bool (default: False)
- Whether to scrub existing indentation. If True, all lines will be
- aligned to the same indentation. If False, existing indentation will
- be strictly increased.
-
- Returns
- -------
-
- str|unicode : string indented by ntabs and nspaces.
-
- """
- if instr is None:
- return
- ind = '\t'*ntabs+' '*nspaces
- if flatten:
- pat = re.compile(r'^\s*', re.MULTILINE)
- else:
- pat = re.compile(r'^', re.MULTILINE)
- outstr = re.sub(pat, ind, instr)
- if outstr.endswith(os.linesep+ind):
- return outstr[:-len(ind)]
- else:
- return outstr
-
-
-def list_strings(arg):
- """Always return a list of strings, given a string or list of strings
- as input.
-
- Examples
- --------
- ::
-
- In [7]: list_strings('A single string')
- Out[7]: ['A single string']
-
- In [8]: list_strings(['A single string in a list'])
- Out[8]: ['A single string in a list']
-
- In [9]: list_strings(['A','list','of','strings'])
- Out[9]: ['A', 'list', 'of', 'strings']
- """
-
- if isinstance(arg, py3compat.string_types): return [arg]
- else: return arg
-
-
-def marquee(txt='',width=78,mark='*'):
- """Return the input string centered in a 'marquee'.
-
- Examples
- --------
- ::
-
- In [16]: marquee('A test',40)
- Out[16]: '**************** A test ****************'
-
- In [17]: marquee('A test',40,'-')
- Out[17]: '---------------- A test ----------------'
-
- In [18]: marquee('A test',40,' ')
- Out[18]: ' A test '
-
- """
- if not txt:
- return (mark*width)[:width]
- nmark = (width-len(txt)-2)//len(mark)//2
- if nmark < 0: nmark =0
- marks = mark*nmark
- return '%s %s %s' % (marks,txt,marks)
-
-
-ini_spaces_re = re.compile(r'^(\s+)')
-
-def num_ini_spaces(strng):
- """Return the number of initial spaces in a string"""
-
- ini_spaces = ini_spaces_re.match(strng)
- if ini_spaces:
- return ini_spaces.end()
- else:
- return 0
-
-
-def format_screen(strng):
- """Format a string for screen printing.
-
- This removes some latex-type format codes."""
- # Paragraph continue
- par_re = re.compile(r'\\$',re.MULTILINE)
- strng = par_re.sub('',strng)
- return strng
-
-
-def dedent(text):
- """Equivalent of textwrap.dedent that ignores unindented first line.
-
- This means it will still dedent strings like:
- '''foo
- is a bar
- '''
-
- For use in wrap_paragraphs.
- """
-
- if text.startswith('\n'):
- # text starts with blank line, don't ignore the first line
- return textwrap.dedent(text)
-
- # split first line
- splits = text.split('\n',1)
- if len(splits) == 1:
- # only one line
- return textwrap.dedent(text)
-
- first, rest = splits
- # dedent everything but the first line
- rest = textwrap.dedent(rest)
- return '\n'.join([first, rest])
-
-
-def wrap_paragraphs(text, ncols=80):
- """Wrap multiple paragraphs to fit a specified width.
-
- This is equivalent to textwrap.wrap, but with support for multiple
- paragraphs, as separated by empty lines.
-
- Returns
- -------
-
- list of complete paragraphs, wrapped to fill `ncols` columns.
- """
- paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
- text = dedent(text).strip()
- paragraphs = paragraph_re.split(text)[::2] # every other entry is space
- out_ps = []
- indent_re = re.compile(r'\n\s+', re.MULTILINE)
- for p in paragraphs:
- # presume indentation that survives dedent is meaningful formatting,
- # so don't fill unless text is flush.
- if indent_re.search(p) is None:
- # wrap paragraph
- p = textwrap.fill(p, ncols)
- out_ps.append(p)
- return out_ps
-
-
-def long_substr(data):
- """Return the longest common substring in a list of strings.
-
- Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
- """
- substr = ''
- if len(data) > 1 and len(data[0]) > 0:
- for i in range(len(data[0])):
- for j in range(len(data[0])-i+1):
- if j > len(substr) and all(data[0][i:i+j] in x for x in data):
- substr = data[0][i:i+j]
- elif len(data) == 1:
- substr = data[0]
- return substr
-
-
-def strip_email_quotes(text):
- """Strip leading email quotation characters ('>').
-
- Removes any combination of leading '>' interspersed with whitespace that
- appears *identically* in all lines of the input text.
-
- Parameters
- ----------
- text : str
-
- Examples
- --------
-
- Simple uses::
-
- In [2]: strip_email_quotes('> > text')
- Out[2]: 'text'
-
- In [3]: strip_email_quotes('> > text\\n> > more')
- Out[3]: 'text\\nmore'
-
- Note how only the common prefix that appears in all lines is stripped::
-
- In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
- Out[4]: '> text\\n> more\\nmore...'
-
- So if any line has no quote marks ('>'), then none are stripped from any
- of them::
-
- In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
- Out[5]: '> > text\\n> > more\\nlast different'
- """
- lines = text.splitlines()
- matches = set()
- for line in lines:
- prefix = re.match(r'^(\s*>[ >]*)', line)
- if prefix:
- matches.add(prefix.group(1))
- else:
- break
- else:
- prefix = long_substr(list(matches))
- if prefix:
- strip = len(prefix)
- text = '\n'.join([ ln[strip:] for ln in lines])
- return text
-
-def strip_ansi(source):
- """
- Remove ansi escape codes from text.
-
- Parameters
- ----------
- source : str
- Source to remove the ansi from
- """
- return re.sub(r'\033\[(\d|;)+?m', '', source)
-
-
-class EvalFormatter(Formatter):
- """A String Formatter that allows evaluation of simple expressions.
-
- Note that this version interprets a : as specifying a format string (as per
- standard string formatting), so if slicing is required, you must explicitly
- create a slice.
-
- This is to be used in templating cases, such as the parallel batch
- script templates, where simple arithmetic on arguments is useful.
-
- Examples
- --------
- ::
-
- In [1]: f = EvalFormatter()
- In [2]: f.format('{n//4}', n=8)
- Out[2]: '2'
-
- In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
- Out[3]: 'll'
- """
- def get_field(self, name, args, kwargs):
- v = eval(name, kwargs)
- return v, name
-
-#XXX: As of Python 3.4, the format string parsing no longer splits on a colon
-# inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
-# above, it should be possible to remove FullEvalFormatter.
-
-@skip_doctest_py3
-class FullEvalFormatter(Formatter):
- """A String Formatter that allows evaluation of simple expressions.
-
- Any time a format key is not found in the kwargs,
- it will be tried as an expression in the kwargs namespace.
-
- Note that this version allows slicing using [1:2], so you cannot specify
- a format string. Use :class:`EvalFormatter` to permit format strings.
-
- Examples
- --------
- ::
-
- In [1]: f = FullEvalFormatter()
- In [2]: f.format('{n//4}', n=8)
- Out[2]: u'2'
-
- In [3]: f.format('{list(range(5))[2:4]}')
- Out[3]: u'[2, 3]'
-
- In [4]: f.format('{3*2}')
- Out[4]: u'6'
- """
- # copied from Formatter._vformat with minor changes to allow eval
- # and replace the format_spec code with slicing
- def vformat(self, format_string, args, kwargs):
- result = []
- for literal_text, field_name, format_spec, conversion in \
- self.parse(format_string):
-
- # output the literal text
- if literal_text:
- result.append(literal_text)
-
- # if there's a field, output it
- if field_name is not None:
- # this is some markup, find the object and do
- # the formatting
-
- if format_spec:
- # override format spec, to allow slicing:
- field_name = ':'.join([field_name, format_spec])
-
- # eval the contents of the field for the object
- # to be formatted
- obj = eval(field_name, kwargs)
-
- # do any conversion on the resulting object
- obj = self.convert_field(obj, conversion)
-
- # format the object and append to the result
- result.append(self.format_field(obj, ''))
-
- return u''.join(py3compat.cast_unicode(s) for s in result)
-
-
-@skip_doctest_py3
-class DollarFormatter(FullEvalFormatter):
- """Formatter allowing Itpl style $foo replacement, for names and attribute
- access only. Standard {foo} replacement also works, and allows full
- evaluation of its arguments.
-
- Examples
- --------
- ::
-
- In [1]: f = DollarFormatter()
- In [2]: f.format('{n//4}', n=8)
- Out[2]: u'2'
-
- In [3]: f.format('23 * 76 is $result', result=23*76)
- Out[3]: u'23 * 76 is 1748'
-
- In [4]: f.format('$a or {b}', a=1, b=2)
- Out[4]: u'1 or 2'
- """
- _dollar_pattern = re.compile(r"(.*?)\$(\$?[\w\.]+)")
- def parse(self, fmt_string):
- for literal_txt, field_name, format_spec, conversion \
- in Formatter.parse(self, fmt_string):
-
- # Find $foo patterns in the literal text.
- continue_from = 0
- txt = ""
- for m in self._dollar_pattern.finditer(literal_txt):
- new_txt, new_field = m.group(1,2)
- # $$foo --> $foo
- if new_field.startswith("$"):
- txt += new_txt + new_field
- else:
- yield (txt + new_txt, new_field, "", None)
- txt = ""
- continue_from = m.end()
-
- # Re-yield the {foo} style pattern
- yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
-
-#-----------------------------------------------------------------------------
-# Utils to columnize a list of strings
-#-----------------------------------------------------------------------------
-
-def _col_chunks(l, max_rows, row_first=False):
- """Yield successive max_rows-sized column chunks from l."""
- if row_first:
- ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
- for i in py3compat.xrange(ncols):
- yield [l[j] for j in py3compat.xrange(i, len(l), ncols)]
- else:
- for i in py3compat.xrange(0, len(l), max_rows):
- yield l[i:(i + max_rows)]
-
-
-def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
- """Calculate optimal info to columnize a list of string"""
- for max_rows in range(1, len(rlist) + 1):
- col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
- sumlength = sum(col_widths)
- ncols = len(col_widths)
- if sumlength + separator_size * (ncols - 1) <= displaywidth:
- break
- return {'num_columns': ncols,
- 'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0,
- 'max_rows': max_rows,
- 'column_widths': col_widths
- }
-
-
-def _get_or_default(mylist, i, default=None):
- """return list item number, or default if don't exist"""
- if i >= len(mylist):
- return default
- else :
- return mylist[i]
-
-
-def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
- """Returns a nested list, and info to columnize items
-
- Parameters
- ----------
-
- items
- list of strings to columnize
- row_first : (default False)
- Whether to compute columns for a row-first matrix instead of
- column-first (default).
- empty : (default None)
- default value to fill list if needed
- separator_size : int (default=2)
- How many characters will be used as separation between columns.
- displaywidth : int (default=80)
- The width of the area into which the columns should fit
-
- Returns
- -------
-
- strings_matrix
-
- nested list of strings; the outermost list contains as many lists as
- rows, and each innermost list has as many elements as columns. If the
- total number of elements in `items` does not equal the product of
- rows*columns, the last elements of some lists are filled with `empty`
- (`None` by default).
-
- dict_info
- some info to make columnize easier:
-
- num_columns
- number of columns
- max_rows
- maximum number of rows (final number may be less)
- column_widths
- list of widths, one per column
- optimal_separator_width
- best separator width between columns
-
- Examples
- --------
- ::
-
- In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
- ...: compute_item_matrix(l, displaywidth=12)
- Out[1]:
- ([['aaa', 'f', 'k'],
- ['b', 'g', 'l'],
- ['cc', 'h', None],
- ['d', 'i', None],
- ['eeeee', 'j', None]],
- {'num_columns': 3,
- 'column_widths': [5, 1, 1],
- 'optimal_separator_width': 2,
- 'max_rows': 5})
- """
- info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
- nrow, ncol = info['max_rows'], info['num_columns']
- if row_first:
- return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
- else:
- return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
-
-
-def columnize(items, row_first=False, separator=' ', displaywidth=80, spread=False):
- """ Transform a list of strings into a single string with columns.
-
- Parameters
- ----------
- items : sequence of strings
- The strings to process.
-
- row_first : (default False)
- Whether to compute columns for a row-first matrix instead of
- column-first (default).
-
- separator : str, optional [default is two spaces]
- The string that separates columns.
-
- displaywidth : int, optional [default is 80]
- Width of the display in number of characters.
-
- spread : bool, optional [default is False]
- If True, pad the separator out to the optimal width so the columns
- spread to fill the display width.
-
- Returns
- -------
- The formatted string.
- """
- if not items:
- return '\n'
- matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
- if spread:
- separator = separator.ljust(int(info['optimal_separator_width']))
- fmatrix = [filter(None, x) for x in matrix]
- sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
- return '\n'.join(map(sjoin, fmatrix))+'\n'
-
-
-def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
- """
- Return a string with a natural enumeration of items
-
- >>> get_text_list(['a', 'b', 'c', 'd'])
- 'a, b, c and d'
- >>> get_text_list(['a', 'b', 'c'], ' or ')
- 'a, b or c'
- >>> get_text_list(['a', 'b', 'c'], ', ')
- 'a, b, c'
- >>> get_text_list(['a', 'b'], ' or ')
- 'a or b'
- >>> get_text_list(['a'])
- 'a'
- >>> get_text_list([])
- ''
- >>> get_text_list(['a', 'b'], wrap_item_with="`")
- '`a` and `b`'
- >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
- 'a + b + c = d'
- """
- if len(list_) == 0:
- return ''
- if wrap_item_with:
- list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
- item in list_]
- if len(list_) == 1:
- return list_[0]
- return '%s%s%s' % (
- sep.join(i for i in list_[:-1]),
- last_sep, list_[-1])
+ return self.__paths
+
+ p = paths = property(get_paths)
+
+ def grep(self, pattern, prune = False, field = None):
+ """ Return all strings matching 'pattern' (a regex or callable)
+
+ This is case-insensitive. If prune is true, return all items
+ NOT matching the pattern.
+
+ If field is specified, the match must occur in the specified
+ whitespace-separated field.
+
+ Examples::
+
+ a.grep( lambda x: x.startswith('C') )
+ a.grep('Cha.*log', prune=1)
+ a.grep('chm', field=-1)
+ """
+
+ def match_target(s):
+ if field is None:
+ return s
+ parts = s.split()
+ try:
+ tgt = parts[field]
+ return tgt
+ except IndexError:
+ return ""
+
+ if isinstance(pattern, py3compat.string_types):
+ pred = lambda x : re.search(pattern, x, re.IGNORECASE)
+ else:
+ pred = pattern
+ if not prune:
+ return SList([el for el in self if pred(match_target(el))])
+ else:
+ return SList([el for el in self if not pred(match_target(el))])
+
+ def fields(self, *fields):
+ """ Collect whitespace-separated fields from string list
+
+ Allows quick awk-like usage of string lists.
+
+ Example data (in var a, created by 'a = !ls -l')::
+
+ -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
+ drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
+
+ * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
+ * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
+ (note the joining by space).
+ * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
+
+ IndexErrors are ignored.
+
+ Without args, fields() just split()'s the strings.
+ """
+ if len(fields) == 0:
+ return [el.split() for el in self]
+
+ res = SList()
+ for el in [f.split() for f in self]:
+ lineparts = []
+
+ for fd in fields:
+ try:
+ lineparts.append(el[fd])
+ except IndexError:
+ pass
+ if lineparts:
+ res.append(" ".join(lineparts))
+
+ return res
+
+ def sort(self,field= None, nums = False):
+ """ sort by specified fields (see fields())
+
+ Example::
+
+ a.sort(1, nums = True)
+
+ Sorts a by second field, in numerical order (so that 21 > 3)
+
+ """
+
+ #decorate, sort, undecorate
+ if field is not None:
+ dsu = [[SList([line]).fields(field), line] for line in self]
+ else:
+ dsu = [[line, line] for line in self]
+ if nums:
+ for i in range(len(dsu)):
+ numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
+ try:
+ n = int(numstr)
+ except ValueError:
+ n = 0
+ dsu[i][0] = n
+
+
+ dsu.sort()
+ return SList([t[1] for t in dsu])
+
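+# A hypothetical usage sketch of SList.grep/.fields/.sort, with made-up data
+# (illustrative only; verified against the implementations above):
+#
+#     In [1]: lines = SList(['total 2', 'foo 10', 'bar 9'])
+#     In [2]: lines.grep('^f')
+#     Out[2]: ['foo 10']
+#     In [3]: lines.fields(0)
+#     Out[3]: ['total', 'foo', 'bar']
+#     In [4]: lines.sort(1, nums=True)
+#     Out[4]: ['total 2', 'bar 9', 'foo 10']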
+
+# FIXME: We need to reimplement type specific displayhook and then add this
+# back as a custom printer. This should also be moved outside utils into the
+# core.
+
+# def print_slist(arg):
+# """ Prettier (non-repr-like) and more informative printer for SList """
+# print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
+# if hasattr(arg, 'hideonce') and arg.hideonce:
+# arg.hideonce = False
+# return
+#
+# nlprint(arg) # This was a nested list printer, now removed.
+#
+# print_slist = result_display.when_type(SList)(print_slist)
+
+
+def indent(instr,nspaces=4, ntabs=0, flatten=False):
+ """Indent a string a given number of spaces or tabstops.
+
+ indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
+
+ Parameters
+ ----------
+
+ instr : basestring
+ The string to be indented.
+ nspaces : int (default: 4)
+ The number of spaces to be indented.
+ ntabs : int (default: 0)
+ The number of tabs to be indented.
+ flatten : bool (default: False)
+ Whether to scrub existing indentation. If True, all lines will be
+ aligned to the same indentation. If False, existing indentation will
+ be strictly increased.
+
+ Returns
+ -------
+
+ str|unicode : string indented by ntabs and nspaces.
+
+ """
+ if instr is None:
+ return
+ ind = '\t'*ntabs+' '*nspaces
+ if flatten:
+ pat = re.compile(r'^\s*', re.MULTILINE)
+ else:
+ pat = re.compile(r'^', re.MULTILINE)
+ outstr = re.sub(pat, ind, instr)
+ if outstr.endswith(os.linesep+ind):
+ return outstr[:-len(ind)]
+ else:
+ return outstr
+
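+# A hypothetical sketch of indent() on illustrative inputs, showing the
+# difference flatten makes:
+#
+#     In [1]: indent('a\nb', 2)
+#     Out[1]: '  a\n  b'
+#     In [2]: indent('  a\n      b', 2, flatten=True)
+#     Out[2]: '  a\n  b'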
+
+def list_strings(arg):
+ """Always return a list of strings, given a string or list of strings
+ as input.
+
+ Examples
+ --------
+ ::
+
+ In [7]: list_strings('A single string')
+ Out[7]: ['A single string']
+
+ In [8]: list_strings(['A single string in a list'])
+ Out[8]: ['A single string in a list']
+
+ In [9]: list_strings(['A','list','of','strings'])
+ Out[9]: ['A', 'list', 'of', 'strings']
+ """
+
+ if isinstance(arg, py3compat.string_types): return [arg]
+ else: return arg
+
+
+def marquee(txt='',width=78,mark='*'):
+ """Return the input string centered in a 'marquee'.
+
+ Examples
+ --------
+ ::
+
+ In [16]: marquee('A test',40)
+ Out[16]: '**************** A test ****************'
+
+ In [17]: marquee('A test',40,'-')
+ Out[17]: '---------------- A test ----------------'
+
+ In [18]: marquee('A test',40,' ')
+ Out[18]: ' A test '
+
+ """
+ if not txt:
+ return (mark*width)[:width]
+ nmark = (width-len(txt)-2)//len(mark)//2
+ if nmark < 0: nmark =0
+ marks = mark*nmark
+ return '%s %s %s' % (marks,txt,marks)
+
+
+ini_spaces_re = re.compile(r'^(\s+)')
+
+def num_ini_spaces(strng):
+ """Return the number of initial spaces in a string"""
+
+ ini_spaces = ini_spaces_re.match(strng)
+ if ini_spaces:
+ return ini_spaces.end()
+ else:
+ return 0
+
+
+def format_screen(strng):
+ """Format a string for screen printing.
+
+ This removes some latex-type format codes."""
+ # Paragraph continue
+ par_re = re.compile(r'\\$',re.MULTILINE)
+ strng = par_re.sub('',strng)
+ return strng
+
+
+def dedent(text):
+ """Equivalent of textwrap.dedent that ignores unindented first line.
+
+ This means it will still dedent strings like:
+ '''foo
+ is a bar
+ '''
+
+ For use in wrap_paragraphs.
+ """
+
+ if text.startswith('\n'):
+ # text starts with blank line, don't ignore the first line
+ return textwrap.dedent(text)
+
+ # split first line
+ splits = text.split('\n',1)
+ if len(splits) == 1:
+ # only one line
+ return textwrap.dedent(text)
+
+ first, rest = splits
+ # dedent everything but the first line
+ rest = textwrap.dedent(rest)
+ return '\n'.join([first, rest])
+
+
+def wrap_paragraphs(text, ncols=80):
+ """Wrap multiple paragraphs to fit a specified width.
+
+ This is equivalent to textwrap.wrap, but with support for multiple
+ paragraphs, as separated by empty lines.
+
+ Returns
+ -------
+
+ list of complete paragraphs, wrapped to fill `ncols` columns.
+ """
+ paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
+ text = dedent(text).strip()
+ paragraphs = paragraph_re.split(text)[::2] # every other entry is space
+ out_ps = []
+ indent_re = re.compile(r'\n\s+', re.MULTILINE)
+ for p in paragraphs:
+ # presume indentation that survives dedent is meaningful formatting,
+ # so don't fill unless text is flush.
+ if indent_re.search(p) is None:
+ # wrap paragraph
+ p = textwrap.fill(p, ncols)
+ out_ps.append(p)
+ return out_ps
+
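+# A hypothetical sketch of dedent() and wrap_paragraphs() on illustrative
+# text (the first line stays put; paragraphs split on blank lines):
+#
+#     In [1]: dedent('summary\n    body line\n    body line')
+#     Out[1]: 'summary\nbody line\nbody line'
+#     In [2]: wrap_paragraphs('first paragraph\n\nsecond paragraph')
+#     Out[2]: ['first paragraph', 'second paragraph']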
+
+def long_substr(data):
+ """Return the longest common substring in a list of strings.
+
+ Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
+ """
+ substr = ''
+ if len(data) > 1 and len(data[0]) > 0:
+ for i in range(len(data[0])):
+ for j in range(len(data[0])-i+1):
+ if j > len(substr) and all(data[0][i:i+j] in x for x in data):
+ substr = data[0][i:i+j]
+ elif len(data) == 1:
+ substr = data[0]
+ return substr
+
+
+def strip_email_quotes(text):
+ """Strip leading email quotation characters ('>').
+
+ Removes any combination of leading '>' interspersed with whitespace that
+ appears *identically* in all lines of the input text.
+
+ Parameters
+ ----------
+ text : str
+
+ Examples
+ --------
+
+ Simple uses::
+
+ In [2]: strip_email_quotes('> > text')
+ Out[2]: 'text'
+
+ In [3]: strip_email_quotes('> > text\\n> > more')
+ Out[3]: 'text\\nmore'
+
+ Note how only the common prefix that appears in all lines is stripped::
+
+ In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
+ Out[4]: '> text\\n> more\\nmore...'
+
+ So if any line has no quote marks ('>'), then none are stripped from any
+ of them::
+
+ In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
+ Out[5]: '> > text\\n> > more\\nlast different'
+ """
+ lines = text.splitlines()
+ matches = set()
+ for line in lines:
+ prefix = re.match(r'^(\s*>[ >]*)', line)
+ if prefix:
+ matches.add(prefix.group(1))
+ else:
+ break
+ else:
+ prefix = long_substr(list(matches))
+ if prefix:
+ strip = len(prefix)
+ text = '\n'.join([ ln[strip:] for ln in lines])
+ return text
+
+def strip_ansi(source):
+ """
+ Remove ansi escape codes from text.
+
+ Parameters
+ ----------
+ source : str
+ Source to remove the ansi from
+ """
+ return re.sub(r'\033\[(\d|;)+?m', '', source)
+
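+# A hypothetical sketch of strip_ansi() on an illustrative color-coded string:
+#
+#     In [1]: strip_ansi('\x1b[31mred\x1b[0m text')
+#     Out[1]: 'red text'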
+
+class EvalFormatter(Formatter):
+ """A String Formatter that allows evaluation of simple expressions.
+
+ Note that this version interprets a : as specifying a format string (as per
+ standard string formatting), so if slicing is required, you must explicitly
+ create a slice.
+
+ This is to be used in templating cases, such as the parallel batch
+ script templates, where simple arithmetic on arguments is useful.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = EvalFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: '2'
+
+ In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
+ Out[3]: 'll'
+ """
+ def get_field(self, name, args, kwargs):
+ v = eval(name, kwargs)
+ return v, name
+
+#XXX: As of Python 3.4, the format string parsing no longer splits on a colon
+# inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
+# above, it should be possible to remove FullEvalFormatter.
+
+@skip_doctest_py3
+class FullEvalFormatter(Formatter):
+ """A String Formatter that allows evaluation of simple expressions.
+
+ Any time a format key is not found in the kwargs,
+ it will be tried as an expression in the kwargs namespace.
+
+ Note that this version allows slicing using [1:2], so you cannot specify
+ a format string. Use :class:`EvalFormatter` to permit format strings.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = FullEvalFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: u'2'
+
+ In [3]: f.format('{list(range(5))[2:4]}')
+ Out[3]: u'[2, 3]'
+
+ In [4]: f.format('{3*2}')
+ Out[4]: u'6'
+ """
+ # copied from Formatter._vformat with minor changes to allow eval
+ # and replace the format_spec code with slicing
+ def vformat(self, format_string, args, kwargs):
+ result = []
+ for literal_text, field_name, format_spec, conversion in \
+ self.parse(format_string):
+
+ # output the literal text
+ if literal_text:
+ result.append(literal_text)
+
+ # if there's a field, output it
+ if field_name is not None:
+ # this is some markup, find the object and do
+ # the formatting
+
+ if format_spec:
+ # override format spec, to allow slicing:
+ field_name = ':'.join([field_name, format_spec])
+
+ # eval the contents of the field for the object
+ # to be formatted
+ obj = eval(field_name, kwargs)
+
+ # do any conversion on the resulting object
+ obj = self.convert_field(obj, conversion)
+
+ # format the object and append to the result
+ result.append(self.format_field(obj, ''))
+
+ return u''.join(py3compat.cast_unicode(s) for s in result)
+
+
+@skip_doctest_py3
+class DollarFormatter(FullEvalFormatter):
+ """Formatter allowing Itpl style $foo replacement, for names and attribute
+ access only. Standard {foo} replacement also works, and allows full
+ evaluation of its arguments.
+
+ Examples
+ --------
+ ::
+
+ In [1]: f = DollarFormatter()
+ In [2]: f.format('{n//4}', n=8)
+ Out[2]: u'2'
+
+ In [3]: f.format('23 * 76 is $result', result=23*76)
+ Out[3]: u'23 * 76 is 1748'
+
+ In [4]: f.format('$a or {b}', a=1, b=2)
+ Out[4]: u'1 or 2'
+ """
+ _dollar_pattern = re.compile(r"(.*?)\$(\$?[\w\.]+)")
+ def parse(self, fmt_string):
+ for literal_txt, field_name, format_spec, conversion \
+ in Formatter.parse(self, fmt_string):
+
+ # Find $foo patterns in the literal text.
+ continue_from = 0
+ txt = ""
+ for m in self._dollar_pattern.finditer(literal_txt):
+ new_txt, new_field = m.group(1,2)
+ # $$foo --> $foo
+ if new_field.startswith("$"):
+ txt += new_txt + new_field
+ else:
+ yield (txt + new_txt, new_field, "", None)
+ txt = ""
+ continue_from = m.end()
+
+ # Re-yield the {foo} style pattern
+ yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
+
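+# A hypothetical sketch of DollarFormatter with illustrative values ($$ is the
+# escape for a literal dollar sign):
+#
+#     In [1]: f = DollarFormatter()
+#     In [2]: f.format('$n plus {n+1}', n=1)
+#     Out[2]: u'1 plus 2'
+#     In [3]: f.format('price: $$5')
+#     Out[3]: u'price: $5'
+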
+#-----------------------------------------------------------------------------
+# Utils to columnize a list of strings
+#-----------------------------------------------------------------------------
+
+def _col_chunks(l, max_rows, row_first=False):
+ """Yield successive max_rows-sized column chunks from l."""
+ if row_first:
+ ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
+ for i in py3compat.xrange(ncols):
+ yield [l[j] for j in py3compat.xrange(i, len(l), ncols)]
+ else:
+ for i in py3compat.xrange(0, len(l), max_rows):
+ yield l[i:(i + max_rows)]
+
+
+def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
+ """Calculate optimal info to columnize a list of string"""
+ for max_rows in range(1, len(rlist) + 1):
+ col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
+ sumlength = sum(col_widths)
+ ncols = len(col_widths)
+ if sumlength + separator_size * (ncols - 1) <= displaywidth:
+ break
+ return {'num_columns': ncols,
+ 'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0,
+ 'max_rows': max_rows,
+ 'column_widths': col_widths
+ }
+
+
+def _get_or_default(mylist, i, default=None):
+ """return list item number, or default if don't exist"""
+ if i >= len(mylist):
+ return default
+ else :
+ return mylist[i]
+
+
+def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
+ """Returns a nested list, and info to columnize items
+
+ Parameters
+ ----------
+
+ items
+ list of strings to columnize
+ row_first : (default False)
+ Whether to compute columns for a row-first matrix instead of
+ column-first (default).
+ empty : (default None)
+ default value to fill list if needed
+ separator_size : int (default=2)
+ How many characters will be used as separation between columns.
+ displaywidth : int (default=80)
+ The width of the area into which the columns should fit
+
+ Returns
+ -------
+
+ strings_matrix
+
+ nested list of strings; the outermost list contains as many lists as
+ rows, and each innermost list has as many elements as columns. If the
+ total number of elements in `items` does not equal the product of
+ rows*columns, the last elements of some lists are filled with `empty`
+ (`None` by default).
+
+ dict_info
+ some info to make columnize easier:
+
+ num_columns
+ number of columns
+ max_rows
+ maximum number of rows (final number may be less)
+ column_widths
+ list of widths, one per column
+ optimal_separator_width
+ best separator width between columns
+
+ Examples
+ --------
+ ::
+
+ In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
+ ...: compute_item_matrix(l, displaywidth=12)
+ Out[1]:
+ ([['aaa', 'f', 'k'],
+ ['b', 'g', 'l'],
+ ['cc', 'h', None],
+ ['d', 'i', None],
+ ['eeeee', 'j', None]],
+ {'num_columns': 3,
+ 'column_widths': [5, 1, 1],
+ 'optimal_separator_width': 2,
+ 'max_rows': 5})
+ """
+ info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
+ nrow, ncol = info['max_rows'], info['num_columns']
+ if row_first:
+ return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
+ else:
+ return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
+
+
+def columnize(items, row_first=False, separator=' ', displaywidth=80, spread=False):
+ """ Transform a list of strings into a single string with columns.
+
+ Parameters
+ ----------
+ items : sequence of strings
+ The strings to process.
+
+ row_first : (default False)
+ Whether to compute columns for a row-first matrix instead of
+ column-first (default).
+
+ separator : str, optional [default is two spaces]
+ The string that separates columns.
+
+ displaywidth : int, optional [default is 80]
+ Width of the display in number of characters.
+
+ spread : bool, optional [default is False]
+ If True, pad the separator out to the optimal width so the columns
+ spread to fill the display width.
+
+ Returns
+ -------
+ The formatted string.
+ """
+ if not items:
+ return '\n'
+ matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
+ if spread:
+ separator = separator.ljust(int(info['optimal_separator_width']))
+ fmatrix = [filter(None, x) for x in matrix]
+ sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
+ return '\n'.join(map(sjoin, fmatrix))+'\n'
+
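+# A hypothetical sketch of columnize() on illustrative items; everything fits
+# on one row at the default display width:
+#
+#     In [1]: columnize(['1', '2', '3', '4'])
+#     Out[1]: '1  2  3  4\n'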
+
+def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
+ """
+ Return a string with a natural enumeration of items
+
+ >>> get_text_list(['a', 'b', 'c', 'd'])
+ 'a, b, c and d'
+ >>> get_text_list(['a', 'b', 'c'], ' or ')
+ 'a, b or c'
+ >>> get_text_list(['a', 'b', 'c'], ', ')
+ 'a, b, c'
+ >>> get_text_list(['a', 'b'], ' or ')
+ 'a or b'
+ >>> get_text_list(['a'])
+ 'a'
+ >>> get_text_list([])
+ ''
+ >>> get_text_list(['a', 'b'], wrap_item_with="`")
+ '`a` and `b`'
+ >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
+ 'a + b + c = d'
+ """
+ if len(list_) == 0:
+ return ''
+ if wrap_item_with:
+ list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
+ item in list_]
+ if len(list_) == 1:
+ return list_[0]
+ return '%s%s%s' % (
+ sep.join(i for i in list_[:-1]),
+ last_sep, list_[-1])
diff --git a/contrib/python/ipython/py2/IPython/utils/timing.py b/contrib/python/ipython/py2/IPython/utils/timing.py
index ff88bf664d..99b7bbc59a 100644
--- a/contrib/python/ipython/py2/IPython/utils/timing.py
+++ b/contrib/python/ipython/py2/IPython/utils/timing.py
@@ -1,118 +1,118 @@
-# encoding: utf-8
-"""
-Utilities for timing code execution.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import time
-
-from .py3compat import xrange
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-# If possible (Unix), use the resource module instead of time.clock()
-try:
- import resource
- def clocku():
- """clocku() -> floating point number
-
- Return the *USER* CPU time in seconds since the start of the process.
- This is done via a call to resource.getrusage, so it avoids the
- wraparound problems in time.clock()."""
-
- return resource.getrusage(resource.RUSAGE_SELF)[0]
-
- def clocks():
- """clocks() -> floating point number
-
- Return the *SYSTEM* CPU time in seconds since the start of the process.
- This is done via a call to resource.getrusage, so it avoids the
- wraparound problems in time.clock()."""
-
- return resource.getrusage(resource.RUSAGE_SELF)[1]
-
- def clock():
- """clock() -> floating point number
-
- Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
- the process. This is done via a call to resource.getrusage, so it
- avoids the wraparound problems in time.clock()."""
-
- u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
- return u+s
-
- def clock2():
- """clock2() -> (t_user,t_system)
-
- Similar to clock(), but return a tuple of user/system times."""
- return resource.getrusage(resource.RUSAGE_SELF)[:2]
-except ImportError:
- # There is no distinction of user/system time under Windows, so we just use
- # time.clock() for everything...
- clocku = clocks = clock = time.clock
- def clock2():
- """Under windows, system CPU time can't be measured.
-
- This just returns clock() and zero."""
- return time.clock(),0.0
-
-
-def timings_out(reps,func,*args,**kw):
- """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
-
- Execute a function reps times, return a tuple with the elapsed total
- CPU time in seconds, the time per call and the function's output.
-
- Under Unix, the return value is the sum of user+system time consumed by
- the process, computed via the resource module. This prevents problems
- related to the wraparound effect which the time.clock() function has.
-
- Under Windows the return value is in wall clock seconds. See the
- documentation for the time module for more details."""
-
- reps = int(reps)
- assert reps >=1, 'reps must be >= 1'
- if reps==1:
- start = clock()
- out = func(*args,**kw)
- tot_time = clock()-start
- else:
- rng = xrange(reps-1) # the last time is executed separately to store output
- start = clock()
- for dummy in rng: func(*args,**kw)
- out = func(*args,**kw) # one last time
- tot_time = clock()-start
- av_time = tot_time / reps
- return tot_time,av_time,out
-
-
-def timings(reps,func,*args,**kw):
- """timings(reps,func,*args,**kw) -> (t_total,t_per_call)
-
- Execute a function reps times, return a tuple with the elapsed total CPU
- time in seconds and the time per call. These are just the first two values
- in timings_out()."""
-
- return timings_out(reps,func,*args,**kw)[0:2]
-
-
-def timing(func,*args,**kw):
- """timing(func,*args,**kw) -> t_total
-
- Execute a function once, return the elapsed total CPU time in
- seconds. This is just the first value in timings_out()."""
-
- return timings_out(1,func,*args,**kw)[0]
-
+# encoding: utf-8
+"""
+Utilities for timing code execution.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2008-2011 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+import time
+
+from .py3compat import xrange
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+# If possible (Unix), use the resource module instead of time.clock()
+try:
+ import resource
+ def clocku():
+ """clocku() -> floating point number
+
+ Return the *USER* CPU time in seconds since the start of the process.
+ This is done via a call to resource.getrusage, so it avoids the
+ wraparound problems in time.clock()."""
+
+ return resource.getrusage(resource.RUSAGE_SELF)[0]
+
+ def clocks():
+ """clocks() -> floating point number
+
+ Return the *SYSTEM* CPU time in seconds since the start of the process.
+ This is done via a call to resource.getrusage, so it avoids the
+ wraparound problems in time.clock()."""
+
+ return resource.getrusage(resource.RUSAGE_SELF)[1]
+
+ def clock():
+ """clock() -> floating point number
+
+ Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
+ the process. This is done via a call to resource.getrusage, so it
+ avoids the wraparound problems in time.clock()."""
+
+ u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
+ return u+s
+
+ def clock2():
+ """clock2() -> (t_user,t_system)
+
+ Similar to clock(), but return a tuple of user/system times."""
+ return resource.getrusage(resource.RUSAGE_SELF)[:2]
+except ImportError:
+ # There is no distinction of user/system time under Windows, so we just use
+ # time.clock() for everything...
+ clocku = clocks = clock = time.clock
+ def clock2():
+ """Under windows, system CPU time can't be measured.
+
+ This just returns clock() and zero."""
+ return time.clock(),0.0
+
+
+def timings_out(reps,func,*args,**kw):
+ """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)
+
+ Execute a function reps times, return a tuple with the elapsed total
+ CPU time in seconds, the time per call and the function's output.
+
+ Under Unix, the return value is the sum of user+system time consumed by
+ the process, computed via the resource module. This prevents problems
+ related to the wraparound effect which the time.clock() function has.
+
+ Under Windows the return value is in wall clock seconds. See the
+ documentation for the time module for more details."""
+
+ reps = int(reps)
+ assert reps >=1, 'reps must be >= 1'
+ if reps==1:
+ start = clock()
+ out = func(*args,**kw)
+ tot_time = clock()-start
+ else:
+ rng = xrange(reps-1) # the last time is executed separately to store output
+ start = clock()
+ for dummy in rng: func(*args,**kw)
+ out = func(*args,**kw) # one last time
+ tot_time = clock()-start
+ av_time = tot_time / reps
+ return tot_time,av_time,out
+
+
+def timings(reps,func,*args,**kw):
+ """timings(reps,func,*args,**kw) -> (t_total,t_per_call)
+
+ Execute a function reps times, return a tuple with the elapsed total CPU
+ time in seconds and the time per call. These are just the first two values
+ in timings_out()."""
+
+ return timings_out(reps,func,*args,**kw)[0:2]
+
+
+def timing(func,*args,**kw):
+ """timing(func,*args,**kw) -> t_total
+
+ Execute a function once, return the elapsed total CPU time in
+ seconds. This is just the first value in timings_out()."""
+
+ return timings_out(1,func,*args,**kw)[0]
+
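+# A hypothetical sketch of the timing helpers with an illustrative call:
+#
+#     In [1]: t_total, t_per_call, result = timings_out(3, pow, 2, 10)
+#     In [2]: result
+#     Out[2]: 1024
+#     In [3]: timing(pow, 2, 10)  # total CPU seconds for a single call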
diff --git a/contrib/python/ipython/py2/IPython/utils/tokenize2.py b/contrib/python/ipython/py2/IPython/utils/tokenize2.py
index 7e60a8a629..cbb5292e5a 100644
--- a/contrib/python/ipython/py2/IPython/utils/tokenize2.py
+++ b/contrib/python/ipython/py2/IPython/utils/tokenize2.py
@@ -1,9 +1,9 @@
-"""Load our patched versions of tokenize.
-"""
-
-import sys
-
-if sys.version_info[0] >= 3:
- from ._tokenize_py3 import *
-else:
- from ._tokenize_py2 import *
+"""Load our patched versions of tokenize.
+"""
+
+import sys
+
+if sys.version_info[0] >= 3:
+ from ._tokenize_py3 import *
+else:
+ from ._tokenize_py2 import *
diff --git a/contrib/python/ipython/py2/IPython/utils/tokenutil.py b/contrib/python/ipython/py2/IPython/utils/tokenutil.py
index 940da98d3d..f52d3b7658 100644
--- a/contrib/python/ipython/py2/IPython/utils/tokenutil.py
+++ b/contrib/python/ipython/py2/IPython/utils/tokenutil.py
@@ -1,128 +1,128 @@
-"""Token-related utilities"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import absolute_import, print_function
-
-from collections import namedtuple
-from io import StringIO
-from keyword import iskeyword
-
-from . import tokenize2
-from .py3compat import cast_unicode_py2
-
-Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
-
-def generate_tokens(readline):
- """wrap generate_tokens to catch EOF errors"""
- try:
- for token in tokenize2.generate_tokens(readline):
- yield token
- except tokenize2.TokenError:
- # catch EOF error
- return
-
-def line_at_cursor(cell, cursor_pos=0):
- """Return the line in a cell at a given cursor position
-
- Used for calling line-based APIs that don't yet support multi-line input.
-
- Parameters
- ----------
-
+"""Token-related utilities"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import absolute_import, print_function
+
+from collections import namedtuple
+from io import StringIO
+from keyword import iskeyword
+
+from . import tokenize2
+from .py3compat import cast_unicode_py2
+
+Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
+
+def generate_tokens(readline):
+ """wrap generate_tokens to catch EOF errors"""
+ try:
+ for token in tokenize2.generate_tokens(readline):
+ yield token
+ except tokenize2.TokenError:
+ # catch EOF error
+ return
+
+def line_at_cursor(cell, cursor_pos=0):
+ """Return the line in a cell at a given cursor position
+
+ Used for calling line-based APIs that don't yet support multi-line input.
+
+ Parameters
+ ----------
+
cell: str
- multiline block of text
- cursor_pos: integer
- the cursor position
-
- Returns
- -------
-
- (line, offset): (text, integer)
- The line with the current cursor, and the character offset of the start of the line.
- """
- offset = 0
- lines = cell.splitlines(True)
- for line in lines:
- next_offset = offset + len(line)
- if next_offset >= cursor_pos:
- break
- offset = next_offset
- else:
- line = ""
- return (line, offset)
-
-def token_at_cursor(cell, cursor_pos=0):
- """Get the token at a given cursor
-
- Used for introspection.
-
- Function calls are prioritized, so the token for the callable will be returned
- if the cursor is anywhere inside the call.
-
- Parameters
- ----------
-
- cell : unicode
- A block of Python code
- cursor_pos : int
- The location of the cursor in the block where the token should be found
- """
- cell = cast_unicode_py2(cell)
- names = []
- tokens = []
- call_names = []
-
- offsets = {1: 0} # lines start at 1
- for tup in generate_tokens(StringIO(cell).readline):
-
- tok = Token(*tup)
-
- # token, text, start, end, line = tup
- start_line, start_col = tok.start
- end_line, end_col = tok.end
- if end_line + 1 not in offsets:
- # keep track of offsets for each line
- lines = tok.line.splitlines(True)
- for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
- if lineno not in offsets:
- offsets[lineno] = offsets[lineno-1] + len(line)
-
- offset = offsets[start_line]
- # allow '|foo' to find 'foo' at the beginning of a line
- boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
- if offset + start_col >= boundary:
- # current token starts after the cursor,
- # don't consume it
- break
-
- if tok.token == tokenize2.NAME and not iskeyword(tok.text):
- if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
- names[-1] = "%s.%s" % (names[-1], tok.text)
- else:
- names.append(tok.text)
- elif tok.token == tokenize2.OP:
- if tok.text == '=' and names:
- # don't inspect the lhs of an assignment
- names.pop(-1)
- if tok.text == '(' and names:
- # if we are inside a function call, inspect the function
- call_names.append(names[-1])
- elif tok.text == ')' and call_names:
- call_names.pop(-1)
-
- tokens.append(tok)
-
- if offsets[end_line] + end_col > cursor_pos:
- # we found the cursor, stop reading
- break
-
- if call_names:
- return call_names[-1]
- elif names:
- return names[-1]
- else:
- return ''
-
-
+ multiline block of text
+ cursor_pos: integer
+ the cursor position
+
+ Returns
+ -------
+
+ (line, offset): (text, integer)
+ The line with the current cursor, and the character offset of the start of the line.
+ """
+ offset = 0
+ lines = cell.splitlines(True)
+ for line in lines:
+ next_offset = offset + len(line)
+ if next_offset >= cursor_pos:
+ break
+ offset = next_offset
+ else:
+ line = ""
+ return (line, offset)
+
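+# A hypothetical sketch of line_at_cursor() with an illustrative cell; cursor
+# position 4 falls on the second line, which starts at offset 3:
+#
+#     In [1]: line_at_cursor('ab\ncd\n', cursor_pos=4)
+#     Out[1]: ('cd\n', 3)
+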
+def token_at_cursor(cell, cursor_pos=0):
+ """Get the token at a given cursor
+
+ Used for introspection.
+
+ Function calls are prioritized, so the token for the callable will be returned
+ if the cursor is anywhere inside the call.
+
+ Parameters
+ ----------
+
+ cell : unicode
+ A block of Python code
+ cursor_pos : int
+ The location of the cursor in the block where the token should be found
+ """
+ cell = cast_unicode_py2(cell)
+ names = []
+ tokens = []
+ call_names = []
+
+ offsets = {1: 0} # lines start at 1
+ for tup in generate_tokens(StringIO(cell).readline):
+
+ tok = Token(*tup)
+
+ # token, text, start, end, line = tup
+ start_line, start_col = tok.start
+ end_line, end_col = tok.end
+ if end_line + 1 not in offsets:
+ # keep track of offsets for each line
+ lines = tok.line.splitlines(True)
+ for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
+ if lineno not in offsets:
+ offsets[lineno] = offsets[lineno-1] + len(line)
+
+ offset = offsets[start_line]
+ # allow '|foo' to find 'foo' at the beginning of a line
+ boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
+ if offset + start_col >= boundary:
+ # current token starts after the cursor,
+ # don't consume it
+ break
+
+ if tok.token == tokenize2.NAME and not iskeyword(tok.text):
+ if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
+ names[-1] = "%s.%s" % (names[-1], tok.text)
+ else:
+ names.append(tok.text)
+ elif tok.token == tokenize2.OP:
+ if tok.text == '=' and names:
+ # don't inspect the lhs of an assignment
+ names.pop(-1)
+ if tok.text == '(' and names:
+ # if we are inside a function call, inspect the function
+ call_names.append(names[-1])
+ elif tok.text == ')' and call_names:
+ call_names.pop(-1)
+
+ tokens.append(tok)
+
+ if offsets[end_line] + end_col > cursor_pos:
+ # we found the cursor, stop reading
+ break
+
+ if call_names:
+ return call_names[-1]
+ elif names:
+ return names[-1]
+ else:
+ return ''
+
+
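+# A hypothetical sketch of token_at_cursor() with illustrative source and
+# offsets; inside a call the callable wins, elsewhere the nearest name does:
+#
+#     In [1]: token_at_cursor('a + fn(b)', cursor_pos=8)
+#     Out[1]: u'fn'
+#     In [2]: token_at_cursor('a + fn(b)', cursor_pos=0)
+#     Out[2]: u'a'
+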
diff --git a/contrib/python/ipython/py2/IPython/utils/traitlets.py b/contrib/python/ipython/py2/IPython/utils/traitlets.py
index 0ff664fb5c..b4ff7a2689 100644
--- a/contrib/python/ipython/py2/IPython/utils/traitlets.py
+++ b/contrib/python/ipython/py2/IPython/utils/traitlets.py
@@ -1,7 +1,7 @@
-from __future__ import absolute_import
-
-from warnings import warn
-
-warn("IPython.utils.traitlets has moved to a top-level traitlets package.")
-
-from traitlets import *
+from __future__ import absolute_import
+
+from warnings import warn
+
+warn("IPython.utils.traitlets has moved to a top-level traitlets package.")
+
+from traitlets import *
diff --git a/contrib/python/ipython/py2/IPython/utils/tz.py b/contrib/python/ipython/py2/IPython/utils/tz.py
index 14172b2f4a..b315d532d1 100644
--- a/contrib/python/ipython/py2/IPython/utils/tz.py
+++ b/contrib/python/ipython/py2/IPython/utils/tz.py
@@ -1,46 +1,46 @@
-# encoding: utf-8
-"""
-Timezone utilities
-
-Just UTC-awareness right now
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2013 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from datetime import tzinfo, timedelta, datetime
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-# constant for zero offset
-ZERO = timedelta(0)
-
-class tzUTC(tzinfo):
- """tzinfo object for UTC (zero offset)"""
-
- def utcoffset(self, d):
- return ZERO
-
- def dst(self, d):
- return ZERO
-
-UTC = tzUTC()
-
-def utc_aware(unaware):
- """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
- def utc_method(*args, **kwargs):
- dt = unaware(*args, **kwargs)
- return dt.replace(tzinfo=UTC)
- return utc_method
-
-utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
-utcnow = utc_aware(datetime.utcnow)
+# encoding: utf-8
+"""
+Timezone utilities
+
+Just UTC-awareness right now
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from datetime import tzinfo, timedelta, datetime
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+# constant for zero offset
+ZERO = timedelta(0)
+
+class tzUTC(tzinfo):
+ """tzinfo object for UTC (zero offset)"""
+
+ def utcoffset(self, d):
+ return ZERO
+
+ def dst(self, d):
+ return ZERO
+
+UTC = tzUTC()
+
+def utc_aware(unaware):
+ """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
+ def utc_method(*args, **kwargs):
+ dt = unaware(*args, **kwargs)
+ return dt.replace(tzinfo=UTC)
+ return utc_method
+
+utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
+utcnow = utc_aware(datetime.utcnow)
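+
+# A hypothetical sketch of the UTC-aware wrappers (illustrative call): the
+# returned datetime carries the module-level UTC tzinfo instance.
+#
+#     In [1]: utcnow().tzinfo is UTC
+#     Out[1]: True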
diff --git a/contrib/python/ipython/py2/IPython/utils/ulinecache.py b/contrib/python/ipython/py2/IPython/utils/ulinecache.py
index 886454c267..f53b0dde69 100644
--- a/contrib/python/ipython/py2/IPython/utils/ulinecache.py
+++ b/contrib/python/ipython/py2/IPython/utils/ulinecache.py
@@ -1,45 +1,45 @@
-"""Wrapper around linecache which decodes files to unicode according to PEP 263.
-
-This is only needed for Python 2 - linecache in Python 3 does the same thing
-itself.
-"""
-import functools
-import linecache
-import sys
-
-from IPython.utils import py3compat
-from IPython.utils import openpy
-
-if py3compat.PY3:
- getline = linecache.getline
-
- # getlines has to be looked up at runtime, because doctests monkeypatch it.
- @functools.wraps(linecache.getlines)
- def getlines(filename, module_globals=None):
- return linecache.getlines(filename, module_globals=module_globals)
-
-else:
- def getlines(filename, module_globals=None):
- """Get the lines (as unicode) for a file from the cache.
- Update the cache if it doesn't contain an entry for this file already."""
- filename = py3compat.cast_bytes(filename, sys.getfilesystemencoding())
- lines = linecache.getlines(filename, module_globals=module_globals)
-
- # The bits we cache ourselves can be unicode.
- if (not lines) or isinstance(lines[0], py3compat.unicode_type):
- return lines
-
- readline = openpy._list_readline(lines)
- try:
- encoding, _ = openpy.detect_encoding(readline)
- except SyntaxError:
- encoding = 'ascii'
- return [l.decode(encoding, 'replace') for l in lines]
-
- # This is a straight copy of linecache.getline
- def getline(filename, lineno, module_globals=None):
- lines = getlines(filename, module_globals)
- if 1 <= lineno <= len(lines):
- return lines[lineno-1]
- else:
- return ''
+"""Wrapper around linecache which decodes files to unicode according to PEP 263.
+
+This is only needed for Python 2 - linecache in Python 3 does the same thing
+itself.
+"""
+import functools
+import linecache
+import sys
+
+from IPython.utils import py3compat
+from IPython.utils import openpy
+
+if py3compat.PY3:
+ getline = linecache.getline
+
+ # getlines has to be looked up at runtime, because doctests monkeypatch it.
+ @functools.wraps(linecache.getlines)
+ def getlines(filename, module_globals=None):
+ return linecache.getlines(filename, module_globals=module_globals)
+
+else:
+ def getlines(filename, module_globals=None):
+ """Get the lines (as unicode) for a file from the cache.
+ Update the cache if it doesn't contain an entry for this file already."""
+ filename = py3compat.cast_bytes(filename, sys.getfilesystemencoding())
+ lines = linecache.getlines(filename, module_globals=module_globals)
+
+ # The bits we cache ourselves can be unicode.
+ if (not lines) or isinstance(lines[0], py3compat.unicode_type):
+ return lines
+
+ readline = openpy._list_readline(lines)
+ try:
+ encoding, _ = openpy.detect_encoding(readline)
+ except SyntaxError:
+ encoding = 'ascii'
+ return [l.decode(encoding, 'replace') for l in lines]
+
+ # This is a straight copy of linecache.getline
+ def getline(filename, lineno, module_globals=None):
+ lines = getlines(filename, module_globals)
+ if 1 <= lineno <= len(lines):
+ return lines[lineno-1]
+ else:
+ return ''
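Usage sketch (editor's illustration; 'some_module.py' is a hypothetical file name):

from IPython.utils import ulinecache

# On Python 2 the lines are decoded per the file's PEP 263 coding cookie;
# on Python 3 this simply delegates to the stdlib linecache module.
lines = ulinecache.getlines('some_module.py')    # list of unicode lines
first = ulinecache.getline('some_module.py', 1)  # '' when lineno is out of range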
diff --git a/contrib/python/ipython/py2/IPython/utils/version.py b/contrib/python/ipython/py2/IPython/utils/version.py
index 3d1018f7bd..1de0047e6b 100644
--- a/contrib/python/ipython/py2/IPython/utils/version.py
+++ b/contrib/python/ipython/py2/IPython/utils/version.py
@@ -1,36 +1,36 @@
-# encoding: utf-8
-"""
-Utilities for version comparison
-
-It is a bit ridiculous that we need these.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (C) 2013 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-from distutils.version import LooseVersion
-
-#-----------------------------------------------------------------------------
-# Code
-#-----------------------------------------------------------------------------
-
-def check_version(v, check):
- """check version string v >= check
-
- If dev/prerelease tags result in TypeError for string-number comparison,
- it is assumed that the dependency is satisfied.
- Users on dev branches are responsible for keeping their own packages up to date.
- """
- try:
- return LooseVersion(v) >= LooseVersion(check)
- except TypeError:
- return True
-
+# encoding: utf-8
+"""
+Utilities for version comparison
+
+It is a bit ridiculous that we need these.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (C) 2013 The IPython Development Team
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from distutils.version import LooseVersion
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+def check_version(v, check):
+ """check version string v >= check
+
+ If dev/prerelease tags result in TypeError for string-number comparison,
+ it is assumed that the dependency is satisfied.
+ Users on dev branches are responsible for keeping their own packages up to date.
+ """
+ try:
+ return LooseVersion(v) >= LooseVersion(check)
+ except TypeError:
+ return True
+
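A short illustration of the comparison semantics (editor's sketch):

from IPython.utils.version import check_version

check_version('1.10', '1.9')       # True: LooseVersion compares numerically
check_version('1.9', '1.10')       # False
# Comparing a dev/prerelease tag such as '1.9.dev' against '1.9.1' compares
# a str to an int, which raises TypeError on Python 3; check_version
# swallows that and optimistically returns True.
check_version('1.9.dev', '1.9.1')  # True (on Python 3 via the TypeError fallback)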
diff --git a/contrib/python/ipython/py2/IPython/utils/warn.py b/contrib/python/ipython/py2/IPython/utils/warn.py
index 831e4265ac..dd4852227b 100644
--- a/contrib/python/ipython/py2/IPython/utils/warn.py
+++ b/contrib/python/ipython/py2/IPython/utils/warn.py
@@ -1,65 +1,65 @@
-# encoding: utf-8
-"""
-Utilities for warnings. Shoudn't we just use the built in warnings module.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-import sys
+# encoding: utf-8
+"""
+Utilities for warnings. Shouldn't we just use the built-in warnings module?
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
+import sys
import warnings
-
+
warnings.warn("The module IPython.utils.warn is deprecated since IPython 4.0, use the standard warnings module instead", DeprecationWarning)
-
-def warn(msg,level=2,exit_val=1):
+
+def warn(msg,level=2,exit_val=1):
"""Deprecated
-
+
Standard warning printer. Gives formatting consistency.
-
+
Output is sent to sys.stderr.
- Options:
-
- -level(2): allows finer control:
- 0 -> Do nothing, dummy function.
- 1 -> Print message.
- 2 -> Print 'WARNING:' + message. (Default level).
- 3 -> Print 'ERROR:' + message.
- 4 -> Print 'FATAL ERROR:' + message and trigger a sys.exit(exit_val).
-
- -exit_val (1): exit value returned by sys.exit() for a level 4
- warning. Ignored for all other levels."""
+ Options:
+
+ -level(2): allows finer control:
+ 0 -> Do nothing, dummy function.
+ 1 -> Print message.
+ 2 -> Print 'WARNING:' + message. (Default level).
+ 3 -> Print 'ERROR:' + message.
+ 4 -> Print 'FATAL ERROR:' + message and trigger a sys.exit(exit_val).
+
+ -exit_val (1): exit value returned by sys.exit() for a level 4
+ warning. Ignored for all other levels."""
warnings.warn("The module IPython.utils.warn is deprecated since IPython 4.0, use the standard warnings module instead", DeprecationWarning)
- if level>0:
- header = ['','','WARNING: ','ERROR: ','FATAL ERROR: ']
+ if level>0:
+ header = ['','','WARNING: ','ERROR: ','FATAL ERROR: ']
print(header[level], msg, sep='', file=sys.stderr)
- if level == 4:
+ if level == 4:
print('Exiting.\n', file=sys.stderr)
- sys.exit(exit_val)
-
-
-def info(msg):
+ sys.exit(exit_val)
+
+
+def info(msg):
"""Deprecated
Equivalent to warn(msg,level=1)."""
-
- warn(msg,level=1)
-
-
-def error(msg):
+
+ warn(msg,level=1)
+
+
+def error(msg):
"""Deprecated
Equivalent to warn(msg,level=3)."""
-
- warn(msg,level=3)
-
-
-def fatal(msg,exit_val=1):
+
+ warn(msg,level=3)
+
+
+def fatal(msg,exit_val=1):
"""Deprecated
Equivalent to warn(msg,exit_val=exit_val,level=4)."""
-
- warn(msg,exit_val=exit_val,level=4)
+
+ warn(msg,exit_val=exit_val,level=4)
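How the deprecated helpers map onto the levels (editor's sketch; each call also emits a DeprecationWarning):

from IPython.utils.warn import warn, info, error, fatal

info('loaded profile')      # level 1: message only, on sys.stderr
warn('falling back')        # level 2: 'WARNING: falling back'
error('bad input')          # level 3: 'ERROR: bad input'
# fatal('cannot continue')  # level 4: 'FATAL ERROR: ...' then sys.exit(1)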
diff --git a/contrib/python/ipython/py2/IPython/utils/wildcard.py b/contrib/python/ipython/py2/IPython/utils/wildcard.py
index f8e895752c..d22491bd96 100644
--- a/contrib/python/ipython/py2/IPython/utils/wildcard.py
+++ b/contrib/python/ipython/py2/IPython/utils/wildcard.py
@@ -1,112 +1,112 @@
-# -*- coding: utf-8 -*-
-"""Support for wildcard pattern matching in object inspection.
-
-Authors
--------
-- Jörgen Stenarson <jorgen.stenarson@bostream.nu>
-- Thomas Kluyver
-"""
-
-#*****************************************************************************
-# Copyright (C) 2005 Jörgen Stenarson <jorgen.stenarson@bostream.nu>
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#*****************************************************************************
-
-import re
-import types
-
-from IPython.utils.dir2 import dir2
-from .py3compat import iteritems
-
-def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
- """Return dictionaries mapping lower case typename (e.g. 'tuple') to type
- objects from the types package, and vice versa."""
- typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
- typestr2type, type2typestr = {}, {}
-
- for tname in typenamelist:
- name = tname[:-4].lower() # Cut 'Type' off the end of the name
- obj = getattr(types, tname)
- typestr2type[name] = obj
- if name not in dont_include_in_type2typestr:
- type2typestr[obj] = name
- return typestr2type, type2typestr
-
-typestr2type, type2typestr = create_typestr2type_dicts()
-
-def is_type(obj, typestr_or_type):
- """is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
- can take strings or actual python types for the second argument, i.e.
- 'tuple'<->TupleType. 'all' matches all types.
-
- TODO: Should be extended for choosing more than one type."""
- if typestr_or_type == "all":
- return True
- if type(typestr_or_type) == type:
- test_type = typestr_or_type
- else:
- test_type = typestr2type.get(typestr_or_type, False)
- if test_type:
- return isinstance(obj, test_type)
- return False
-
-def show_hidden(str, show_all=False):
- """Return true for strings starting with single _ if show_all is true."""
- return show_all or str.startswith("__") or not str.startswith("_")
-
-def dict_dir(obj):
- """Produce a dictionary of an object's attributes. Builds on dir2 by
- checking that a getattr() call actually succeeds."""
- ns = {}
- for key in dir2(obj):
- # This seemingly unnecessary try/except is actually needed
- # because there is code out there with metaclasses that
- # create 'write only' attributes, where a getattr() call
- # will fail even if the attribute appears listed in the
- # object's dictionary. Properties can actually do the same
- # thing. In particular, Traits use this pattern
- try:
- ns[key] = getattr(obj, key)
- except AttributeError:
- pass
- return ns
-
-def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
- show_all=True):
- """Filter a namespace dictionary by name pattern and item type."""
- pattern = name_pattern.replace("*",".*").replace("?",".")
- if ignore_case:
- reg = re.compile(pattern+"$", re.I)
- else:
- reg = re.compile(pattern+"$")
-
- # Check each one matches regex; shouldn't be hidden; of correct type.
- return dict((key,obj) for key, obj in iteritems(ns) if reg.match(key) \
- and show_hidden(key, show_all) \
- and is_type(obj, type_pattern) )
-
-def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
- """Return dictionary of all objects in a namespace dictionary that match
- type_pattern and filter."""
- pattern_list=filter.split(".")
- if len(pattern_list) == 1:
- return filter_ns(namespace, name_pattern=pattern_list[0],
- type_pattern=type_pattern,
- ignore_case=ignore_case, show_all=show_all)
- else:
- # This is where we can change if all objects should be searched or
- # only modules. Just change the type_pattern to module to search only
- # modules
- filtered = filter_ns(namespace, name_pattern=pattern_list[0],
- type_pattern="all",
- ignore_case=ignore_case, show_all=show_all)
- results = {}
- for name, obj in iteritems(filtered):
- ns = list_namespace(dict_dir(obj), type_pattern,
- ".".join(pattern_list[1:]),
- ignore_case=ignore_case, show_all=show_all)
- for inner_name, inner_obj in iteritems(ns):
- results["%s.%s"%(name,inner_name)] = inner_obj
- return results
+# -*- coding: utf-8 -*-
+"""Support for wildcard pattern matching in object inspection.
+
+Authors
+-------
+- Jörgen Stenarson <jorgen.stenarson@bostream.nu>
+- Thomas Kluyver
+"""
+
+#*****************************************************************************
+# Copyright (C) 2005 Jörgen Stenarson <jorgen.stenarson@bostream.nu>
+#
+# Distributed under the terms of the BSD License. The full license is in
+# the file COPYING, distributed as part of this software.
+#*****************************************************************************
+
+import re
+import types
+
+from IPython.utils.dir2 import dir2
+from .py3compat import iteritems
+
+def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
+ """Return dictionaries mapping lower case typename (e.g. 'tuple') to type
+ objects from the types package, and vice versa."""
+ typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
+ typestr2type, type2typestr = {}, {}
+
+ for tname in typenamelist:
+ name = tname[:-4].lower() # Cut 'Type' off the end of the name
+ obj = getattr(types, tname)
+ typestr2type[name] = obj
+ if name not in dont_include_in_type2typestr:
+ type2typestr[obj] = name
+ return typestr2type, type2typestr
+
+typestr2type, type2typestr = create_typestr2type_dicts()
+
+def is_type(obj, typestr_or_type):
+ """is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
+ can take strings or actual python types for the second argument, i.e.
+ 'tuple'<->TupleType. 'all' matches all types.
+
+ TODO: Should be extended for choosing more than one type."""
+ if typestr_or_type == "all":
+ return True
+ if type(typestr_or_type) == type:
+ test_type = typestr_or_type
+ else:
+ test_type = typestr2type.get(typestr_or_type, False)
+ if test_type:
+ return isinstance(obj, test_type)
+ return False
+
+def show_hidden(str, show_all=False):
+ """Return true for strings starting with single _ if show_all is true."""
+ return show_all or str.startswith("__") or not str.startswith("_")
+
+def dict_dir(obj):
+ """Produce a dictionary of an object's attributes. Builds on dir2 by
+ checking that a getattr() call actually succeeds."""
+ ns = {}
+ for key in dir2(obj):
+ # This seemingly unnecessary try/except is actually needed
+ # because there is code out there with metaclasses that
+ # create 'write only' attributes, where a getattr() call
+ # will fail even if the attribute appears listed in the
+ # object's dictionary. Properties can actually do the same
+ # thing. In particular, Traits use this pattern
+ try:
+ ns[key] = getattr(obj, key)
+ except AttributeError:
+ pass
+ return ns
+
+def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
+ show_all=True):
+ """Filter a namespace dictionary by name pattern and item type."""
+ pattern = name_pattern.replace("*",".*").replace("?",".")
+ if ignore_case:
+ reg = re.compile(pattern+"$", re.I)
+ else:
+ reg = re.compile(pattern+"$")
+
+ # Check each one matches regex; shouldn't be hidden; of correct type.
+ return dict((key,obj) for key, obj in iteritems(ns) if reg.match(key) \
+ and show_hidden(key, show_all) \
+ and is_type(obj, type_pattern) )
+
+def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
+ """Return dictionary of all objects in a namespace dictionary that match
+ type_pattern and filter."""
+ pattern_list=filter.split(".")
+ if len(pattern_list) == 1:
+ return filter_ns(namespace, name_pattern=pattern_list[0],
+ type_pattern=type_pattern,
+ ignore_case=ignore_case, show_all=show_all)
+ else:
+ # This is where we can change if all objects should be searched or
+ # only modules. Just change the type_pattern to module to search only
+ # modules
+ filtered = filter_ns(namespace, name_pattern=pattern_list[0],
+ type_pattern="all",
+ ignore_case=ignore_case, show_all=show_all)
+ results = {}
+ for name, obj in iteritems(filtered):
+ ns = list_namespace(dict_dir(obj), type_pattern,
+ ".".join(pattern_list[1:]),
+ ignore_case=ignore_case, show_all=show_all)
+ for inner_name, inner_obj in iteritems(ns):
+ results["%s.%s"%(name,inner_name)] = inner_obj
+ return results
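Illustrative usage of the helpers above (editor's sketch):

import os

ns = dict_dir(os)                          # attribute name -> object, getattr-safe
# All names in `os` starting with 'get', of any type, case-insensitively:
matches = filter_ns(ns, name_pattern='get*', type_pattern='all')
# Dotted patterns recurse one attribute level per dot:
deep = list_namespace({'os': os}, 'all', 'os.get*')  # keys like 'os.getcwd'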
diff --git a/contrib/python/ipython/py2/bin/ya.make b/contrib/python/ipython/py2/bin/ya.make
index c9d596dbae..0ff960df6f 100644
--- a/contrib/python/ipython/py2/bin/ya.make
+++ b/contrib/python/ipython/py2/bin/ya.make
@@ -1,11 +1,11 @@
PROGRAM(ipython)
-
+
OWNER(g:python-contrib borman nslus)
-
+
PEERDIR(
contrib/python/ipython
-)
-
+)
+
PY_MAIN(IPython:start_ipython)
-END()
+END()
diff --git a/contrib/python/ipython/ya.make b/contrib/python/ipython/ya.make
index d5058e7943..b1dd8c5e23 100644
--- a/contrib/python/ipython/ya.make
+++ b/contrib/python/ipython/ya.make
@@ -1,5 +1,5 @@
PY23_LIBRARY()
-
+
LICENSE(Service-Py23-Proxy)
OWNER(g:python-contrib)
@@ -12,7 +12,7 @@ ENDIF()
NO_LINT()
-END()
+END()
RECURSE(
py2
diff --git a/contrib/python/jedi/LICENSE.txt b/contrib/python/jedi/LICENSE.txt
index 06be7c0b0b..94f954567b 100644
--- a/contrib/python/jedi/LICENSE.txt
+++ b/contrib/python/jedi/LICENSE.txt
@@ -1,24 +1,24 @@
All contributions towards Jedi are MIT licensed.
-------------------------------------------------------------------------------
-The MIT License (MIT)
-
-Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+The MIT License (MIT)
+
+Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/contrib/python/jedi/jedi/__init__.py b/contrib/python/jedi/jedi/__init__.py
index 6f423b4ed8..d23739bee3 100644
--- a/contrib/python/jedi/jedi/__init__.py
+++ b/contrib/python/jedi/jedi/__init__.py
@@ -1,46 +1,46 @@
-"""
-Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
-historic focus is autocompletion, but does static analysis for now as well.
-Jedi is fast and is very well tested. It understands Python on a deeper level
-than all other static analysis frameworks for Python.
-
-Jedi has support for two different goto functions. It's possible to search for
-related names and to list all names in a Python file and infer them. Jedi
-understands docstrings and you can use Jedi autocompletion in your REPL as
-well.
-
-Jedi uses a very simple API to connect with IDE's. There's a reference
-implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
-which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
-It's really easy.
-
-To give you a simple example how you can use the Jedi library, here is an
-example for the autocompletion feature:
-
->>> import jedi
->>> source = '''
-... import datetime
-... datetime.da'''
->>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
->>> script
+"""
+Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
+historic focus is autocompletion, but it now does static analysis as well.
+Jedi is fast and is very well tested. It understands Python on a deeper level
+than all other static analysis frameworks for Python.
+
+Jedi has support for two different goto functions. It's possible to search for
+related names and to list all names in a Python file and infer them. Jedi
+understands docstrings and you can use Jedi autocompletion in your REPL as
+well.
+
+Jedi uses a very simple API to connect with IDEs. There's a reference
+implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
+which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
+It's really easy.
+
+To give you a simple example of how you can use the Jedi library, here is an
+example for the autocompletion feature:
+
+>>> import jedi
+>>> source = '''
+... import datetime
+... datetime.da'''
+>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
+>>> script
<Script: 'example.py' ...>
->>> completions = script.completions()
->>> completions #doctest: +ELLIPSIS
-[<Completion: date>, <Completion: datetime>, ...]
->>> print(completions[0].complete)
-te
->>> print(completions[0].name)
-date
-
-As you see Jedi is pretty simple and allows you to concentrate on writing a
-good text editor, while still having very good IDE features for Python.
-"""
-
+>>> completions = script.completions()
+>>> completions #doctest: +ELLIPSIS
+[<Completion: date>, <Completion: datetime>, ...]
+>>> print(completions[0].complete)
+te
+>>> print(completions[0].name)
+date
+
+As you see Jedi is pretty simple and allows you to concentrate on writing a
+good text editor, while still having very good IDE features for Python.
+"""
+
__version__ = '0.13.3'
-
+
from jedi.api import Script, Interpreter, set_debug_function, \
preload_module, names
-from jedi import settings
+from jedi import settings
from jedi.api.environment import find_virtualenvs, find_system_environments, \
get_default_environment, InvalidPythonEnvironment, create_environment, \
get_system_environment
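The environment helpers imported above can be exercised like this (editor's sketch; the attribute names are assumed from the jedi 0.13 API):

import jedi

env = jedi.get_default_environment()     # the interpreter Jedi will inspect
print(env.executable, env.version_info)  # assumed Environment attributes
for venv in jedi.find_virtualenvs():     # yields Environment objects
    print(venv)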
diff --git a/contrib/python/jedi/jedi/__main__.py b/contrib/python/jedi/jedi/__main__.py
index 8498f90762..f2ee047769 100644
--- a/contrib/python/jedi/jedi/__main__.py
+++ b/contrib/python/jedi/jedi/__main__.py
@@ -1,43 +1,43 @@
import sys
-from os.path import join, dirname, abspath, isdir
-
-
+from os.path import join, dirname, abspath, isdir
+
+
def _start_linter():
- """
- This is a pre-alpha API. You're not supposed to use it at all, except for
- testing. It will very likely change.
- """
- import jedi
-
- if '--debug' in sys.argv:
- jedi.set_debug_function()
-
- for path in sys.argv[2:]:
- if path.startswith('--'):
- continue
- if isdir(path):
- import fnmatch
- import os
-
- paths = []
- for root, dirnames, filenames in os.walk(path):
- for filename in fnmatch.filter(filenames, '*.py'):
- paths.append(os.path.join(root, filename))
- else:
- paths = [path]
-
- try:
- for path in paths:
- for error in jedi.Script(path=path)._analysis():
- print(error)
- except Exception:
- if '--pdb' in sys.argv:
+ """
+ This is a pre-alpha API. You're not supposed to use it at all, except for
+ testing. It will very likely change.
+ """
+ import jedi
+
+ if '--debug' in sys.argv:
+ jedi.set_debug_function()
+
+ for path in sys.argv[2:]:
+ if path.startswith('--'):
+ continue
+ if isdir(path):
+ import fnmatch
+ import os
+
+ paths = []
+ for root, dirnames, filenames in os.walk(path):
+ for filename in fnmatch.filter(filenames, '*.py'):
+ paths.append(os.path.join(root, filename))
+ else:
+ paths = [path]
+
+ try:
+ for path in paths:
+ for error in jedi.Script(path=path)._analysis():
+ print(error)
+ except Exception:
+ if '--pdb' in sys.argv:
import traceback
traceback.print_exc()
- import pdb
- pdb.post_mortem()
- else:
- raise
+ import pdb
+ pdb.post_mortem()
+ else:
+ raise
if len(sys.argv) == 2 and sys.argv[1] == 'repl':
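The directory walk in _start_linter uses a standard os.walk/fnmatch idiom; a standalone sketch:

import fnmatch
import os

def iter_python_files(root):
    """Yield every *.py path below root, mirroring _start_linter's walk."""
    for dirpath, dirnames, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, '*.py'):
            yield os.path.join(dirpath, filename)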
diff --git a/contrib/python/jedi/jedi/_compatibility.py b/contrib/python/jedi/jedi/_compatibility.py
index a14b126ec6..f8f93c1063 100644
--- a/contrib/python/jedi/jedi/_compatibility.py
+++ b/contrib/python/jedi/jedi/_compatibility.py
@@ -1,30 +1,30 @@
-"""
+"""
To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been
-created. Clearly there is huge need to use conforming syntax.
-"""
+created. Clearly there is a huge need to use conforming syntax.
+"""
import errno
-import sys
-import os
-import re
+import sys
+import os
+import re
import pkgutil
import warnings
import inspect
import subprocess
-try:
- import importlib
-except ImportError:
- pass
-
-is_py3 = sys.version_info[0] >= 3
+try:
+ import importlib
+except ImportError:
+ pass
+
+is_py3 = sys.version_info[0] >= 3
is_py35 = is_py3 and sys.version_info[1] >= 5
py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
-
-
+
+
class DummyFile(object):
def __init__(self, loader, string):
self.loader = loader
self.string = string
-
+
def read(self):
return self.loader.get_source(self.string)
@@ -64,8 +64,8 @@ def find_module_py34(string, path=None, full_name=None, is_global_search=True):
def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
- if loader is None and path is None: # Fallback to find builtins
- try:
+ if loader is None and path is None: # Fallback to find builtins
+ try:
with warnings.catch_warnings(record=True):
# Mute "DeprecationWarning: Use importlib.util.find_spec()
# instead." While we should replace that in the future, it's
@@ -73,17 +73,17 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s
# it was added in Python 3.4 and find_loader hasn't been
# removed in 3.6.
loader = importlib.find_loader(string)
- except ValueError as e:
- # See #491. Importlib might raise a ValueError, to avoid this, we
- # just raise an ImportError to fix the issue.
+ except ValueError as e:
+ # See #491. Importlib might raise a ValueError, to avoid this, we
+ # just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
-
- if loader is None:
+
+ if loader is None:
raise ImportError("Couldn't find a loader for {}".format(string))
-
- try:
- is_package = loader.is_package(string)
- if is_package:
+
+ try:
+ is_package = loader.is_package(string)
+ if is_package:
if hasattr(loader, 'path'):
module_path = os.path.dirname(loader.path)
else:
@@ -93,27 +93,27 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s
module_file = DummyFile(loader, string)
else:
module_file = None
- else:
- module_path = loader.get_filename(string)
+ else:
+ module_path = loader.get_filename(string)
module_file = DummyFile(loader, string)
- except AttributeError:
- # ExtensionLoader has not attribute get_filename, instead it has a
- # path attribute that we can use to retrieve the module path
- try:
- module_path = loader.path
+ except AttributeError:
+ # ExtensionLoader has not attribute get_filename, instead it has a
+ # path attribute that we can use to retrieve the module path
+ try:
+ module_path = loader.path
module_file = DummyFile(loader, string)
- except AttributeError:
- module_path = string
- module_file = None
- finally:
- is_package = False
-
+ except AttributeError:
+ module_path = string
+ module_file = None
+ finally:
+ is_package = False
+
if hasattr(loader, 'archive'):
module_path = loader.archive
- return module_file, module_path, is_package
-
-
+ return module_file, module_path, is_package
+
+
def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True):
# This import is here, because in other places it will raise a
# DeprecationWarning.
@@ -124,7 +124,7 @@ def find_module_pre_py34(string, path=None, full_name=None, is_global_search=Tru
return module_file, module_path, module_type is imp.PKG_DIRECTORY
except ImportError:
pass
-
+
if path is None:
path = sys.path
for item in path:
@@ -147,20 +147,20 @@ def find_module_pre_py34(string, path=None, full_name=None, is_global_search=Tru
except ImportError:
pass
raise ImportError("No module named {}".format(string))
-
+
find_module = find_module_py34 if is_py3 else find_module_pre_py34
-find_module.__doc__ = """
-Provides information about a module.
-
-This function isolates the differences in importing libraries introduced with
-python 3.3 on; it gets a module name and optionally a path. It will return a
-tuple containin an open file for the module (if not builtin), the filename
-or the name of the module if it is a builtin one and a boolean indicating
-if the module is contained in a package.
-"""
-
-
+find_module.__doc__ = """
+Provides information about a module.
+
+This function isolates the differences in importing libraries introduced with
+Python 3.3 onwards; it gets a module name and optionally a path. It returns a
+tuple containing an open file for the module (if not builtin), the filename
+or the name of the module if it is a builtin one, and a boolean indicating
+if the module is contained in a package.
+"""
+
+
def _iter_modules(paths, prefix=''):
# Copy of pkgutil.iter_modules adapted to work with namespaces
@@ -236,69 +236,69 @@ else:
return [suffix for suffix, _, _ in imp.get_suffixes()]
-# unicode function
-try:
- unicode = unicode
-except NameError:
- unicode = str
-
-
-# re-raise function
-if is_py3:
- def reraise(exception, traceback):
- raise exception.with_traceback(traceback)
-else:
- eval(compile("""
-def reraise(exception, traceback):
- raise exception, None, traceback
-""", 'blub', 'exec'))
-
-reraise.__doc__ = """
-Re-raise `exception` with a `traceback` object.
-
-Usage::
-
- reraise(Exception, sys.exc_info()[2])
-
-"""
-
-
-class Python3Method(object):
- def __init__(self, func):
- self.func = func
-
- def __get__(self, obj, objtype):
- if obj is None:
- return lambda *args, **kwargs: self.func(*args, **kwargs)
- else:
- return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
-
-
-def use_metaclass(meta, *bases):
- """ Create a class with a metaclass. """
- if not bases:
- bases = (object,)
+# unicode function
+try:
+ unicode = unicode
+except NameError:
+ unicode = str
+
+
+# re-raise function
+if is_py3:
+ def reraise(exception, traceback):
+ raise exception.with_traceback(traceback)
+else:
+ eval(compile("""
+def reraise(exception, traceback):
+ raise exception, None, traceback
+""", 'blub', 'exec'))
+
+reraise.__doc__ = """
+Re-raise `exception` with a `traceback` object.
+
+Usage::
+
+ reraise(Exception, sys.exc_info()[2])
+
+"""
+
+
+class Python3Method(object):
+ def __init__(self, func):
+ self.func = func
+
+ def __get__(self, obj, objtype):
+ if obj is None:
+ return lambda *args, **kwargs: self.func(*args, **kwargs)
+ else:
+ return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
+
+
+def use_metaclass(meta, *bases):
+ """ Create a class with a metaclass. """
+ if not bases:
+ bases = (object,)
return meta("Py2CompatibilityMetaClass", bases, {})
-
-
-try:
- encoding = sys.stdout.encoding
- if encoding is None:
- encoding = 'utf-8'
-except AttributeError:
- encoding = 'ascii'
-
-
+
+
+try:
+ encoding = sys.stdout.encoding
+ if encoding is None:
+ encoding = 'utf-8'
+except AttributeError:
+ encoding = 'ascii'
+
+
def u(string, errors='strict'):
- """Cast to unicode DAMMIT!
- Written because Python2 repr always implicitly casts to a string, so we
- have to cast back to a unicode (and we now that we always deal with valid
- unicode, because we check that in the beginning).
- """
+ """Cast to unicode DAMMIT!
+ Written because Python2 repr always implicitly casts to a string, so we
+ have to cast back to a unicode (and we know that we always deal with valid
+ unicode, because we check that in the beginning).
+ """
if isinstance(string, bytes):
return unicode(string, encoding='UTF-8', errors=errors)
- return string
-
+ return string
+
def cast_path(obj):
"""
@@ -320,29 +320,29 @@ def force_unicode(obj):
return cast_path(obj)
-try:
- import builtins # module name in python 3
-except ImportError:
+try:
+ import builtins # module name in python 3
+except ImportError:
import __builtin__ as builtins # noqa: F401
-
-
+
+
import ast # noqa: F401
-
-
-def literal_eval(string):
- return ast.literal_eval(string)
-
-
-try:
- from itertools import zip_longest
-except ImportError:
+
+
+def literal_eval(string):
+ return ast.literal_eval(string)
+
+
+try:
+ from itertools import zip_longest
+except ImportError:
from itertools import izip_longest as zip_longest # Python 2 # noqa: F401
-
+
try:
FileNotFoundError = FileNotFoundError
except NameError:
FileNotFoundError = IOError
-
+
try:
NotADirectoryError = NotADirectoryError
except NameError:
@@ -354,18 +354,18 @@ except NameError:
PermissionError = IOError
-def no_unicode_pprint(dct):
- """
- Python 2/3 dict __repr__ may be different, because of unicode differens
- (with or without a `u` prefix). Normally in doctests we could use `pprint`
- to sort dicts and check for equality, but here we have to write a separate
- function to do that.
- """
- import pprint
- s = pprint.pformat(dct)
- print(re.sub("u'", "'", s))
-
-
+def no_unicode_pprint(dct):
+ """
+ Python 2/3 dict __repr__ may be different, because of unicode differences
+ (with or without a `u` prefix). Normally in doctests we could use `pprint`
+ to sort dicts and check for equality, but here we have to write a separate
+ function to do that.
+ """
+ import pprint
+ s = pprint.pformat(dct)
+ print(re.sub("u'", "'", s))
+
+
def print_to_stderr(*args):
if is_py3:
eval("print(*args, file=sys.stderr)")
@@ -374,22 +374,22 @@ def print_to_stderr(*args):
sys.stderr.flush()
-def utf8_repr(func):
- """
- ``__repr__`` methods in Python 2 don't allow unicode objects to be
- returned. Therefore cast them to utf-8 bytes in this decorator.
- """
- def wrapper(self):
- result = func(self)
- if isinstance(result, unicode):
- return result.encode('utf-8')
- else:
- return result
-
- if is_py3:
- return func
- else:
- return wrapper
+def utf8_repr(func):
+ """
+ ``__repr__`` methods in Python 2 don't allow unicode objects to be
+ returned. Therefore cast them to utf-8 bytes in this decorator.
+ """
+ def wrapper(self):
+ result = func(self)
+ if isinstance(result, unicode):
+ return result.encode('utf-8')
+ else:
+ return result
+
+ if is_py3:
+ return func
+ else:
+ return wrapper
if is_py3:
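A few of the compatibility helpers above in action (editor's sketch):

import sys

print(u(b'caf\xc3\xa9'))  # bytes -> u'café' via UTF-8 decode

class Meta(type):
    pass

class Compat(use_metaclass(Meta)):  # metaclass syntax valid on Py2 and Py3
    pass

try:
    1 / 0
except ZeroDivisionError as e:
    pass  # reraise(e, sys.exc_info()[2]) would re-raise with the original traceback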
diff --git a/contrib/python/jedi/jedi/api/__init__.py b/contrib/python/jedi/jedi/api/__init__.py
index 69d34ab64f..9e568cf8f8 100644
--- a/contrib/python/jedi/jedi/api/__init__.py
+++ b/contrib/python/jedi/jedi/api/__init__.py
@@ -1,34 +1,34 @@
-"""
-The API basically only provides one class. You can create a :class:`Script` and
-use its methods.
-
-Additionally you can add a debug function with :func:`set_debug_function`.
+"""
+The API basically only provides one class. You can create a :class:`Script` and
+use its methods.
+
+Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
-
-.. warning:: Please, note that Jedi is **not thread safe**.
-"""
-import os
+
+.. warning:: Please, note that Jedi is **not thread safe**.
+"""
+import os
import sys
-import warnings
-
+import warnings
+
import parso
from parso.python import tree
from jedi._compatibility import force_unicode, is_py3
from jedi.parser_utils import get_executable_nodes
-from jedi import debug
-from jedi import settings
-from jedi import cache
-from jedi.api import classes
-from jedi.api import interpreter
-from jedi.api import helpers
+from jedi import debug
+from jedi import settings
+from jedi import cache
+from jedi.api import classes
+from jedi.api import interpreter
+from jedi.api import helpers
from jedi.api.completion import Completion
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project
-from jedi.evaluate import Evaluator
-from jedi.evaluate import imports
+from jedi.evaluate import Evaluator
+from jedi.evaluate import imports
from jedi.evaluate import usages
from jedi.evaluate.arguments import try_iter_content
from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
@@ -37,20 +37,20 @@ from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.syntax_tree import tree_name_to_contexts
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.context.iterable import unpack_tuple_to_dict
-
-# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
-# can remove some "maximum recursion depth" errors.
+
+# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
+# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
-
-
-class Script(object):
- """
- A Script is the base for completions, goto or whatever you want to do with
- |jedi|.
-
- You can either use the ``source`` parameter or ``path`` to read a file.
- Usually you're going to want to use both of them (in an editor).
-
+
+
+class Script(object):
+ """
+ A Script is the base for completions, goto or whatever you want to do with
+ |jedi|.
+
+ You can either use the ``source`` parameter or ``path`` to read a file.
+ Usually you're going to want to use both of them (in an editor).
+
The script might be analyzed in a different ``sys.path`` than |jedi|:
- if `sys_path` parameter is not ``None``, it will be used as ``sys.path``
@@ -63,34 +63,34 @@ class Script(object):
- otherwise ``sys.path`` will match that of |jedi|.
- :param source: The source code of the current file, separated by newlines.
- :type source: str
- :param line: The line to perform actions on (starting with 1).
- :type line: int
+ :param source: The source code of the current file, separated by newlines.
+ :type source: str
+ :param line: The line to perform actions on (starting with 1).
+ :type line: int
:param column: The column of the cursor (starting with 0).
:type column: int
- :param path: The path of the file in the file system, or ``''`` if
- it hasn't been saved yet.
- :type path: str or None
- :param encoding: The encoding of ``source``, if it is not a
- ``unicode`` object (default ``'utf-8'``).
- :type encoding: str
+ :param path: The path of the file in the file system, or ``''`` if
+ it hasn't been saved yet.
+ :type path: str or None
+ :param encoding: The encoding of ``source``, if it is not a
+ ``unicode`` object (default ``'utf-8'``).
+ :type encoding: str
:param sys_path: ``sys.path`` to use during analysis of the script
:type sys_path: list
:param environment: TODO
:type sys_path: Environment
- """
- def __init__(self, source=None, line=None, column=None, path=None,
+ """
+ def __init__(self, source=None, line=None, column=None, path=None,
encoding='utf-8', sys_path=None, environment=None):
- self._orig_path = path
+ self._orig_path = path
# An empty path (also empty string) should always result in no path.
self.path = os.path.abspath(path) if path else None
-
- if source is None:
+
+ if source is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
- source = f.read()
-
+ source = f.read()
+
# Load the Python grammar of the current interpreter.
self._grammar = parso.load_grammar()
@@ -122,8 +122,8 @@ class Script(object):
self._code = source
line = max(len(self._code_lines), 1) if line is None else line
if not (0 < line <= len(self._code_lines)):
- raise ValueError('`line` parameter is not in a valid range.')
-
+ raise ValueError('`line` parameter is not in a valid range.')
+
line_string = self._code_lines[line - 1]
line_len = len(line_string)
if line_string.endswith('\r\n'):
@@ -131,53 +131,53 @@ class Script(object):
if line_string.endswith('\n'):
line_len -= 1
- column = line_len if column is None else column
- if not (0 <= column <= line_len):
+ column = line_len if column is None else column
+ if not (0 <= column <= line_len):
raise ValueError('`column` parameter (%d) is not in a valid range '
'(0-%d) for line %d (%r).' % (
column, line_len, line, line_string))
- self._pos = line, column
+ self._pos = line, column
self._path = path
-
- cache.clear_time_caches()
- debug.reset_time()
-
+
+ cache.clear_time_caches()
+ debug.reset_time()
+
def _get_module(self):
name = '__main__'
if self.path is not None:
import_names = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path)
if import_names is not None:
name = '.'.join(import_names)
-
+
module = ModuleContext(
self._evaluator, self._module_node, self.path,
code_lines=self._code_lines
)
imports.add_module_to_cache(self._evaluator, name, module)
return module
-
- def __repr__(self):
+
+ def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._evaluator.environment,
)
-
- def completions(self):
- """
- Return :class:`classes.Completion` objects. Those objects contain
- information about the completions, more than just names.
-
- :return: Completion objects, sorted by name and __ comes last.
- :rtype: list of :class:`classes.Completion`
- """
- debug.speed('completions start')
+
+ def completions(self):
+ """
+ Return :class:`classes.Completion` objects. Those objects contain
+ information about the completions, more than just names.
+
+ :return: Completion objects, sorted by name; names starting with ``__`` come last.
+ :rtype: list of :class:`classes.Completion`
+ """
+ debug.speed('completions start')
completion = Completion(
self._evaluator, self._get_module(), self._code_lines,
self._pos, self.call_signatures
)
completions = completion.completions()
-
+
def iter_import_completions():
for c in completions:
tree_name = c._name.tree_name
@@ -187,57 +187,57 @@ class Script(object):
if definition is not None \
and definition.type in ('import_name', 'import_from'):
yield c
-
+
if len(list(iter_import_completions())) > 10:
# For now disable completions if there's a lot of imports that
# might potentially be resolved. This is the case for tensorflow
# and has been fixed for it. This is obviously temporary until we
# have a better solution.
self._evaluator.infer_enabled = False
-
- debug.speed('completions end')
+
+ debug.speed('completions end')
return completions
-
- def goto_definitions(self):
- """
- Return the definitions of a the path under the cursor. goto function!
- This follows complicated paths and returns the end, not the first
- definition. The big difference between :meth:`goto_assignments` and
- :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't
- follow imports and statements. Multiple objects may be returned,
- because Python itself is a dynamic language, which means depending on
- an option you can have two different versions of a function.
-
- :rtype: list of :class:`classes.Definition`
- """
+
+ def goto_definitions(self):
+ """
+ Return the definitions of the path under the cursor. A goto function!
+ This follows complicated paths and returns the end, not the first
+ definition. The big difference between :meth:`goto_assignments` and
+ :meth:`goto_definitions` is that :meth:`goto_assignments` doesn't
+ follow imports and statements. Multiple objects may be returned,
+ because Python itself is a dynamic language, which means depending on
+ an option you can have two different versions of a function.
+
+ :rtype: list of :class:`classes.Definition`
+ """
leaf = self._module_node.get_name_of_position(self._pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(self._pos)
if leaf is None:
return []
-
+
context = self._evaluator.create_context(self._get_module(), leaf)
definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)
-
- names = [s.name for s in definitions]
- defs = [classes.Definition(self._evaluator, name) for name in names]
+
+ names = [s.name for s in definitions]
+ defs = [classes.Definition(self._evaluator, name) for name in names]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
- return helpers.sorted_definitions(set(defs))
-
+ return helpers.sorted_definitions(set(defs))
+
def goto_assignments(self, follow_imports=False, follow_builtin_imports=False):
- """
+ """
Return the first definition found, while optionally following imports.
Multiple objects may be returned, because Python itself is a
- dynamic language, which means depending on an option you can have two
- different versions of a function.
-
+ dynamic language, which means depending on an option you can have two
+ different versions of a function.
+
:param follow_imports: The goto call will follow imports.
:param follow_builtin_imports: If follow_imports is True will decide if
it follow builtin imports.
- :rtype: list of :class:`classes.Definition`
- """
+ :rtype: list of :class:`classes.Definition`
+ """
def filter_follow_imports(names, check):
for name in names:
if check(name):
@@ -247,7 +247,7 @@ class Script(object):
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True
-
+
if found_builtin and not isinstance(name, imports.SubModuleName):
yield name
else:
@@ -255,81 +255,81 @@ class Script(object):
yield new_name
else:
yield name
-
+
tree_name = self._module_node.get_name_of_position(self._pos)
if tree_name is None:
- return []
+ return []
context = self._evaluator.create_context(self._get_module(), tree_name)
names = list(self._evaluator.goto(context, tree_name))
-
+
if follow_imports:
def check(name):
return name.is_import()
- else:
+ else:
def check(name):
return isinstance(name, imports.SubModuleName)
-
+
names = filter_follow_imports(names, check)
-
+
defs = [classes.Definition(self._evaluator, d) for d in set(names)]
return helpers.sorted_definitions(defs)
-
+
def usages(self, additional_module_paths=(), **kwargs):
- """
- Return :class:`classes.Definition` objects, which contain all
- names that point to the definition of the name under the cursor. This
- is very useful for refactoring (renaming), or to show all usages of a
- variable.
-
- .. todo:: Implement additional_module_paths
-
+ """
+ Return :class:`classes.Definition` objects, which contain all
+ names that point to the definition of the name under the cursor. This
+ is very useful for refactoring (renaming), or to show all usages of a
+ variable.
+
+ .. todo:: Implement additional_module_paths
+
:param additional_module_paths: Deprecated, never ever worked.
:param include_builtins: Default True, checks if a usage is a builtin
(e.g. ``sys``) and in that case does not return it.
- :rtype: list of :class:`classes.Definition`
- """
+ :rtype: list of :class:`classes.Definition`
+ """
if additional_module_paths:
warnings.warn(
"Deprecated since version 0.12.0. This never even worked, just ignore it.",
DeprecationWarning,
stacklevel=2
)
-
+
def _usages(include_builtins=True):
tree_name = self._module_node.get_name_of_position(self._pos)
if tree_name is None:
# Must be syntax
- return []
-
+ return []
+
names = usages.usages(self._get_module(), tree_name)
-
+
definitions = [classes.Definition(self._evaluator, n) for n in names]
if not include_builtins:
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _usages(**kwargs)
-
- def call_signatures(self):
- """
- Return the function object of the call you're currently in.
-
- E.g. if the cursor is here::
-
- abs(# <-- cursor is here
-
- This would return the ``abs`` function. On the other hand::
-
- abs()# <-- cursor is here
-
+
+ def call_signatures(self):
+ """
+ Return the function object of the call you're currently in.
+
+ E.g. if the cursor is here::
+
+ abs(# <-- cursor is here
+
+ This would return the ``abs`` function. On the other hand::
+
+ abs()# <-- cursor is here
+
This would return an empty list.
-
- :rtype: list of :class:`classes.CallSignature`
- """
+
+ :rtype: list of :class:`classes.CallSignature`
+ """
call_signature_details = \
helpers.get_call_signature_details(self._module_node, self._pos)
if call_signature_details is None:
- return []
-
+ return []
+
context = self._evaluator.create_context(
self._get_module(),
call_signature_details.bracket_leaf
@@ -341,15 +341,15 @@ class Script(object):
self._code_lines,
self._pos
)
- debug.speed('func_call followed')
-
+ debug.speed('func_call followed')
+
return [classes.CallSignature(self._evaluator, d.name,
call_signature_details.bracket_leaf.start_pos,
call_signature_details.call_index,
call_signature_details.keyword_name_str)
for d in definitions if hasattr(d, 'py__call__')]
-
- def _analysis(self):
+
+ def _analysis(self):
self._evaluator.is_analysis = True
self._evaluator.analysis_modules = [self._module_node]
module = self._get_module()
@@ -370,65 +370,65 @@ class Script(object):
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
- else:
+ else:
if node.type == 'name':
defs = self._evaluator.goto_definitions(context, node)
else:
defs = evaluate_call_of_leaf(context, node)
try_iter_content(defs)
self._evaluator.reset_recursion_limitations()
-
+
ana = [a for a in self._evaluator.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._evaluator.is_analysis = False
-
-
-class Interpreter(Script):
- """
- Jedi API for Python REPLs.
-
- In addition to completion of simple attribute access, Jedi
- supports code completion based on static code analysis.
- Jedi can complete attributes of object which is not initialized
- yet.
-
- >>> from os.path import join
- >>> namespace = locals()
+
+
+class Interpreter(Script):
+ """
+ Jedi API for Python REPLs.
+
+ In addition to completion of simple attribute access, Jedi
+ supports code completion based on static code analysis.
+ Jedi can complete attributes of objects that have not been
+ initialized yet.
+
+ >>> from os.path import join
+ >>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
- >>> print(script.completions()[0].name)
- upper
- """
-
- def __init__(self, source, namespaces, **kwds):
- """
- Parse `source` and mixin interpreted Python objects from `namespaces`.
-
- :type source: str
- :arg source: Code to parse.
- :type namespaces: list of dict
- :arg namespaces: a list of namespace dictionaries such as the one
- returned by :func:`locals`.
-
- Other optional arguments are same as the ones for :class:`Script`.
- If `line` and `column` are None, they are assumed be at the end of
- `source`.
- """
+ >>> print(script.completions()[0].name)
+ upper
+ """
+
+ def __init__(self, source, namespaces, **kwds):
+ """
+ Parse `source` and mixin interpreted Python objects from `namespaces`.
+
+ :type source: str
+ :arg source: Code to parse.
+ :type namespaces: list of dict
+ :arg namespaces: a list of namespace dictionaries such as the one
+ returned by :func:`locals`.
+
+ Other optional arguments are the same as the ones for :class:`Script`.
+ If `line` and `column` are None, they are assumed to be at the end of
+ `source`.
+ """
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
-
+
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
- else:
+ else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
-
+
super(Interpreter, self).__init__(source, environment=environment, **kwds)
self.namespaces = namespaces
-
+
def _get_module(self):
return interpreter.MixedModuleContext(
self._evaluator,
@@ -437,28 +437,28 @@ class Interpreter(Script):
path=self.path,
code_lines=self._code_lines,
)
-
-
-def names(source=None, path=None, encoding='utf-8', all_scopes=False,
+
+
+def names(source=None, path=None, encoding='utf-8', all_scopes=False,
definitions=True, references=False, environment=None):
- """
- Returns a list of `Definition` objects, containing name parts.
- This means you can call ``Definition.goto_assignments()`` and get the
- reference of a name.
- The parameters are the same as in :py:class:`Script`, except or the
- following ones:
-
- :param all_scopes: If True lists the names of all scopes instead of only
- the module namespace.
- :param definitions: If True lists the names that have been defined by a
- class, function or a statement (``a = b`` returns ``a``).
- :param references: If True lists all the names that are not listed by
- ``definitions=True``. E.g. ``a = b`` returns ``b``.
- """
- def def_ref_filter(_def):
+ """
+ Returns a list of `Definition` objects, containing name parts.
+ This means you can call ``Definition.goto_assignments()`` and get the
+ reference of a name.
+ The parameters are the same as in :py:class:`Script`, except for the
+ following ones:
+
+ :param all_scopes: If True lists the names of all scopes instead of only
+ the module namespace.
+ :param definitions: If True lists the names that have been defined by a
+ class, function or a statement (``a = b`` returns ``a``).
+ :param references: If True lists all the names that are not listed by
+ ``definitions=True``. E.g. ``a = b`` returns ``b``.
+ """
+ def def_ref_filter(_def):
is_def = _def._name.tree_name.is_definition()
- return definitions and is_def or references and not is_def
-
+ return definitions and is_def or references and not is_def
+
def create_name(name):
if name.parent.type == 'param':
cls = ParamName
@@ -470,7 +470,7 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
name
)
- # Set line/column to a random position, because they don't matter.
+ # Set line/column to a random position, because they don't matter.
script = Script(source, line=1, column=0, path=path, encoding=encoding, environment=environment)
module_context = script._get_module()
defs = [
@@ -479,31 +479,31 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
create_name(name)
) for name in get_module_names(script._module_node, all_scopes)
]
- return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))
-
-
-def preload_module(*modules):
- """
- Preloading modules tells Jedi to load a module now, instead of lazy parsing
- of modules. Usful for IDEs, to control which modules to load on startup.
-
- :param modules: different module names, list of string.
- """
- for m in modules:
- s = "import %s as x; x." % m
- Script(s, 1, len(s), None).completions()
-
-
-def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
- notices=True, speed=True):
- """
- Define a callback debug function to get all the debug messages.
-
+ return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))
+
+
+def preload_module(*modules):
+ """
+ Preloading modules tells Jedi to load a module now, instead of lazy parsing
+ of modules. Useful for IDEs, to control which modules to load on startup.
+
+ :param modules: different module names, list of string.
+ """
+ for m in modules:
+ s = "import %s as x; x." % m
+ Script(s, 1, len(s), None).completions()
+
+
+def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
+ notices=True, speed=True):
+ """
+ Define a callback debug function to get all the debug messages.
+
If you don't specify any arguments, debug messages will be printed to stdout.
- :param func_cb: The callback function for debug messages, with n params.
- """
- debug.debug_function = func_cb
- debug.enable_warning = warnings
- debug.enable_notice = notices
- debug.enable_speed = speed
+ :param func_cb: The callback function for debug messages, with n params.
+ """
+ debug.debug_function = func_cb
+ debug.enable_warning = warnings
+ debug.enable_notice = notices
+ debug.enable_speed = speed
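End to end, the API above composes like this (editor's sketch):

import jedi

source = "import json\njson.lo"
script = jedi.Script(source, line=2, column=len('json.lo'))
for c in script.completions():                    # e.g. load, loads
    print(c.name, c.complete)
for d in script.goto_assignments(follow_imports=True):
    print(d.module_path, d.line)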
diff --git a/contrib/python/jedi/jedi/api/classes.py b/contrib/python/jedi/jedi/api/classes.py
index bba278adc8..9602e04a8b 100644
--- a/contrib/python/jedi/jedi/api/classes.py
+++ b/contrib/python/jedi/jedi/api/classes.py
@@ -1,138 +1,138 @@
-"""
-The :mod:`jedi.api.classes` module contains the return classes of the API.
-These classes are the much bigger part of the whole API, because they contain
-the interesting information about completion and goto operations.
-"""
-import re
-
+"""
+The :mod:`jedi.api.classes` module contains the return classes of the API.
+These classes are the much bigger part of the whole API, because they contain
+the interesting information about completion and goto operations.
+"""
+import re
+
from parso.python.tree import search_ancestor
-from jedi import settings
+from jedi import settings
from jedi.evaluate.utils import ignored, unite
from jedi.cache import memoize_method
-from jedi.evaluate import imports
-from jedi.evaluate import compiled
+from jedi.evaluate import imports
+from jedi.evaluate import compiled
from jedi.evaluate.imports import ImportName
from jedi.evaluate.context import instance
from jedi.evaluate.context import ClassContext, FunctionExecutionContext
from jedi.api.keywords import KeywordName
-
-
+
+
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(evaluator, context):
- """
- List sub-definitions (e.g., methods in class).
-
- :type scope: Scope
- :rtype: list of Definition
- """
+ """
+ List sub-definitions (e.g., methods in class).
+
+ :type context: Context
+ :rtype: list of Definition
+ """
filter = next(context.get_filters(search_global=True))
names = [name for name in filter.values()]
return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
-
-
-class BaseDefinition(object):
- _mapping = {
- 'posixpath': 'os.path',
- 'riscospath': 'os.path',
- 'ntpath': 'os.path',
- 'os2emxpath': 'os.path',
- 'macpath': 'os.path',
- 'genericpath': 'os.path',
- 'posix': 'os',
- '_io': 'io',
- '_functools': 'functools',
- '_sqlite3': 'sqlite3',
- '__builtin__': '',
- 'builtins': '',
- }
-
- _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
- 'argparse._ActionsContainer': 'argparse.ArgumentParser',
- }.items())
-
- def __init__(self, evaluator, name):
- self._evaluator = evaluator
- self._name = name
- """
+
+
+class BaseDefinition(object):
+ _mapping = {
+ 'posixpath': 'os.path',
+ 'riscospath': 'os.path',
+ 'ntpath': 'os.path',
+ 'os2emxpath': 'os.path',
+ 'macpath': 'os.path',
+ 'genericpath': 'os.path',
+ 'posix': 'os',
+ '_io': 'io',
+ '_functools': 'functools',
+ '_sqlite3': 'sqlite3',
+ '__builtin__': '',
+ 'builtins': '',
+ }
+
+ _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
+ 'argparse._ActionsContainer': 'argparse.ArgumentParser',
+ }.items())
+
+ def __init__(self, evaluator, name):
+ self._evaluator = evaluator
+ self._name = name
+ """
An instance of a :class:`parso.python.tree.Name` subclass.
- """
+ """
self.is_keyword = isinstance(self._name, KeywordName)
-
- # generate a path to the definition
+
+ # generate a path to the definition
self._module = name.get_root_context()
- if self.in_builtin_module():
- self.module_path = None
- else:
+ if self.in_builtin_module():
+ self.module_path = None
+ else:
self.module_path = self._module.py__file__()
- """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
-
- @property
- def name(self):
- """
- Name of variable/function/class/module.
-
- For example, for ``x = None`` it returns ``'x'``.
-
- :rtype: str or None
- """
+ """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
+
+ @property
+ def name(self):
+ """
+ Name of variable/function/class/module.
+
+ For example, for ``x = None`` it returns ``'x'``.
+
+ :rtype: str or None
+ """
return self._name.string_name
-
- @property
- def type(self):
- """
- The type of the definition.
-
- Here is an example of the value of this attribute. Let's consider
- the following source. As what is in ``variable`` is unambiguous
- to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
- definitions for ``keyword``, ``f``, ``C`` and ``x``.
-
- >>> from jedi import Script
- >>> source = '''
- ... import keyword
- ...
- ... class C:
- ... pass
- ...
- ... class D:
- ... pass
- ...
- ... x = D()
- ...
- ... def f():
- ... pass
- ...
- ... for variable in [keyword, f, C, x]:
- ... variable'''
-
- >>> script = Script(source)
- >>> defs = script.goto_definitions()
-
- Before showing what is in ``defs``, let's sort it by :attr:`line`
- so that it is easy to relate the result to the source code.
-
- >>> defs = sorted(defs, key=lambda d: d.line)
- >>> defs # doctest: +NORMALIZE_WHITESPACE
- [<Definition module keyword>, <Definition class C>,
+
+ @property
+ def type(self):
+ """
+ The type of the definition.
+
+ Here is an example of the value of this attribute. Let's consider
+ the following source. As what is in ``variable`` is unambiguous
+ to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
+ definitions for ``keyword``, ``f``, ``C`` and ``x``.
+
+ >>> from jedi import Script
+ >>> source = '''
+ ... import keyword
+ ...
+ ... class C:
+ ... pass
+ ...
+ ... class D:
+ ... pass
+ ...
+ ... x = D()
+ ...
+ ... def f():
+ ... pass
+ ...
+ ... for variable in [keyword, f, C, x]:
+ ... variable'''
+
+ >>> script = Script(source)
+ >>> defs = script.goto_definitions()
+
+ Before showing what is in ``defs``, let's sort it by :attr:`line`
+ so that it is easy to relate the result to the source code.
+
+ >>> defs = sorted(defs, key=lambda d: d.line)
+ >>> defs # doctest: +NORMALIZE_WHITESPACE
+ [<Definition module keyword>, <Definition class C>,
<Definition instance D>, <Definition def f>]
-
- Finally, here is what you can get from :attr:`type`:
-
+
+ Finally, here is what you can get from :attr:`type`:
+
>>> defs = [str(d.type) for d in defs] # It's unicode and in Py2 has u before it.
>>> defs[0]
- 'module'
+ 'module'
>>> defs[1]
- 'class'
+ 'class'
>>> defs[2]
- 'instance'
+ 'instance'
>>> defs[3]
- 'function'
-
- """
+ 'function'
+
+ """
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
@@ -141,14 +141,14 @@ class BaseDefinition(object):
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
-
+
if isinstance(self._name, imports.SubModuleName) or resolve:
for context in self._name.infer():
return context.api_type
return self._name.api_type
-
- def _path(self):
- """The path to a module/class/function definition."""
+
+ def _path(self):
+ """The path to a module/class/function definition."""
def to_reverse():
name = self._name
if name.api_type == 'module':
@@ -169,7 +169,7 @@ class BaseDefinition(object):
# ImportError. So simply return the name.
yield name.string_name
return
- else:
+ else:
yield name.string_name
parent_context = name.parent_context
@@ -181,139 +181,139 @@ class BaseDefinition(object):
yield parent_context.name.string_name
except AttributeError:
pass
- else:
+ else:
for name in reversed(method().split('.')):
yield name
parent_context = parent_context.parent_context
return reversed(list(to_reverse()))
-
- @property
- def module_name(self):
- """
- The module name.
-
- >>> from jedi import Script
- >>> source = 'import json'
- >>> script = Script(source, path='example.py')
- >>> d = script.goto_definitions()[0]
- >>> print(d.module_name) # doctest: +ELLIPSIS
- json
- """
+
+ @property
+ def module_name(self):
+ """
+ The module name.
+
+ >>> from jedi import Script
+ >>> source = 'import json'
+ >>> script = Script(source, path='example.py')
+ >>> d = script.goto_definitions()[0]
+ >>> print(d.module_name) # doctest: +ELLIPSIS
+ json
+ """
return self._module.name.string_name
-
- def in_builtin_module(self):
- """Whether this is a builtin module."""
- return isinstance(self._module, compiled.CompiledObject)
-
- @property
- def line(self):
- """The line where the definition occurs (starting with 1)."""
+
+ def in_builtin_module(self):
+ """Whether this is a builtin module."""
+ return isinstance(self._module, compiled.CompiledObject)
+
+ @property
+ def line(self):
+ """The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
- return None
+ return None
return start_pos[0]
-
- @property
- def column(self):
- """The column where the definition occurs (starting with 0)."""
+
+ @property
+ def column(self):
+ """The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
- return None
+ return None
return start_pos[1]
-
+
def docstring(self, raw=False, fast=True):
- r"""
- Return a document string for this completion object.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''\
- ... def f(a, b=1):
- ... "Document for function f."
- ... '''
- >>> script = Script(source, 1, len('def f'), 'example.py')
- >>> doc = script.goto_definitions()[0].docstring()
- >>> print(doc)
- f(a, b=1)
- <BLANKLINE>
- Document for function f.
-
- Notice that useful extra information is added to the actual
- docstring. For a function, it is the call signature. If you need
- the actual docstring, use ``raw=True`` instead.
-
- >>> print(script.goto_definitions()[0].docstring(raw=True))
- Document for function f.
-
+ r"""
+ Return a document string for this completion object.
+
+ Example:
+
+ >>> from jedi import Script
+ >>> source = '''\
+ ... def f(a, b=1):
+ ... "Document for function f."
+ ... '''
+ >>> script = Script(source, 1, len('def f'), 'example.py')
+ >>> doc = script.goto_definitions()[0].docstring()
+ >>> print(doc)
+ f(a, b=1)
+ <BLANKLINE>
+ Document for function f.
+
+ Notice that useful extra information is added to the actual
+ docstring. For a function, it is the call signature. If you need
+ the actual docstring, use ``raw=True`` instead.
+
+ >>> print(script.goto_definitions()[0].docstring(raw=True))
+ Document for function f.
+
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing `import a` is slow if you use
``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
- """
+ """
return _Help(self._name).docstring(fast=fast, raw=raw)
-
- @property
- def description(self):
- """A textual description of the object."""
+
+ @property
+ def description(self):
+ """A textual description of the object."""
return self._name.string_name
-
- @property
- def full_name(self):
- """
- Dot-separated path of this object.
-
- It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
- It is useful when you want to look up the Python manual for the
- object at hand.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''
- ... import os
- ... os.path.join'''
- >>> script = Script(source, 3, len('os.path.join'), 'example.py')
- >>> print(script.goto_definitions()[0].full_name)
- os.path.join
-
+
+ @property
+ def full_name(self):
+ """
+ Dot-separated path of this object.
+
+ It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
+ It is useful when you want to look up the Python manual for the
+ object at hand.
+
+ Example:
+
+ >>> from jedi import Script
+ >>> source = '''
+ ... import os
+ ... os.path.join'''
+ >>> script = Script(source, 3, len('os.path.join'), 'example.py')
+ >>> print(script.goto_definitions()[0].full_name)
+ os.path.join
+
Notice that it returns ``'os.path.join'`` instead of (for example)
``'posixpath.join'``. This is not correct, since the module's name would
be ``<module 'posixpath' ...>``. However, most users find the former
more practical.
- """
+ """
path = list(self._path())
- # TODO add further checks, the mapping should only occur on stdlib.
- if not path:
- return None # for keywords the path is empty
-
+ # TODO add further checks, the mapping should only occur on stdlib.
+ if not path:
+ return None # for keywords the path is empty
+
with ignored(KeyError):
- path[0] = self._mapping[path[0]]
- for key, repl in self._tuple_mapping.items():
- if tuple(path[:len(key)]) == key:
- path = [repl] + path[len(key):]
-
- return '.'.join(path if path[0] else path[1:])
-
- def goto_assignments(self):
+ path[0] = self._mapping[path[0]]
+ for key, repl in self._tuple_mapping.items():
+ if tuple(path[:len(key)]) == key:
+ path = [repl] + path[len(key):]
+
+ return '.'.join(path if path[0] else path[1:])
+
+ def goto_assignments(self):
if self._name.tree_name is None:
return self
-
+
names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
return [Definition(self._evaluator, n) for n in names]
-
+
def _goto_definitions(self):
# TODO make this function public.
return [Definition(self._evaluator, d.name) for d in self._name.infer()]
- @property
+ @property
@memoize_method
- def params(self):
- """
- Raises an ``AttributeError`` if the definition is not callable.
- Otherwise returns a list of `Definition` objects that represent the params.
- """
+ def params(self):
+ """
+ Raises an ``AttributeError`` if the definition is not callable.
+ Otherwise returns a list of `Definition` objects that represent the params.
+ """
def get_param_names(context):
param_names = []
if context.api_type == 'function':
@@ -341,28 +341,28 @@ class BaseDefinition(object):
return param_names
followed = list(self._name.infer())
- if not followed or not hasattr(followed[0], 'py__call__'):
+ if not followed or not hasattr(followed[0], 'py__call__'):
raise AttributeError('There are no params defined on this.')
context = followed[0] # only check the first one.
-
+
return [Definition(self._evaluator, n) for n in get_param_names(context)]
-
- def parent(self):
+
+ def parent(self):
context = self._name.parent_context
if context is None:
return None
-
+
if isinstance(context, FunctionExecutionContext):
context = context.function_context
return Definition(self._evaluator, context.name)
- def __repr__(self):
- return "<%s %s>" % (type(self).__name__, self.description)
-
+ def __repr__(self):
+ return "<%s %s>" % (type(self).__name__, self.description)
+
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
-
+
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
@@ -379,47 +379,47 @@ class BaseDefinition(object):
return ''.join(lines[start_index:index + after + 1])
-class Completion(BaseDefinition):
- """
- `Completion` objects are returned from :meth:`api.Script.completions`. They
- provide additional information about a completion.
- """
+class Completion(BaseDefinition):
+ """
+ `Completion` objects are returned from :meth:`api.Script.completions`. They
+ provide additional information about a completion.
+ """
def __init__(self, evaluator, name, stack, like_name_length):
- super(Completion, self).__init__(evaluator, name)
-
- self._like_name_length = like_name_length
+ super(Completion, self).__init__(evaluator, name)
+
+ self._like_name_length = like_name_length
self._stack = stack
-
- # Completion objects with the same name (i.e. duplicate items in
- # the completion list)
- self._same_name_completions = []
-
- def _complete(self, like_name):
- append = ''
- if settings.add_bracket_after_function \
- and self.type == 'Function':
- append = '('
-
+
+ # Completion objects with the same name (i.e. duplicate items in
+ # the completion list)
+ self._same_name_completions = []
+
+ def _complete(self, like_name):
+ append = ''
+ if settings.add_bracket_after_function \
+ and self.type == 'Function':
+ append = '('
+
if self._name.api_type == 'param' and self._stack is not None:
nonterminals = [stack_node.nonterminal for stack_node in self._stack]
if 'trailer' in nonterminals and 'argument' not in nonterminals:
# TODO this doesn't work for nested calls.
append += '='
-
+
name = self._name.string_name
- if like_name:
- name = name[self._like_name_length:]
+ if like_name:
+ name = name[self._like_name_length:]
return name + append
-
- @property
- def complete(self):
- """
- Return the rest of the word, e.g. completing ``isinstance``::
-
- isinstan# <-- Cursor is here
-
- would return the string 'ce'. It may also add extra characters, depending
- on your `settings.py`.
+
+ @property
+ def complete(self):
+ """
+ Return the rest of the word, e.g. completing ``isinstance``::
+
+ isinstan# <-- Cursor is here
+
+ would return the string 'ce'. It may also add extra characters, depending
+ on your `settings.py`.
Assuming the following function definition::
@@ -430,24 +430,24 @@ class Completion(BaseDefinition):
would be `am=`
- """
- return self._complete(True)
-
- @property
- def name_with_symbols(self):
- """
+ """
+ return self._complete(True)
+
+ @property
+ def name_with_symbols(self):
+ """
Similar to :attr:`name`, but it also returns the symbols; for
example, assuming the following function definition::
-
+
def foo(param=0):
pass
-
+
completing ``foo(`` would give a ``Completion`` whose
``name_with_symbols`` would be ``param=``.
- """
- return self._complete(False)
-
+ """
+ return self._complete(False)
+
def docstring(self, raw=False, fast=True):
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
@@ -455,65 +455,65 @@ class Completion(BaseDefinition):
fast = False
return super(Completion, self).docstring(raw=raw, fast=fast)
- @property
- def description(self):
- """Provide a description of the completion object."""
+ @property
+ def description(self):
+ """Provide a description of the completion object."""
# TODO improve the class structure.
return Definition.description.__get__(self)
-
- def __repr__(self):
+
+ def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.string_name)
-
+
@memoize_method
- def follow_definition(self):
- """
- Return the original definitions. I strongly recommend not using it for
- your completions, because it might slow down |jedi|. If you want to
- read only a few objects (<=20), it might be useful, especially to get
- the original docstrings. The basic problem of this function is that it
- follows all results. This means with 1000 completions (e.g. numpy),
- it's just PITA-slow.
- """
+ def follow_definition(self):
+ """
+ Return the original definitions. I strongly recommend not using it for
+ your completions, because it might slow down |jedi|. If you want to
+ read only a few objects (<=20), it might be useful, especially to get
+ the original docstrings. The basic problem of this function is that it
+ follows all results. This means with 1000 completions (e.g. numpy),
+ it's just PITA-slow.
+ """
defs = self._name.infer()
- return [Definition(self._evaluator, d.name) for d in defs]
-
-
+ return [Definition(self._evaluator, d.name) for d in defs]
+
+
class Definition(BaseDefinition):
- """
- *Definition* objects are returned from :meth:`api.Script.goto_assignments`
- or :meth:`api.Script.goto_definitions`.
- """
- def __init__(self, evaluator, definition):
- super(Definition, self).__init__(evaluator, definition)
-
- @property
- def description(self):
- """
- A description of the :class:`.Definition` object, which is heavily used
- in testing, e.g. for ``isinstance`` it returns ``def isinstance``.
-
- Example:
-
- >>> from jedi import Script
- >>> source = '''
- ... def f():
- ... pass
- ...
- ... class C:
- ... pass
- ...
- ... variable = f if random.choice([0,1]) else C'''
- >>> script = Script(source, column=3) # line is maximum by default
- >>> defs = script.goto_definitions()
- >>> defs = sorted(defs, key=lambda d: d.line)
- >>> defs
- [<Definition def f>, <Definition class C>]
- >>> str(defs[0].description) # strip literals in python2
- 'def f'
- >>> str(defs[1].description)
- 'class C'
-
- """
+ """
+ *Definition* objects are returned from :meth:`api.Script.goto_assignments`
+ or :meth:`api.Script.goto_definitions`.
+ """
+ def __init__(self, evaluator, definition):
+ super(Definition, self).__init__(evaluator, definition)
+
+ @property
+ def description(self):
+ """
+ A description of the :class:`.Definition` object, which is heavily used
+ in testing, e.g. for ``isinstance`` it returns ``def isinstance``.
+
+ Example:
+
+ >>> from jedi import Script
+ >>> source = '''
+ ... def f():
+ ... pass
+ ...
+ ... class C:
+ ... pass
+ ...
+ ... variable = f if random.choice([0,1]) else C'''
+ >>> script = Script(source, column=3) # line is maximum by default
+ >>> defs = script.goto_definitions()
+ >>> defs = sorted(defs, key=lambda d: d.line)
+ >>> defs
+ [<Definition def f>, <Definition class C>]
+ >>> str(defs[0].description) # strip literals in python2
+ 'def f'
+ >>> str(defs[1].description)
+ 'class C'
+
+ """
typ = self.type
tree_name = self._name.tree_name
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
@@ -527,152 +527,152 @@ class Definition(BaseDefinition):
include_comma=False
)
return typ + ' ' + code
-
+
definition = tree_name.get_definition() or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
- # Delete comments:
+ # Delete comments:
txt = re.sub(r'#[^\n]+\n', ' ', txt)
- # Delete multi spaces/newlines
+ # Delete multi spaces/newlines
txt = re.sub(r'\s+', ' ', txt).strip()
return txt
-
- @property
- def desc_with_module(self):
- """
- In addition to the definition, also return the module.
-
- .. warning:: Don't use this function yet, its behaviour may change. If
- you really need it, talk to me.
-
- .. todo:: Add the full path. This function should return a
- `module.class.function` path.
- """
+
+ @property
+ def desc_with_module(self):
+ """
+ In addition to the definition, also return the module.
+
+ .. warning:: Don't use this function yet, its behaviour may change. If
+ you really need it, talk to me.
+
+ .. todo:: Add the full path. This function should return a
+ `module.class.function` path.
+ """
position = '' if self.in_builtin_module() else '@%s' % self.line
- return "%s:%s%s" % (self.module_name, self.description, position)
-
+ return "%s:%s%s" % (self.module_name, self.description, position)
+
@memoize_method
- def defined_names(self):
- """
- List sub-definitions (e.g., methods in a class).
-
- :rtype: list of Definition
- """
+ def defined_names(self):
+ """
+ List sub-definitions (e.g., methods in a class).
+
+ :rtype: list of Definition
+ """
defs = self._name.infer()
return sorted(
unite(defined_names(self._evaluator, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
-
- def is_definition(self):
- """
- Returns True if defined as a name in a statement, function or class.
- Returns False if it's a reference to such a definition.
- """
+
+ def is_definition(self):
+ """
+ Returns True if defined as a name in a statement, function or class.
+ Returns False if it's a reference to such a definition.
+ """
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
-
- def __eq__(self, other):
- return self._name.start_pos == other._name.start_pos \
- and self.module_path == other.module_path \
- and self.name == other.name \
- and self._evaluator == other._evaluator
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
-
-
-class CallSignature(Definition):
- """
- `CallSignature` objects are the return value of `Script.function_definition`.
- It knows what function you are currently in, e.g. `isinstance(` would
- return the `isinstance` function; without `(` it would return nothing.
- """
+
+ def __eq__(self, other):
+ return self._name.start_pos == other._name.start_pos \
+ and self.module_path == other.module_path \
+ and self.name == other.name \
+ and self._evaluator == other._evaluator
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
+
+
+class CallSignature(Definition):
+ """
+ `CallSignature` objects are the return value of `Script.function_definition`.
+ It knows what function you are currently in, e.g. `isinstance(` would
+ return the `isinstance` function; without `(` it would return nothing.
+ """
def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
- super(CallSignature, self).__init__(evaluator, executable_name)
- self._index = index
+ super(CallSignature, self).__init__(evaluator, executable_name)
+ self._index = index
self._key_name_str = key_name_str
self._bracket_start_pos = bracket_start_pos
-
- @property
- def index(self):
- """
- The param index of the current call.
- Returns None if the index cannot be found in the current call.
- """
+
+ @property
+ def index(self):
+ """
+ The param index of the current call.
+ Returns None if the index cannot be found in the current call.
+ """
if self._key_name_str is not None:
- for i, param in enumerate(self.params):
+ for i, param in enumerate(self.params):
if self._key_name_str == param.name:
- return i
+ return i
if self.params:
param_name = self.params[-1]._name
if param_name.tree_name is not None:
if param_name.tree_name.get_definition().star_count == 2:
return i
return None
-
- if self._index >= len(self.params):
- for i, param in enumerate(self.params):
+
+ if self._index >= len(self.params):
+ for i, param in enumerate(self.params):
tree_name = param._name.tree_name
if tree_name is not None:
# *args case
if tree_name.get_definition().star_count == 1:
return i
- return None
- return self._index
-
- @property
- def bracket_start(self):
- """
- The start position of the bracket that is responsible for the last
- function call.
- """
+ return None
+ return self._index
+
+ @property
+ def bracket_start(self):
+ """
+ The start position of the bracket that is responsible for the last
+ function call.
+ """
return self._bracket_start_pos
-
- @property
+
+ @property
def _params_str(self):
return ', '.join([p.description[6:]
for p in self.params])
-
- def __repr__(self):
+
+ def __repr__(self):
return '<%s: %s index=%r params=[%s]>' % (
type(self).__name__,
self._name.string_name,
self._index,
self._params_str,
)
-
-
-class _Help(object):
- """
- Temporary implementation; will be used as `Script.help()` or something
- similar in the future.
- """
- def __init__(self, definition):
- self._name = definition
-
+
+
+class _Help(object):
+ """
+ Temporary implementation; will be used as `Script.help()` or something
+ similar in the future.
+ """
+ def __init__(self, definition):
+ self._name = definition
+
@memoize_method
def _get_contexts(self, fast):
if isinstance(self._name, ImportName) and fast:
return {}
-
+
if self._name.api_type == 'statement':
return {}
return self._name.infer()
def docstring(self, fast=True, raw=True):
- """
+ """
The docstring ``__doc__`` for any object.
-
- See :attr:`doc` for example.
- """
+
+ See :attr:`doc` for example.
+ """
# TODO: Use all of the followed objects as output. Possibly dividing
# them by a few dashes.
for context in self._get_contexts(fast=fast):
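Taken together, the classes in this file are what every public jedi call hands
back. A short sketch in the same doctest spirit as the docstrings above (my
illustration, assuming this vendored jedi is importable)::

    import jedi

    source = 'import os\nos.path.join'
    script = jedi.Script(source, 2, len('os.path.join'), 'example.py')
    definition, = script.goto_definitions()

    print(definition.name)         # 'join'
    print(definition.type)         # 'function'
    print(definition.module_name)  # 'posixpath' on POSIX systems
    print(definition.full_name)    # 'os.path.join', via the _mapping table
    print(definition.docstring().splitlines()[0])  # the call signature line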
diff --git a/contrib/python/jedi/jedi/api/helpers.py b/contrib/python/jedi/jedi/api/helpers.py
index 1cf24b6a32..7cf4bc6fc4 100644
--- a/contrib/python/jedi/jedi/api/helpers.py
+++ b/contrib/python/jedi/jedi/api/helpers.py
@@ -1,28 +1,28 @@
-"""
-Helpers for the API
-"""
-import re
+"""
+Helpers for the API
+"""
+import re
from collections import namedtuple
from textwrap import dedent
-
+
from parso.python.parser import Parser
from parso.python import tree
-
+
from jedi._compatibility import u
from jedi.evaluate.syntax_tree import eval_atom
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.evaluate.compiled import get_string_context_set
from jedi.cache import call_signature_time_cache
-
-
+
+
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
-
-def sorted_definitions(defs):
- # Note: `or ''` below is required because `module_path` could be None.
- return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
-
-
+
+def sorted_definitions(defs):
+ # Note: `or ''` below is required because `module_path` could be None.
+ return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
+
+
def get_on_completion_name(module_node, lines, position):
leaf = module_node.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
@@ -95,12 +95,12 @@ def _get_code_for_stack(code_lines, module_node, position):
def get_stack_at_position(grammar, code_lines, module_node, pos):
- """
+ """
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
- """
+ """
class EndMarkerReached(Exception):
pass
-
+
def tokenize_without_endmarker(code):
# TODO This is for now not an official parso API that exists purely
# for Jedi.
@@ -116,7 +116,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
raise EndMarkerReached()
else:
yield token
-
+
# The code might be indented; just remove the indentation.
code = dedent(_get_code_for_stack(code_lines, module_node, pos))
# We use a word to tell Jedi when we have reached the start of the
@@ -124,7 +124,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + ' ' + safeword
-
+
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
@@ -134,14 +134,14 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
"This really shouldn't happen. There's a bug in Jedi:\n%s"
% list(tokenize_without_endmarker(code))
)
-
-
+
+
def evaluate_goto_definition(evaluator, context, leaf):
if leaf.type == 'name':
# In case of a name we can just use goto_definition which does all the
# magic itself.
return evaluator.goto_definitions(context, leaf)
-
+
parent = leaf.parent
if parent.type == 'atom':
return context.eval_node(leaf.parent)
@@ -152,7 +152,7 @@ def evaluate_goto_definition(evaluator, context, leaf):
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_context_set(evaluator)
return []
-
+
CallSignatureDetails = namedtuple(
'CallSignatureDetails',
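The `or` fallbacks in ``sorted_definitions`` above matter because
``module_path``, ``line`` and ``column`` can each be ``None`` (builtin
modules, keywords). A standalone illustration of the sort key, using a
stand-in tuple instead of real jedi definitions::

    from collections import namedtuple

    Def = namedtuple('Def', ['module_path', 'line', 'column'])

    defs = [
        Def('/tmp/b.py', 3, 0),
        Def(None, 10, 4),              # builtin: no file on disk
        Def('/tmp/a.py', None, None),  # e.g. a keyword without a position
    ]
    key = lambda x: (x.module_path or '', x.line or 0, x.column or 0)
    print([d.module_path for d in sorted(defs, key=key)])
    # [None, '/tmp/a.py', '/tmp/b.py'] -- None sorts first, coerced to ''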
diff --git a/contrib/python/jedi/jedi/api/interpreter.py b/contrib/python/jedi/jedi/api/interpreter.py
index 3395142964..c9b7bd69bb 100644
--- a/contrib/python/jedi/jedi/api/interpreter.py
+++ b/contrib/python/jedi/jedi/api/interpreter.py
@@ -1,32 +1,32 @@
-"""
-TODO Some parts of this module are still not well documented.
-"""
-
+"""
+TODO Some parts of this module are still not well documented.
+"""
+
from jedi.evaluate.context import ModuleContext
-from jedi.evaluate import compiled
+from jedi.evaluate import compiled
from jedi.evaluate.compiled import mixed
from jedi.evaluate.compiled.access import create_access_path
from jedi.evaluate.base_context import Context
-
-
+
+
def _create(evaluator, obj):
return compiled.create_from_access_path(
evaluator, create_access_path(evaluator, obj)
)
-
-
+
+
class NamespaceObject(object):
def __init__(self, dct):
self.__dict__ = dct
-
-
+
+
class MixedModuleContext(Context):
type = 'mixed_module'
-
+
def __init__(self, evaluator, tree_module, namespaces, path, code_lines):
self.evaluator = evaluator
self._namespaces = namespaces
-
+
self._namespace_objects = [NamespaceObject(n) for n in namespaces]
self._module_context = ModuleContext(
evaluator, tree_module,
@@ -34,14 +34,14 @@ class MixedModuleContext(Context):
code_lines=code_lines
)
self.tree_node = tree_module
-
+
def get_node(self):
return self.tree_node
-
+
def get_filters(self, *args, **kwargs):
for filter in self._module_context.get_filters(*args, **kwargs):
yield filter
-
+
for namespace_obj in self._namespace_objects:
compiled_object = _create(self.evaluator, namespace_obj)
mixed_object = mixed.MixedObject(
@@ -52,7 +52,7 @@ class MixedModuleContext(Context):
)
for filter in mixed_object.get_filters(*args, **kwargs):
yield filter
-
+
@property
def code_lines(self):
return self._module_context.code_lines
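``MixedModuleContext`` is what lets ``jedi.Interpreter`` complete on live
objects alongside parsed source. A sketch of the observable behaviour (my
illustration; the namespace content is made up)::

    import jedi

    data = {'user_name': 'ada', 'user_id': 42}
    # The second argument is a list of namespace dicts to mix in.
    interp = jedi.Interpreter('data.it', [locals()])
    print([c.name for c in interp.completions()])  # e.g. ['items']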
diff --git a/contrib/python/jedi/jedi/api/keywords.py b/contrib/python/jedi/jedi/api/keywords.py
index 81e3d1c21c..2991a0f81a 100644
--- a/contrib/python/jedi/jedi/api/keywords.py
+++ b/contrib/python/jedi/jedi/api/keywords.py
@@ -1,11 +1,11 @@
-import pydoc
-
+import pydoc
+
from jedi.evaluate.utils import ignored
from jedi.evaluate.filters import AbstractNameDefinition
-
-try:
- from pydoc_data import topics as pydoc_topics
-except ImportError:
+
+try:
+ from pydoc_data import topics as pydoc_topics
+except ImportError:
# Python 2
try:
import pydoc_topics
@@ -13,72 +13,72 @@ except ImportError:
# This is for the Python 3 embeddable version, which doesn't have
# the pydoc_data module in its python3x.zip file.
pydoc_topics = None
-
-
+
+
def get_operator(evaluator, string, pos):
return Keyword(evaluator, string, pos)
-
-
+
+
class KeywordName(AbstractNameDefinition):
api_type = u'keyword'
-
+
def __init__(self, evaluator, name):
self.evaluator = evaluator
self.string_name = name
self.parent_context = evaluator.builtins_module
-
+
def infer(self):
return [Keyword(self.evaluator, self.string_name, (0, 0))]
-
-
+
+
class Keyword(object):
api_type = u'keyword'
-
+
def __init__(self, evaluator, name, pos):
self.name = KeywordName(evaluator, name)
- self.start_pos = pos
+ self.start_pos = pos
self.parent = evaluator.builtins_module
-
- @property
- def names(self):
- """ For a `parsing.Name` like comparision """
- return [self.name]
-
+
+ @property
+ def names(self):
+ """ For a `parsing.Name` like comparision """
+ return [self.name]
+
def py__doc__(self, include_call_signature=False):
return imitate_pydoc(self.name.string_name)
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self.name)
-
-
-def imitate_pydoc(string):
- """
- It's not possible to get pydoc's help texts without starting the
- annoying pager.
- """
+
+ def __repr__(self):
+ return '<%s: %s>' % (type(self).__name__, self.name)
+
+
+def imitate_pydoc(string):
+ """
+ It's not possible to get pydoc's help texts without starting the
+ annoying pager.
+ """
if pydoc_topics is None:
return ''
- # str needed because of possible unicode stuff in py2k (pydoc doesn't work
- # with unicode strings)
- string = str(string)
- h = pydoc.help
+ # str needed because of possible unicode stuff in py2k (pydoc doesn't work
+ # with unicode strings)
+ string = str(string)
+ h = pydoc.help
with ignored(KeyError):
- # try to access symbols
- string = h.symbols[string]
- string, _, related = string.partition(' ')
-
- get_target = lambda s: h.topics.get(s, h.keywords.get(s))
- while isinstance(string, str):
- string = get_target(string)
-
- try:
- # is a tuple now
- label, related = string
- except TypeError:
- return ''
-
- try:
+ # try to access symbols
+ string = h.symbols[string]
+ string, _, related = string.partition(' ')
+
+ get_target = lambda s: h.topics.get(s, h.keywords.get(s))
+ while isinstance(string, str):
+ string = get_target(string)
+
+ try:
+ # is a tuple now
+ label, related = string
+ except TypeError:
+ return ''
+
+ try:
return pydoc_topics.topics[label].strip() if pydoc_topics else ''
- except KeyError:
- return ''
+ except KeyError:
+ return ''
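The lookup chain in ``imitate_pydoc`` above can be reproduced in a few lines.
A sketch assuming ``pydoc_data.topics`` is available (it ships with
CPython 3)::

    import pydoc
    from pydoc_data import topics as pydoc_topics

    def keyword_help(string):
        h = pydoc.help
        # Follow string -> topic-label indirections until a tuple is reached.
        while isinstance(string, str):
            string = h.topics.get(string, h.keywords.get(string))
        try:
            label, related = string  # topic entries are (label, xrefs) pairs
        except TypeError:
            return ''
        return pydoc_topics.topics[label].strip()

    print(keyword_help('yield').splitlines()[0])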
diff --git a/contrib/python/jedi/jedi/api/replstartup.py b/contrib/python/jedi/jedi/api/replstartup.py
index 98734bf703..3ac8470877 100644
--- a/contrib/python/jedi/jedi/api/replstartup.py
+++ b/contrib/python/jedi/jedi/api/replstartup.py
@@ -1,29 +1,29 @@
-"""
-To use Jedi completion in Python interpreter, add the following in your shell
+"""
+To use Jedi completion in Python interpreter, add the following in your shell
setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
not available on Windows. If you still want Jedi autocompletion in your REPL,
just use IPython instead::
-
- export PYTHONSTARTUP="$(python -m jedi repl)"
-
-Then you will be able to use Jedi completer in your Python interpreter::
-
- $ python
- Python 2.7.2+ (default, Jul 20 2012, 22:15:08)
- [GCC 4.6.1] on linux2
- Type "help", "copyright", "credits" or "license" for more information.
- >>> import os
+
+ export PYTHONSTARTUP="$(python -m jedi repl)"
+
+Then you will be able to use Jedi completer in your Python interpreter::
+
+ $ python
+ Python 2.7.2+ (default, Jul 20 2012, 22:15:08)
+ [GCC 4.6.1] on linux2
+ Type "help", "copyright", "credits" or "license" for more information.
+ >>> import os
>>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP
..dex ..sert
-
-"""
-import jedi.utils
-from jedi import __version__ as __jedi_version__
-
-print('REPL completion using Jedi %s' % __jedi_version__)
-jedi.utils.setup_readline()
-
-del jedi
-
-# Note: try not to do many things here, as it will contaminate global
-# namespace of the interpreter.
+
+"""
+import jedi.utils
+from jedi import __version__ as __jedi_version__
+
+print('REPL completion using Jedi %s' % __jedi_version__)
+jedi.utils.setup_readline()
+
+del jedi
+
+# Note: try not to do many things here, as it will contaminate global
+# namespace of the interpreter.
diff --git a/contrib/python/jedi/jedi/cache.py b/contrib/python/jedi/jedi/cache.py
index b20ea1e779..93e2bd7fcf 100644
--- a/contrib/python/jedi/jedi/cache.py
+++ b/contrib/python/jedi/jedi/cache.py
@@ -1,37 +1,37 @@
-"""
-This caching is very important for speed and memory optimizations. There's
-nothing really spectacular, just some decorators. The following cache types are
-available:
-
-- ``time_cache`` can be used to cache something for just a limited time span,
- which can be useful if there's user interaction and the user cannot react
- faster than a certain time.
-
-This module is one of the reasons why |jedi| is not thread-safe. As you can see
-there are global variables, which hold the cache information. Some of
-these variables are cleaned after every API usage.
-"""
-import time
+"""
+This caching is very important for speed and memory optimizations. There's
+nothing really spectacular, just some decorators. The following cache types are
+available:
+
+- ``time_cache`` can be used to cache something for just a limited time span,
+ which can be useful if there's user interaction and the user cannot react
+ faster than a certain time.
+
+This module is one of the reasons why |jedi| is not thread-safe. As you can see
+there are global variables, which hold the cache information. Some of
+these variables are cleaned after every API usage.
+"""
+import time
from functools import wraps
-
-from jedi import settings
+
+from jedi import settings
from parso.cache import parser_cache
-
-_time_caches = {}
-
-
+
+_time_caches = {}
+
+
def underscore_memoization(func):
"""
Decorator for methods::
-
+
class A(object):
def x(self):
if self._x:
self._x = 10
return self._x
-
+
Becomes::
-
+
class A(object):
@underscore_memoization
def x(self):
@@ -52,65 +52,65 @@ def underscore_memoization(func):
return wrapper
-def clear_time_caches(delete_all=False):
- """ Jedi caches many things, that should be completed after each completion
- finishes.
-
- :param delete_all: Deletes also the cache that is normally not deleted,
- like parser cache, which is important for faster parsing.
- """
- global _time_caches
-
- if delete_all:
- for cache in _time_caches.values():
- cache.clear()
- parser_cache.clear()
- else:
- # normally just kill the expired entries, not all
- for tc in _time_caches.values():
- # check time_cache for expired entries
- for key, (t, value) in list(tc.items()):
- if t < time.time():
- # delete expired entries
- del tc[key]
-
-
+def clear_time_caches(delete_all=False):
+ """ Jedi caches many things, that should be completed after each completion
+ finishes.
+
+ :param delete_all: Deletes also the cache that is normally not deleted,
+ like parser cache, which is important for faster parsing.
+ """
+ global _time_caches
+
+ if delete_all:
+ for cache in _time_caches.values():
+ cache.clear()
+ parser_cache.clear()
+ else:
+ # normally just kill the expired entries, not all
+ for tc in _time_caches.values():
+ # check time_cache for expired entries
+ for key, (t, value) in list(tc.items()):
+ if t < time.time():
+ # delete expired entries
+ del tc[key]
+
+
def call_signature_time_cache(time_add_setting):
- """
- This decorator works as follows: call it with a setting name, then
- decorate a generator function that first yields the cache key and then
- the value. The value is only computed if the key is not cached yet. After
- a certain amount of time (`time_add_setting`) the cache entry expires.
+ """
+ This decorator works as follows: call it with a setting name, then
+ decorate a generator function that first yields the cache key and then
+ the value. The value is only computed if the key is not cached yet. After
+ a certain amount of time (`time_add_setting`) the cache entry expires.
If the given key is None, the function will not be cached.
- """
- def _temp(key_func):
- dct = {}
- _time_caches[time_add_setting] = dct
-
- def wrapper(*args, **kwargs):
- generator = key_func(*args, **kwargs)
- key = next(generator)
- try:
- expiry, value = dct[key]
- if expiry > time.time():
- return value
- except KeyError:
- pass
-
- value = next(generator)
- time_add = getattr(settings, time_add_setting)
- if key is not None:
- dct[key] = time.time() + time_add, value
- return value
- return wrapper
- return _temp
-
-
+ """
+ def _temp(key_func):
+ dct = {}
+ _time_caches[time_add_setting] = dct
+
+ def wrapper(*args, **kwargs):
+ generator = key_func(*args, **kwargs)
+ key = next(generator)
+ try:
+ expiry, value = dct[key]
+ if expiry > time.time():
+ return value
+ except KeyError:
+ pass
+
+ value = next(generator)
+ time_add = getattr(settings, time_add_setting)
+ if key is not None:
+ dct[key] = time.time() + time_add, value
+ return value
+ return wrapper
+ return _temp
+
+
def time_cache(seconds):
def decorator(func):
cache = {}
-
+
@wraps(func)
def wrapper(*args, **kwargs):
key = (args, frozenset(kwargs.items()))
@@ -123,24 +123,24 @@ def time_cache(seconds):
result = func(*args, **kwargs)
cache[key] = time.time(), result
return result
-
+
wrapper.clear_cache = lambda: cache.clear()
return wrapper
-
+
return decorator
-
-
-def memoize_method(method):
- """A normal memoize function."""
+
+
+def memoize_method(method):
+ """A normal memoize function."""
@wraps(method)
- def wrapper(self, *args, **kwargs):
+ def wrapper(self, *args, **kwargs):
cache_dict = self.__dict__.setdefault('_memoize_method_dct', {})
dct = cache_dict.setdefault(method, {})
- key = (args, frozenset(kwargs.items()))
- try:
- return dct[key]
- except KeyError:
- result = method(self, *args, **kwargs)
- dct[key] = result
- return result
- return wrapper
+ key = (args, frozenset(kwargs.items()))
+ try:
+ return dct[key]
+ except KeyError:
+ result = method(self, *args, **kwargs)
+ dct[key] = result
+ return result
+ return wrapper
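A usage sketch for the decorators above (my illustration, assuming the
vendored module is importable as ``jedi.cache``; the ``Counter`` class is made
up)::

    import time
    from jedi.cache import memoize_method, time_cache

    class Counter(object):
        calls = 0

        @memoize_method
        def square(self, n):
            Counter.calls += 1
            return n * n

    c = Counter()
    print(c.square(4), c.square(4), Counter.calls)  # 16 16 1: second hit cached

    @time_cache(seconds=0.1)
    def stamp():
        return time.time()

    first = stamp()
    assert stamp() == first   # served from the cache
    time.sleep(0.2)
    assert stamp() != first   # the entry expired after `seconds`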
diff --git a/contrib/python/jedi/jedi/debug.py b/contrib/python/jedi/jedi/debug.py
index 69205d921d..49fda4bcac 100644
--- a/contrib/python/jedi/jedi/debug.py
+++ b/contrib/python/jedi/jedi/debug.py
@@ -1,7 +1,7 @@
-from jedi._compatibility import encoding, is_py3, u
-import os
-import time
-
+from jedi._compatibility import encoding, is_py3, u
+import os
+import time
+
_inited = False
@@ -14,14 +14,14 @@ def _lazy_colorama_init():
"""
-try:
- if os.name == 'nt':
+try:
+ if os.name == 'nt':
# Does not work on Windows, as pyreadline and colorama interfere
- raise ImportError
- else:
- # Use colorama for nicer console output.
- from colorama import Fore, init
- from colorama import initialise
+ raise ImportError
+ else:
+ # Use colorama for nicer console output.
+ from colorama import Fore, init
+ from colorama import initialise
def _lazy_colorama_init(): # noqa: F811
"""
@@ -44,76 +44,76 @@ try:
pass
_inited = True
-except ImportError:
- class Fore(object):
- RED = ''
- GREEN = ''
- YELLOW = ''
+except ImportError:
+ class Fore(object):
+ RED = ''
+ GREEN = ''
+ YELLOW = ''
MAGENTA = ''
- RESET = ''
-
-NOTICE = object()
-WARNING = object()
-SPEED = object()
-
-enable_speed = False
-enable_warning = False
-enable_notice = False
-
-# callback, interface: level, str
-debug_function = None
+ RESET = ''
+
+NOTICE = object()
+WARNING = object()
+SPEED = object()
+
+enable_speed = False
+enable_warning = False
+enable_notice = False
+
+# callback, interface: level, str
+debug_function = None
_debug_indent = 0
-_start_time = time.time()
-
-
-def reset_time():
- global _start_time, _debug_indent
- _start_time = time.time()
+_start_time = time.time()
+
+
+def reset_time():
+ global _start_time, _debug_indent
+ _start_time = time.time()
_debug_indent = 0
-
-
-def increase_indent(func):
- """Decorator for makin """
- def wrapper(*args, **kwargs):
- global _debug_indent
- _debug_indent += 1
- try:
+
+
+def increase_indent(func):
+ """Decorator for makin """
+ def wrapper(*args, **kwargs):
+ global _debug_indent
+ _debug_indent += 1
+ try:
return func(*args, **kwargs)
- finally:
- _debug_indent -= 1
- return wrapper
-
-
+ finally:
+ _debug_indent -= 1
+ return wrapper
+
+
def dbg(message, *args, **kwargs):
- """ Looks at the stack, to see if a debug message should be printed. """
+ """ Looks at the stack, to see if a debug message should be printed. """
# Python 2 compatibility: it doesn't support keyword-only arguments
color = kwargs.pop('color', 'GREEN')
assert color
- if debug_function and enable_notice:
+ if debug_function and enable_notice:
i = ' ' * _debug_indent
_lazy_colorama_init()
debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
-
-
+
+
def warning(message, *args, **kwargs):
format = kwargs.pop('format', True)
assert not kwargs
- if debug_function and enable_warning:
- i = ' ' * _debug_indent
+ if debug_function and enable_warning:
+ i = ' ' * _debug_indent
if format:
message = message % tuple(u(repr(a)) for a in args)
debug_function('RED', i + 'warning: ' + message)
-
-
-def speed(name):
- if debug_function and enable_speed:
- now = time.time()
- i = ' ' * _debug_indent
+
+
+def speed(name):
+ if debug_function and enable_speed:
+ now = time.time()
+ i = ' ' * _debug_indent
debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))
-
-
+
+
def print_to_stdout(color, str_out):
"""
The default debug function that prints to standard out.
@@ -122,9 +122,9 @@ def print_to_stdout(color, str_out):
"""
col = getattr(Fore, color)
_lazy_colorama_init()
- if not is_py3:
- str_out = str_out.encode(encoding, 'replace')
- print(col + str_out + Fore.RESET)
-
-
-# debug_function = print_to_stdout
+ if not is_py3:
+ str_out = str_out.encode(encoding, 'replace')
+ print(col + str_out + Fore.RESET)
+
+
+# debug_function = print_to_stdout
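A sketch of the plumbing above, wiring in the default stdout callback by hand
(my illustration, assuming the vendored module is importable as
``jedi.debug``)::

    from jedi import debug

    debug.debug_function = debug.print_to_stdout
    debug.enable_notice = debug.enable_warning = debug.enable_speed = True

    debug.dbg('evaluating %s', 'datetime.date')       # green "dbg:" line
    debug.warning('no definition found for %s', 'x')  # red "warning:" line
    debug.speed('parsing done')   # yellow line with seconds since import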
diff --git a/contrib/python/jedi/jedi/evaluate/__init__.py b/contrib/python/jedi/jedi/evaluate/__init__.py
index 00104c870e..48339439ee 100644
--- a/contrib/python/jedi/jedi/evaluate/__init__.py
+++ b/contrib/python/jedi/jedi/evaluate/__init__.py
@@ -1,80 +1,80 @@
-"""
-Evaluation of Python code in |jedi| is based on three assumptions:
-
-* The code uses as few side effects as possible. Jedi understands certain
- list/tuple/set modifications, but there's no guarantee that Jedi detects
- everything (list.append in different modules for example).
-* No magic is being used:
-
- - metaclasses
- - ``setattr()`` / ``__import__()``
- - writing to ``globals()``, ``locals()``, ``object.__dict__``
-* The programmer is not a total dick, e.g. like `this
- <https://github.com/davidhalter/jedi/issues/24>`_ :-)
-
+"""
+Evaluation of Python code in |jedi| is based on three assumptions:
+
+* The code uses as few side effects as possible. Jedi understands certain
+ list/tuple/set modifications, but there's no guarantee that Jedi detects
+ everything (list.append in different modules for example).
+* No magic is being used:
+
+ - metaclasses
+ - ``setattr()`` / ``__import__()``
+ - writing to ``globals()``, ``locals()``, ``object.__dict__``
+* The programmer is not a total dick, e.g. like `this
+ <https://github.com/davidhalter/jedi/issues/24>`_ :-)
+
The actual algorithm is based on a principle called lazy evaluation. That
said, the typical entry point for static analysis is calling
``eval_expr_stmt``. There's separate logic for autocompletion in the API; the
evaluator is all about evaluating an expression.
-
+
TODO this paragraph is not what jedi does anymore, it's similar, but not the
same.
Now you need to understand what follows after ``eval_expr_stmt``. Let's
-make an example::
-
- import datetime
- datetime.date.toda# <-- cursor here
-
-First of all, this module doesn't care about completion. It really just cares
+make an example::
+
+ import datetime
+ datetime.date.toda# <-- cursor here
+
+First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will
-return the ``date`` class.
-
-To *visualize* this (simplified):
-
+return the ``date`` class.
+
+To *visualize* this (simplified):
+
- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.eval_node`` cares for resolving the dotted path
-- ``Evaluator.find_types`` searches for global definitions of datetime, which
- it finds in the definition of an import, by scanning the syntax tree.
-- Using the import logic, the datetime module is found.
+- ``Evaluator.find_types`` searches for global definitions of datetime, which
+ it finds in the definition of an import, by scanning the syntax tree.
+- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_node`` to find ``date``
- inside the datetime module.
-
-Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
-calls to ``find_types``. However, the second call would be ignored, because the
-first one would return nothing (there's no foo attribute in ``date``).
-
-What if the import contained another ``ExprStmt`` like this::
-
- from foo import bar
- Date = bar.baz
-
+ inside the datetime module.
+
+Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
+calls to ``find_types``. However, the second call would be ignored, because the
+first one would return nothing (there's no foo attribute in ``date``).
+
+What if the import contained another ``ExprStmt`` like this::
+
+ from foo import bar
+ Date = bar.baz
+
Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really
-easy. Python can obviously get way more complicated than this. To understand
-tuple assignments, list comprehensions and everything else, a lot more code had
-to be written.
-
-Jedi has been tested very well, so you can just start modifying code. It's best
-to write your own test first for your "new" feature. Don't be scared of
-breaking stuff. As long as the tests pass, you're most likely to be fine.
-
-I need to mention now that lazy evaluation is really good because it
-only *evaluates* what needs to be *evaluated*. All the statements and modules
-that are not used are just being ignored.
-"""
-
+easy. Python can obviously get way more complicated than this. To understand
+tuple assignments, list comprehensions and everything else, a lot more code had
+to be written.
+
+Jedi has been tested very well, so you can just start modifying code. It's best
+to write your own test first for your "new" feature. Don't be scared of
+breaking stuff. As long as the tests pass, you're most likely to be fine.
+
+I need to mention now that lazy evaluation is really good because it
+only *evaluates* what needs to be *evaluated*. All the statements and modules
+that are not used are just being ignored.
+"""
+
from parso.python import tree
import parso
from parso import python_bytes_to_unicode
-
-from jedi import debug
+
+from jedi import debug
from jedi import parser_utils
from jedi.evaluate.utils import unite
-from jedi.evaluate import imports
-from jedi.evaluate import recursion
+from jedi.evaluate import imports
+from jedi.evaluate import recursion
from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate import compiled
-from jedi.evaluate import helpers
+from jedi.evaluate import compiled
+from jedi.evaluate import helpers
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \
ContextSet, NO_CONTEXTS, iterate_contexts
@@ -83,9 +83,9 @@ from jedi.evaluate.context import ClassContext, FunctionContext, \
from jedi.evaluate.context.iterable import CompForContext
from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \
eval_node, check_tuple_assignments
-
-
-class Evaluator(object):
+
+
+class Evaluator(object):
def __init__(self, project, environment=None, script_path=None):
if environment is None:
environment = project.get_environment()
@@ -95,12 +95,12 @@ class Evaluator(object):
self.grammar = environment.get_grammar()
self.latest_grammar = parso.load_grammar(version='3.6')
- self.memoize_cache = {} # for memoize decorators
+ self.memoize_cache = {} # for memoize decorators
self.module_cache = imports.ModuleCache() # does the job of `sys.modules`.
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `evaluate.compiled.mixed._create()`
- self.analysis = []
+ self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.project = project
@@ -108,30 +108,30 @@ class Evaluator(object):
# This setting is only temporary to limit the work we have to do with
# tensorflow and others.
self.infer_enabled = True
-
+
self.reset_recursion_limitations()
self.allow_different_encoding = True
-
+
@property
@evaluator_function_cache()
def builtins_module(self):
return compiled.get_special_object(self, u'BUILTINS')
-
+
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
-
+
def get_sys_path(self):
"""Convenience function"""
return self.project._get_sys_path(self, environment=self.environment)
-
+
def eval_element(self, context, element):
if not self.infer_enabled:
return NO_CONTEXTS
-
+
if isinstance(context, CompForContext):
return eval_node(context, element)
-
+
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
@@ -172,7 +172,7 @@ class Evaluator(object):
# long.
name_dicts = [{}]
break
-
+
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
@@ -180,7 +180,7 @@ class Evaluator(object):
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = ContextSet(definition)
-
+
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
@@ -191,18 +191,18 @@ class Evaluator(object):
with helpers.predefine_names(context, if_stmt, name_dict):
result |= eval_node(context, element)
return result
- else:
+ else:
return self._eval_element_if_evaluated(context, element)
- else:
+ else:
if predefined_if_name_dict:
return eval_node(context, element)
- else:
+ else:
return self._eval_element_if_evaluated(context, element)
-
+
def _eval_element_if_evaluated(self, context, element):
- """
+ """
TODO This function is temporary: Merge with eval_element.
- """
+ """
parent = element
while parent is not None:
parent = parent.parent
@@ -210,11 +210,11 @@ class Evaluator(object):
if predefined_if_name_dict is not None:
return eval_node(context, element)
return self._eval_element_cached(context, element)
-
+
@evaluator_function_cache(default=NO_CONTEXTS)
def _eval_element_cached(self, context, element):
return eval_node(context, element)
-
+
def goto_definitions(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
@@ -223,7 +223,7 @@ class Evaluator(object):
return [ClassContext(self, context, name.parent)]
elif type_ == 'funcdef':
return [FunctionContext.from_context(context, name.parent)]
-
+
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
@@ -236,9 +236,9 @@ class Evaluator(object):
return check_tuple_assignments(self, c_node, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
-
+
return helpers.evaluate_call_of_leaf(context, name)
-
+
def goto(self, context, name):
definition = name.get_definition(import_name_always=True)
if definition is not None:
@@ -256,59 +256,59 @@ class Evaluator(object):
elif type_ in ('import_from', 'import_name'):
module_names = imports.infer_import(context, name, is_goto=True)
return module_names
-
- par = name.parent
+
+ par = name.parent
node_type = par.type
if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
- # Named param goto.
- trailer = par.parent
- if trailer.type == 'arglist':
- trailer = trailer.parent
- if trailer.type != 'classdef':
- if trailer.type == 'decorator':
+ # Named param goto.
+ trailer = par.parent
+ if trailer.type == 'arglist':
+ trailer = trailer.parent
+ if trailer.type != 'classdef':
+ if trailer.type == 'decorator':
context_set = context.eval_node(trailer.children[1])
- else:
- i = trailer.parent.children.index(trailer)
- to_evaluate = trailer.parent.children[:i]
+ else:
+ i = trailer.parent.children.index(trailer)
+ to_evaluate = trailer.parent.children[:i]
if to_evaluate[0] == 'await':
to_evaluate.pop(0)
context_set = context.eval_node(to_evaluate[0])
- for trailer in to_evaluate[1:]:
+ for trailer in to_evaluate[1:]:
context_set = eval_trailer(context, context_set, trailer)
- param_names = []
+ param_names = []
for context in context_set:
- try:
+ try:
get_param_names = context.get_param_names
- except AttributeError:
- pass
- else:
+ except AttributeError:
+ pass
+ else:
for param_name in get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
- return param_names
+ return param_names
elif node_type == 'dotted_name': # Is a decorator.
- index = par.children.index(name)
- if index > 0:
- new_dotted = helpers.deep_ast_copy(par)
- new_dotted.children[index - 1:] = []
+ index = par.children.index(name)
+ if index > 0:
+ new_dotted = helpers.deep_ast_copy(par)
+ new_dotted.children[index - 1:] = []
values = context.eval_node(new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
-
+
if node_type == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
- else:
+ else:
stmt = tree.search_ancestor(
name, 'expr_stmt', 'lambdef'
) or name
if stmt.type == 'lambdef':
- stmt = name
+ stmt = name
return context.py__getattribute__(
name,
position=stmt.start_pos,
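The module docstring's ``datetime.date.toda`` walk-through can be driven from
the public API; ``eval_expr_stmt`` and friends run behind ``completions`` (my
illustration, assuming this vendored jedi is importable)::

    import jedi

    source = 'import datetime\ndatetime.date.toda'
    script = jedi.Script(source, 2, len('datetime.date.toda'), None)

    # Resolving the dotted path finds the datetime module via the import,
    # then ``date`` inside it; completion then filters its members.
    print([c.name for c in script.completions()])  # ['today']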
diff --git a/contrib/python/jedi/jedi/evaluate/analysis.py b/contrib/python/jedi/jedi/evaluate/analysis.py
index f942a268f2..ded4e9f208 100644
--- a/contrib/python/jedi/jedi/evaluate/analysis.py
+++ b/contrib/python/jedi/jedi/evaluate/analysis.py
@@ -1,18 +1,18 @@
-"""
-Module for statical analysis.
-"""
+"""
+Module for statical analysis.
+"""
from parso.python import tree
from jedi._compatibility import force_unicode
-from jedi import debug
-from jedi.evaluate.compiled import CompiledObject
+from jedi import debug
+from jedi.evaluate.compiled import CompiledObject
from jedi.evaluate.helpers import is_string
-
-
-CODES = {
- 'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
- 'name-error': (2, NameError, 'Potential NameError.'),
- 'import-error': (3, ImportError, 'Potential ImportError.'),
+
+
+CODES = {
+ 'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
+ 'name-error': (2, NameError, 'Potential NameError.'),
+ 'import-error': (3, ImportError, 'Potential ImportError.'),
'type-error-too-many-arguments': (4, TypeError, None),
'type-error-too-few-arguments': (5, TypeError, None),
'type-error-keyword-argument': (6, TypeError, None),
@@ -25,96 +25,96 @@ CODES = {
'type-error-not-subscriptable': (13, TypeError, None),
'value-error-too-many-values': (14, ValueError, None),
'value-error-too-few-values': (15, ValueError, None),
-}
-
-
-class Error(object):
- def __init__(self, name, module_path, start_pos, message=None):
- self.path = module_path
- self._start_pos = start_pos
- self.name = name
- if message is None:
- message = CODES[self.name][2]
- self.message = message
-
- @property
- def line(self):
- return self._start_pos[0]
-
- @property
- def column(self):
- return self._start_pos[1]
-
- @property
- def code(self):
-        # The first letter of the class name ('E' for Error, 'W' for Warning).
- first = self.__class__.__name__[0]
- return first + str(CODES[self.name][0])
-
- def __unicode__(self):
- return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
- self.code, self.message)
-
- def __str__(self):
- return self.__unicode__()
-
- def __eq__(self, other):
+}
+
+
+class Error(object):
+ def __init__(self, name, module_path, start_pos, message=None):
+ self.path = module_path
+ self._start_pos = start_pos
+ self.name = name
+ if message is None:
+ message = CODES[self.name][2]
+ self.message = message
+
+ @property
+ def line(self):
+ return self._start_pos[0]
+
+ @property
+ def column(self):
+ return self._start_pos[1]
+
+ @property
+ def code(self):
+        # The first letter of the class name ('E' for Error, 'W' for Warning).
+ first = self.__class__.__name__[0]
+ return first + str(CODES[self.name][0])
+
+ def __unicode__(self):
+ return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
+ self.code, self.message)
+
+ def __str__(self):
+ return self.__unicode__()
+
+ def __eq__(self, other):
return (self.path == other.path and self.name == other.name and
self._start_pos == other._start_pos)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return hash((self.path, self._start_pos, self.name))
-
- def __repr__(self):
- return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
- self.name, self.path,
- self._start_pos[0], self._start_pos[1])
-
-
-class Warning(Error):
- pass
-
-
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash((self.path, self._start_pos, self.name))
+
+ def __repr__(self):
+ return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
+ self.name, self.path,
+ self._start_pos[0], self._start_pos[1])
+
+
+class Warning(Error):
+ pass
+
+
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
exception = CODES[error_name][1]
if _check_for_exception_catch(node_context, node, exception, payload):
- return
-
+ return
+
# TODO this path is probably not right
module_context = node_context.get_root_context()
module_path = module_context.py__file__()
instance = typ(error_name, module_path, node.start_pos, message)
debug.warning(str(instance), format=False)
node_context.evaluator.analysis.append(instance)
-
-
-def _check_for_setattr(instance):
- """
- Check if there's any setattr method inside an instance. If so, return True.
- """
+
+
+def _check_for_setattr(instance):
+ """
+ Check if there's any setattr method inside an instance. If so, return True.
+ """
from jedi.evaluate.context import ModuleContext
module = instance.get_root_context()
if not isinstance(module, ModuleContext):
return False
node = module.tree_node
- try:
+ try:
stmts = node.get_used_names()['setattr']
- except KeyError:
- return False
-
+ except KeyError:
+ return False
+
return any(node.start_pos < stmt.start_pos < node.end_pos
- for stmt in stmts)
-
-
+ for stmt in stmts)
+
+
def add_attribute_error(name_context, lookup_context, name):
message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name))
from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName
-    # Check for __getattr__/__getattribute__ existence and issue a warning
-    # instead of an error, if that happens.
+    # Check for __getattr__/__getattribute__ existence and issue a warning
+    # instead of an error, if that happens.
typ = Error
if isinstance(lookup_context, AbstractInstanceContext):
slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \
@@ -125,94 +125,94 @@ def add_attribute_error(name_context, lookup_context, name):
n.parent_context.obj == object:
typ = Warning
break
-
+
if _check_for_setattr(lookup_context):
typ = Warning
-
+
payload = lookup_context, name
add(name_context, 'attribute-error', name, message, typ, payload)
-
+
def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
- """
- Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
- doesn't count as an error (if equal to `exception`).
- Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
- it.
-    Returns True if the exception was caught.
- """
- def check_match(cls, exception):
- try:
+ """
+ Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
+ doesn't count as an error (if equal to `exception`).
+ Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
+ it.
+    Returns True if the exception was caught.
+ """
+ def check_match(cls, exception):
+ try:
return isinstance(cls, CompiledObject) and cls.is_super_class(exception)
- except TypeError:
- return False
-
- def check_try_for_except(obj, exception):
- # Only nodes in try
- iterator = iter(obj.children)
- for branch_type in iterator:
- colon = next(iterator)
- suite = next(iterator)
- if branch_type == 'try' \
+ except TypeError:
+ return False
+
+ def check_try_for_except(obj, exception):
+ # Only nodes in try
+ iterator = iter(obj.children)
+ for branch_type in iterator:
+ colon = next(iterator)
+ suite = next(iterator)
+ if branch_type == 'try' \
and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
- return False
-
+ return False
+
for node in obj.get_except_clause_tests():
- if node is None:
- return True # An exception block that catches everything.
- else:
+ if node is None:
+ return True # An exception block that catches everything.
+ else:
except_classes = node_context.eval_node(node)
- for cls in except_classes:
+ for cls in except_classes:
from jedi.evaluate.context import iterable
if isinstance(cls, iterable.Sequence) and \
cls.array_type == 'tuple':
- # multiple exceptions
+ # multiple exceptions
for lazy_context in cls.py__iter__():
for typ in lazy_context.infer():
if check_match(typ, exception):
return True
- else:
- if check_match(cls, exception):
- return True
-
- def check_hasattr(node, suite):
- try:
+ else:
+ if check_match(cls, exception):
+ return True
+
+ def check_hasattr(node, suite):
+ try:
assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
assert node.type in ('power', 'atom_expr')
- base = node.children[0]
- assert base.type == 'name' and base.value == 'hasattr'
- trailer = node.children[1]
- assert trailer.type == 'trailer'
- arglist = trailer.children[1]
- assert arglist.type == 'arglist'
+ base = node.children[0]
+ assert base.type == 'name' and base.value == 'hasattr'
+ trailer = node.children[1]
+ assert trailer.type == 'trailer'
+ arglist = trailer.children[1]
+ assert arglist.type == 'arglist'
from jedi.evaluate.arguments import TreeArguments
args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack())
- # Arguments should be very simple
- assert len(args) == 2
-
- # Check name
+ # Arguments should be very simple
+ assert len(args) == 2
+
+ # Check name
key, lazy_context = args[1]
names = list(lazy_context.infer())
assert len(names) == 1 and is_string(names[0])
assert force_unicode(names[0].get_safe_value()) == payload[1].value
-
- # Check objects
+
+ # Check objects
key, lazy_context = args[0]
objects = lazy_context.infer()
- return payload[0] in objects
- except AssertionError:
- return False
-
+ return payload[0] in objects
+ except AssertionError:
+ return False
+
obj = jedi_name
while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
if isinstance(obj, tree.Flow):
- # try/except catch check
+ # try/except catch check
if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
- return True
- # hasattr check
+ return True
+ # hasattr check
if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
- if check_hasattr(obj.children[1], obj.children[3]):
- return True
- obj = obj.parent
-
- return False
+ if check_hasattr(obj.children[1], obj.children[3]):
+ return True
+ obj = obj.parent
+
+ return False
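_check_for_exception_catch() only recognizes two guard shapes while walking the flow ancestors: a try/except clause whose class matches the potential exception, and a hasattr() test in an if/while condition. Both shapes in plain Python; code written like this is what keeps the analysis from reporting a potential AttributeError:

obj = object()

try:
    obj.missing            # inside try/except AttributeError: suppressed
except AttributeError:
    pass

if hasattr(obj, 'missing'):
    print(obj.missing)     # guarded by hasattr on the same name: suppressed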
diff --git a/contrib/python/jedi/jedi/evaluate/cache.py b/contrib/python/jedi/jedi/evaluate/cache.py
index d61a053b42..c619e698a3 100644
--- a/contrib/python/jedi/jedi/evaluate/cache.py
+++ b/contrib/python/jedi/jedi/evaluate/cache.py
@@ -1,50 +1,50 @@
-"""
+"""
- the popular ``_memoize_default`` works like a typical memoize and returns the
- default otherwise.
+ default otherwise.
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
-"""
-
+"""
+
_NO_DEFAULT = object()
-
-
+
+
def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
- """ This is a typical memoization decorator, BUT there is one difference:
- To prevent recursion it sets defaults.
-
-    Preventing recursion is in this case a much bigger benefit than speed. I
-    don't think that there is a big speed difference, but there are many cases
-    where recursion could happen (think about a = b; b = a).
- """
- def func(function):
- def wrapper(obj, *args, **kwargs):
+ """ This is a typical memoization decorator, BUT there is one difference:
+ To prevent recursion it sets defaults.
+
+    Preventing recursion is in this case a much bigger benefit than speed. I
+    don't think that there is a big speed difference, but there are many cases
+    where recursion could happen (think about a = b; b = a).
+ """
+ def func(function):
+ def wrapper(obj, *args, **kwargs):
# TODO These checks are kind of ugly and slow.
- if evaluator_is_first_arg:
- cache = obj.memoize_cache
+ if evaluator_is_first_arg:
+ cache = obj.memoize_cache
elif second_arg_is_evaluator:
cache = args[0].memoize_cache # needed for meta classes
- else:
+ else:
cache = obj.evaluator.memoize_cache
-
- try:
- memo = cache[function]
- except KeyError:
- memo = {}
- cache[function] = memo
-
- key = (obj, args, frozenset(kwargs.items()))
- if key in memo:
- return memo[key]
- else:
+
+ try:
+ memo = cache[function]
+ except KeyError:
+ memo = {}
+ cache[function] = memo
+
+ key = (obj, args, frozenset(kwargs.items()))
+ if key in memo:
+ return memo[key]
+ else:
if default is not _NO_DEFAULT:
- memo[key] = default
- rv = function(obj, *args, **kwargs)
- memo[key] = rv
- return rv
- return wrapper
-
- return func
-
-
+ memo[key] = default
+ rv = function(obj, *args, **kwargs)
+ memo[key] = rv
+ return rv
+ return wrapper
+
+ return func
+
+
def evaluator_function_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default, evaluator_is_first_arg=True)(func)
@@ -66,12 +66,12 @@ def evaluator_as_method_param_cache():
return decorator
-class CachedMetaClass(type):
- """
-    This is basically the same as the decorator above; it just caches
- class initializations. Either you do it this way or with decorators, but
- with decorators you lose class access (isinstance, etc).
- """
+class CachedMetaClass(type):
+ """
+    This is basically the same as the decorator above; it just caches
+ class initializations. Either you do it this way or with decorators, but
+ with decorators you lose class access (isinstance, etc).
+ """
@evaluator_as_method_param_cache()
- def __call__(self, *args, **kwargs):
- return super(CachedMetaClass, self).__call__(*args, **kwargs)
+ def __call__(self, *args, **kwargs):
+ return super(CachedMetaClass, self).__call__(*args, **kwargs)
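A condensed, runnable model of the trick _memoize_default() relies on. This sketch drops the evaluator-bound cache and the kwargs part of the key used by the real implementation and keeps only what breaks cycles such as a = b; b = a:

_NO_DEFAULT = object()

def memoize_default(default=_NO_DEFAULT):
    def decorator(function):
        memo = {}
        def wrapper(*args):
            if args in memo:
                return memo[args]
            if default is not _NO_DEFAULT:
                memo[args] = default    # placeholder written *before* the call
            result = function(*args)
            memo[args] = result
            return result
        return wrapper
    return decorator

@memoize_default(default=frozenset())
def infer(name):
    other = {'a': 'b', 'b': 'a'}[name]  # 'a' and 'b' reference each other
    return infer(other) | {name}

print(infer('a'))   # terminates: the re-entrant call sees the empty default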
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py b/contrib/python/jedi/jedi/evaluate/compiled/__init__.py
index 386eef81ea..757aec5234 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/__init__.py
+++ b/contrib/python/jedi/jedi/evaluate/compiled/__init__.py
@@ -2,41 +2,41 @@ from jedi._compatibility import unicode
from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \
CompiledObjectFilter, CompiledContextName, create_from_access_path, \
create_from_name
-
-
+
+
def builtin_from_name(evaluator, string):
builtins = evaluator.builtins_module
return create_from_name(evaluator, builtins, string)
-
-
+
+
def create_simple_object(evaluator, obj):
- """
+ """
    Only allows creation of objects that are easily picklable across Python
versions.
- """
+ """
assert isinstance(obj, (int, float, str, bytes, unicode, slice, complex))
return create_from_access_path(
evaluator,
evaluator.compiled_subprocess.create_simple_object(obj)
)
-
-
+
+
def get_special_object(evaluator, identifier):
return create_from_access_path(
evaluator,
evaluator.compiled_subprocess.get_special_object(identifier)
)
-
-
+
+
def get_string_context_set(evaluator):
return builtin_from_name(evaluator, u'str').execute_evaluated()
-
-
+
+
def load_module(evaluator, dotted_name, **kwargs):
# Temporary, some tensorflow builtins cannot be loaded, so it's tried again
# and again and it's really slow.
if dotted_name.startswith('tensorflow.'):
- return None
+ return None
access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
if access_path is None:
return None
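The constraint the create_simple_object() docstring states can be checked directly; a rough illustration (the vendored assert additionally admits slice objects and, on Python 2, unicode):

import pickle

# Values like these survive a pickle round trip identically on any
# interpreter, which is why they may be shipped to the compiled subprocess.
for value in (42, 1.5, 'text', b'raw', 3 + 4j):
    assert pickle.loads(pickle.dumps(value)) == value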
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake.py b/contrib/python/jedi/jedi/evaluate/compiled/fake.py
index 98f126d144..a38ff34938 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake.py
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake.py
@@ -1,19 +1,19 @@
-"""
-Loads functions that are mixed into the standard library. E.g. builtins are
-written in C (binaries), but my autocompletion only understands Python code. By
-mixing in Python code, the autocompletion should work much better for builtins.
-"""
+"""
+Loads functions that are mixed into the standard library. E.g. builtins are
+written in C (binaries), but my autocompletion only understands Python code. By
+mixing in Python code, the autocompletion should work much better for builtins.
+"""
import sys
-import os
+import os
from itertools import chain
-
+
import __res
-
+
from jedi._compatibility import unicode
-
+
fake_modules = {}
-
-
+
+
def _get_path_dict():
path = os.path.dirname(__file__)
base_path = os.path.join(path, 'fake')
@@ -24,37 +24,37 @@ def _get_path_dict():
if file_name.startswith(base_path) and file_name.endswith('.pym'):
dct[file_name[len(base_path) + 1:-4]] = file_name
return dct
-
-
+
+
_path_dict = _get_path_dict()
-
-
+
+
class FakeDoesNotExist(Exception):
pass
-
+
def _load_faked_module(evaluator, module_name):
- try:
+ try:
return fake_modules[module_name]
except KeyError:
- pass
-
+ pass
+
check_module_name = module_name
if module_name == '__builtin__' and evaluator.environment.version_info.major == 2:
check_module_name = 'builtins'
- try:
+ try:
path = _path_dict[check_module_name]
except KeyError:
fake_modules[module_name] = None
return
-
+
if sys.version_info[0] == 3:
path = bytes(path, 'ascii')
source = __res.resfs_read(path)
-
+
fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source))
-
+
if check_module_name != module_name:
# There are two implementations of `open` for either python 2/3.
# -> Rename the python2 version (`look at fake/builtins.pym`).
@@ -63,14 +63,14 @@ def _load_faked_module(evaluator, module_name):
open_func = _search_scope(m, 'open_python2')
open_func.children[1].value = 'open'
return m
-
-
+
+
def _search_scope(scope, obj_name):
for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()):
if s.name.value == obj_name:
return s
-
-
+
+
def get_faked_with_parent_context(parent_context, name):
if parent_context.tree_node is not None:
# Try to search in already clearly defined stuff.
@@ -78,7 +78,7 @@ def get_faked_with_parent_context(parent_context, name):
if found is not None:
return found
raise FakeDoesNotExist
-
+
def get_faked_module(evaluator, string_name):
module = _load_faked_module(evaluator, string_name)
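_search_scope() above is a thin walk over parso's top-level iterators. A standalone sketch of the same lookup, assuming parso is importable on its own:

import parso

source = '''\
class FakeThing:
    pass

def open_python2(name, mode=None, buffering=None):
    pass
'''

module = parso.parse(source)
for node in list(module.iter_classdefs()) + list(module.iter_funcdefs()):
    print(node.name.value)   # FakeThing, then open_python2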
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym
index e56a1f4fa4..909ef03fc3 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_functools.pym
@@ -1,9 +1,9 @@
-class partial():
- def __init__(self, func, *args, **keywords):
- self.__func = func
- self.__args = args
- self.__keywords = keywords
-
- def __call__(self, *args, **kwargs):
- # TODO should be **dict(self.__keywords, **kwargs)
- return self.__func(*(self.__args + args), **self.__keywords)
+class partial():
+ def __init__(self, func, *args, **keywords):
+ self.__func = func
+ self.__args = args
+ self.__keywords = keywords
+
+ def __call__(self, *args, **kwargs):
+ # TODO should be **dict(self.__keywords, **kwargs)
+ return self.__func(*(self.__args + args), **self.__keywords)
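The TODO in __call__ marks the keyword merge the stub skips. The real functools.partial performs it, so call-time keywords override stored ones:

from functools import partial

def power(base, exponent=2):
    return base ** exponent

square = partial(power, exponent=2)
print(square(3))               # 9
print(square(3, exponent=3))   # 27: the call-time keyword wins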
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym
index 0df417a0bf..2151e652b4 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sqlite3.pym
@@ -1,26 +1,26 @@
-def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None):
- return Connection()
-
-
-class Connection():
- def cursor(self):
- return Cursor()
-
-
-class Cursor():
- def cursor(self):
- return Cursor()
-
- def fetchone(self):
- return Row()
-
-    def fetchmany(self, size=None):
- return [self.fetchone()]
-
- def fetchall(self):
- return [self.fetchone()]
-
-
-class Row():
- def keys(self):
- return ['']
+def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None):
+ return Connection()
+
+
+class Connection():
+ def cursor(self):
+ return Cursor()
+
+
+class Cursor():
+ def cursor(self):
+ return Cursor()
+
+ def fetchone(self):
+ return Row()
+
+    def fetchmany(self, size=None):
+ return [self.fetchone()]
+
+ def fetchall(self):
+ return [self.fetchone()]
+
+
+class Row():
+ def keys(self):
+ return ['']
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym
index 4d271c0cb5..217be56339 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_sre.pym
@@ -1,99 +1,99 @@
-def compile():
- class SRE_Match():
- endpos = int()
- lastgroup = int()
- lastindex = int()
- pos = int()
- string = str()
- regs = ((int(), int()),)
-
- def __init__(self, pattern):
- self.re = pattern
-
- def start(self):
- return int()
-
- def end(self):
- return int()
-
- def span(self):
- return int(), int()
-
- def expand(self):
- return str()
-
- def group(self, nr):
- return str()
-
- def groupdict(self):
- return {str(): str()}
-
- def groups(self):
- return (str(),)
-
- class SRE_Pattern():
- flags = int()
- groupindex = {}
- groups = int()
- pattern = str()
-
- def findall(self, string, pos=None, endpos=None):
- """
- findall(string[, pos[, endpos]]) --> list.
- Return a list of all non-overlapping matches of pattern in string.
- """
- return [str()]
-
- def finditer(self, string, pos=None, endpos=None):
- """
- finditer(string[, pos[, endpos]]) --> iterator.
- Return an iterator over all non-overlapping matches for the
- RE pattern in string. For each match, the iterator returns a
- match object.
- """
- yield SRE_Match(self)
-
- def match(self, string, pos=None, endpos=None):
- """
- match(string[, pos[, endpos]]) --> match object or None.
- Matches zero or more characters at the beginning of the string
- pattern
- """
- return SRE_Match(self)
-
- def scanner(self, string, pos=None, endpos=None):
- pass
-
- def search(self, string, pos=None, endpos=None):
- """
- search(string[, pos[, endpos]]) --> match object or None.
- Scan through string looking for a match, and return a corresponding
- MatchObject instance. Return None if no position in the string matches.
- """
- return SRE_Match(self)
-
-        def split(self, string, maxsplit=0):
- """
- split(string[, maxsplit = 0]) --> list.
- Split string by the occurrences of pattern.
- """
- return [str()]
-
- def sub(self, repl, string, count=0):
- """
- sub(repl, string[, count = 0]) --> newstring
- Return the string obtained by replacing the leftmost non-overlapping
- occurrences of pattern in string by the replacement repl.
- """
- return str()
-
- def subn(self, repl, string, count=0):
- """
- subn(repl, string[, count = 0]) --> (newstring, number of subs)
- Return the tuple (new_string, number_of_subs_made) found by replacing
- the leftmost non-overlapping occurrences of pattern with the
- replacement repl.
- """
- return (str(), int())
-
- return SRE_Pattern()
+def compile():
+ class SRE_Match():
+ endpos = int()
+ lastgroup = int()
+ lastindex = int()
+ pos = int()
+ string = str()
+ regs = ((int(), int()),)
+
+ def __init__(self, pattern):
+ self.re = pattern
+
+ def start(self):
+ return int()
+
+ def end(self):
+ return int()
+
+ def span(self):
+ return int(), int()
+
+ def expand(self):
+ return str()
+
+ def group(self, nr):
+ return str()
+
+ def groupdict(self):
+ return {str(): str()}
+
+ def groups(self):
+ return (str(),)
+
+ class SRE_Pattern():
+ flags = int()
+ groupindex = {}
+ groups = int()
+ pattern = str()
+
+ def findall(self, string, pos=None, endpos=None):
+ """
+ findall(string[, pos[, endpos]]) --> list.
+ Return a list of all non-overlapping matches of pattern in string.
+ """
+ return [str()]
+
+ def finditer(self, string, pos=None, endpos=None):
+ """
+ finditer(string[, pos[, endpos]]) --> iterator.
+ Return an iterator over all non-overlapping matches for the
+ RE pattern in string. For each match, the iterator returns a
+ match object.
+ """
+ yield SRE_Match(self)
+
+ def match(self, string, pos=None, endpos=None):
+ """
+ match(string[, pos[, endpos]]) --> match object or None.
+ Matches zero or more characters at the beginning of the string
+ pattern
+ """
+ return SRE_Match(self)
+
+ def scanner(self, string, pos=None, endpos=None):
+ pass
+
+ def search(self, string, pos=None, endpos=None):
+ """
+ search(string[, pos[, endpos]]) --> match object or None.
+ Scan through string looking for a match, and return a corresponding
+ MatchObject instance. Return None if no position in the string matches.
+ """
+ return SRE_Match(self)
+
+        def split(self, string, maxsplit=0):
+ """
+ split(string[, maxsplit = 0]) --> list.
+ Split string by the occurrences of pattern.
+ """
+ return [str()]
+
+ def sub(self, repl, string, count=0):
+ """
+ sub(repl, string[, count = 0]) --> newstring
+ Return the string obtained by replacing the leftmost non-overlapping
+ occurrences of pattern in string by the replacement repl.
+ """
+ return str()
+
+ def subn(self, repl, string, count=0):
+ """
+ subn(repl, string[, count = 0]) --> (newstring, number of subs)
+ Return the tuple (new_string, number_of_subs_made) found by replacing
+ the leftmost non-overlapping occurrences of pattern with the
+ replacement repl.
+ """
+ return (str(), int())
+
+ return SRE_Pattern()
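The stub mirrors the _sre surface closely enough that completion on pattern and match objects tracks the real module, whose behaviour matches the stubbed signatures:

import re

pattern = re.compile(r'\w+')
match = pattern.match('hello world')
print(match.group(0), match.span())      # hello (0, 5)
print(pattern.findall('hello world'))    # ['hello', 'world']
print(pattern.sub('x', 'hello world'))   # x x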
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym
index 26148b7df4..298d0b0dba 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/_weakref.pym
@@ -1,9 +1,9 @@
-def proxy(object, callback=None):
- return object
-
+def proxy(object, callback=None):
+ return object
+
class ref():
- def __init__(self, object, callback=None):
- self.__object = object
+ def __init__(self, object, callback=None):
+ self.__object = object
- def __call__(self):
- return self.__object
+ def __call__(self):
+ return self.__object
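What the stub models: calling a weak reference yields the referent, and proxy() is simplified to handing back the object itself. Against the real module:

import weakref

class Widget(object):
    pass

w = Widget()
r = weakref.ref(w)
print(r() is w)   # True: a ref is callable and returns its referent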
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym
index 4f737d4e8a..46ec619fb4 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/builtins.pym
@@ -1,225 +1,225 @@
-"""
-Pure Python implementation of some builtins.
-This code is not going to be executed anywhere.
-These implementations are not always correct, but should work as well as
-possible for the auto completion.
-"""
-
-
-def next(iterator, default=None):
- if random.choice([0, 1]):
-        if hasattr(iterator, "next"):
- return iterator.next()
- else:
- return iterator.__next__()
- else:
- if default is not None:
- return default
-
-
-def iter(collection, sentinel=None):
- if sentinel:
- yield collection()
- else:
- for c in collection:
- yield c
-
-
-def range(start, stop=None, step=1):
- return [0]
-
-
-class file():
- def __iter__(self):
- yield ''
-
- def next(self):
- return ''
-
+"""
+Pure Python implementation of some builtins.
+This code is not going to be executed anywhere.
+These implementations are not always correct, but should work as well as
+possible for the auto completion.
+"""
+
+
+def next(iterator, default=None):
+ if random.choice([0, 1]):
+        if hasattr(iterator, "next"):
+ return iterator.next()
+ else:
+ return iterator.__next__()
+ else:
+ if default is not None:
+ return default
+
+
+def iter(collection, sentinel=None):
+ if sentinel:
+ yield collection()
+ else:
+ for c in collection:
+ yield c
+
+
+def range(start, stop=None, step=1):
+ return [0]
+
+
+class file():
+ def __iter__(self):
+ yield ''
+
+ def next(self):
+ return ''
+
def readlines(self):
return ['']
-
+
def __enter__(self):
return self
-class xrange():
- # Attention: this function doesn't exist in Py3k (there it is range).
- def __iter__(self):
- yield 1
-
- def count(self):
- return 1
-
- def index(self):
- return 1
-
-
-def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
- import io
- return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd)
-
-
-def open_python2(name, mode=None, buffering=None):
- return file(name, mode, buffering)
-
-
-#--------------------------------------------------------
-# descriptors
-#--------------------------------------------------------
-class property():
- def __init__(self, fget, fset=None, fdel=None, doc=None):
- self.fget = fget
- self.fset = fset
- self.fdel = fdel
- self.__doc__ = doc
-
- def __get__(self, obj, cls):
- return self.fget(obj)
-
- def __set__(self, obj, value):
- self.fset(obj, value)
-
- def __delete__(self, obj):
- self.fdel(obj)
-
- def setter(self, func):
- self.fset = func
- return self
-
- def getter(self, func):
- self.fget = func
- return self
-
- def deleter(self, func):
- self.fdel = func
- return self
-
-
-class staticmethod():
- def __init__(self, func):
- self.__func = func
-
- def __get__(self, obj, cls):
- return self.__func
-
-
-class classmethod():
- def __init__(self, func):
- self.__func = func
-
- def __get__(self, obj, cls):
- def _method(*args, **kwargs):
- return self.__func(cls, *args, **kwargs)
- return _method
-
-
-#--------------------------------------------------------
-# array stuff
-#--------------------------------------------------------
-class list():
- def __init__(self, iterable=[]):
- self.__iterable = []
- for i in iterable:
- self.__iterable += [i]
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def __getitem__(self, y):
- return self.__iterable[y]
-
- def pop(self):
+class xrange():
+ # Attention: this function doesn't exist in Py3k (there it is range).
+ def __iter__(self):
+ yield 1
+
+ def count(self):
+ return 1
+
+ def index(self):
+ return 1
+
+
+def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
+ import io
+ return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd)
+
+
+def open_python2(name, mode=None, buffering=None):
+ return file(name, mode, buffering)
+
+
+#--------------------------------------------------------
+# descriptors
+#--------------------------------------------------------
+class property():
+ def __init__(self, fget, fset=None, fdel=None, doc=None):
+ self.fget = fget
+ self.fset = fset
+ self.fdel = fdel
+ self.__doc__ = doc
+
+ def __get__(self, obj, cls):
+ return self.fget(obj)
+
+ def __set__(self, obj, value):
+ self.fset(obj, value)
+
+ def __delete__(self, obj):
+ self.fdel(obj)
+
+ def setter(self, func):
+ self.fset = func
+ return self
+
+ def getter(self, func):
+ self.fget = func
+ return self
+
+ def deleter(self, func):
+ self.fdel = func
+ return self
+
+
+class staticmethod():
+ def __init__(self, func):
+ self.__func = func
+
+ def __get__(self, obj, cls):
+ return self.__func
+
+
+class classmethod():
+ def __init__(self, func):
+ self.__func = func
+
+ def __get__(self, obj, cls):
+ def _method(*args, **kwargs):
+ return self.__func(cls, *args, **kwargs)
+ return _method
+
+
+#--------------------------------------------------------
+# array stuff
+#--------------------------------------------------------
+class list():
+ def __init__(self, iterable=[]):
+ self.__iterable = []
+ for i in iterable:
+ self.__iterable += [i]
+
+ def __iter__(self):
+ for i in self.__iterable:
+ yield i
+
+ def __getitem__(self, y):
+ return self.__iterable[y]
+
+ def pop(self):
return self.__iterable[int()]
-
-
-class tuple():
- def __init__(self, iterable=[]):
- self.__iterable = []
- for i in iterable:
- self.__iterable += [i]
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def __getitem__(self, y):
- return self.__iterable[y]
-
- def index(self):
- return 1
-
- def count(self):
- return 1
-
-
-class set():
- def __init__(self, iterable=[]):
- self.__iterable = iterable
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def pop(self):
- return list(self.__iterable)[-1]
-
- def copy(self):
- return self
-
- def difference(self, other):
- return self - other
-
- def intersection(self, other):
- return self & other
-
- def symmetric_difference(self, other):
- return self ^ other
-
- def union(self, other):
- return self | other
-
-
-class frozenset():
- def __init__(self, iterable=[]):
- self.__iterable = iterable
-
- def __iter__(self):
- for i in self.__iterable:
- yield i
-
- def copy(self):
- return self
-
-
-class dict():
- def __init__(self, **elements):
- self.__elements = elements
-
- def clear(self):
- # has a strange docstr
- pass
-
+
+
+class tuple():
+ def __init__(self, iterable=[]):
+ self.__iterable = []
+ for i in iterable:
+ self.__iterable += [i]
+
+ def __iter__(self):
+ for i in self.__iterable:
+ yield i
+
+ def __getitem__(self, y):
+ return self.__iterable[y]
+
+ def index(self):
+ return 1
+
+ def count(self):
+ return 1
+
+
+class set():
+ def __init__(self, iterable=[]):
+ self.__iterable = iterable
+
+ def __iter__(self):
+ for i in self.__iterable:
+ yield i
+
+ def pop(self):
+ return list(self.__iterable)[-1]
+
+ def copy(self):
+ return self
+
+ def difference(self, other):
+ return self - other
+
+ def intersection(self, other):
+ return self & other
+
+ def symmetric_difference(self, other):
+ return self ^ other
+
+ def union(self, other):
+ return self | other
+
+
+class frozenset():
+ def __init__(self, iterable=[]):
+ self.__iterable = iterable
+
+ def __iter__(self):
+ for i in self.__iterable:
+ yield i
+
+ def copy(self):
+ return self
+
+
+class dict():
+ def __init__(self, **elements):
+ self.__elements = elements
+
+ def clear(self):
+ # has a strange docstr
+ pass
+
def __getitem__(self, obj):
return self.__elements[obj]
- def get(self, k, d=None):
- # TODO implement
- try:
+ def get(self, k, d=None):
+ # TODO implement
+ try:
return self.__elements[k]
- pass
- except KeyError:
- return d
-
+ pass
+ except KeyError:
+ return d
+
def values(self):
return self.__elements.values()
- def setdefault(self, k, d):
- # TODO maybe also return the content
- return d
-
-
+ def setdefault(self, k, d):
+ # TODO maybe also return the content
+ return d
+
+
class enumerate():
def __init__(self, sequence, start=0):
self.__sequence = sequence
@@ -235,43 +235,43 @@ class enumerate():
return next(self.__iter__())
-class reversed():
- def __init__(self, sequence):
- self.__sequence = sequence
-
- def __iter__(self):
- for i in self.__sequence:
- yield i
-
- def __next__(self):
- return next(self.__iter__())
-
- def next(self):
- return next(self.__iter__())
-
-
-def sorted(iterable, cmp=None, key=None, reverse=False):
- return iterable
-
-
-#--------------------------------------------------------
-# basic types
-#--------------------------------------------------------
-class int():
- def __init__(self, x, base=None):
- pass
-
-
-class str():
- def __init__(self, obj):
- pass
-
+class reversed():
+ def __init__(self, sequence):
+ self.__sequence = sequence
+
+ def __iter__(self):
+ for i in self.__sequence:
+ yield i
+
+ def __next__(self):
+ return next(self.__iter__())
+
+ def next(self):
+ return next(self.__iter__())
+
+
+def sorted(iterable, cmp=None, key=None, reverse=False):
+ return iterable
+
+
+#--------------------------------------------------------
+# basic types
+#--------------------------------------------------------
+class int():
+ def __init__(self, x, base=None):
+ pass
+
+
+class str():
+ def __init__(self, obj):
+ pass
+
def strip(self):
return str()
-
+
def split(self):
return [str()]
-class type():
- def mro():
- return [object]
+class type():
+ def mro():
+ return [object]
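The property stand-in above implements the full descriptor protocol (__get__, __set__, __delete__ plus setter/getter/deleter), which is what lets the evaluator trace attribute access through it. The same methods drive the real builtin:

class Circle(object):
    def __init__(self, radius):
        self._radius = radius

    @property
    def radius(self):
        return self._radius

    @radius.setter
    def radius(self, value):
        self._radius = value

c = Circle(2.0)
c.radius = 3.0      # goes through the setter, i.e. property.__set__
print(c.radius)     # 3.0, via property.__get__ -> fget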
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym
index d981e60051..823ac5b7fd 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/datetime.pym
@@ -1,4 +1,4 @@
-class datetime():
- @staticmethod
- def now():
- return datetime()
+class datetime():
+ @staticmethod
+ def now():
+ return datetime()
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym
index e97310e742..c1f4fc0116 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/io.pym
@@ -1,9 +1,9 @@
-class TextIOWrapper():
- def __next__(self):
- return str()
-
- def __iter__(self):
- yield str()
+class TextIOWrapper():
+ def __next__(self):
+ return str()
+
+ def __iter__(self):
+ yield str()
def readlines(self):
return ['']
diff --git a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym b/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym
index 2c8f6a56e3..4417f7cb04 100644
--- a/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym
+++ b/contrib/python/jedi/jedi/evaluate/compiled/fake/posix.pym
@@ -1,5 +1,5 @@
-def getcwd():
- return ''
-
-def getcwdu():
- return ''
+def getcwd():
+ return ''
+
+def getcwdu():
+ return ''
diff --git a/contrib/python/jedi/jedi/evaluate/docstrings.py b/contrib/python/jedi/jedi/evaluate/docstrings.py
index 1983a7635b..aaef8ea853 100644
--- a/contrib/python/jedi/jedi/evaluate/docstrings.py
+++ b/contrib/python/jedi/jedi/evaluate/docstrings.py
@@ -1,49 +1,49 @@
-"""
-Docstrings are another source of information for functions and classes.
-:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while
+"""
+Docstrings are another source of information for functions and classes.
+:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while
the docstring parsing is much easier. There are three different types of
-docstrings that |jedi| understands:
-
-- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
-- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
+docstrings that |jedi| understands:
+
+- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
+- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
- `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
-
-For example, the sphinx annotation ``:type foo: str`` clearly states that the
-type of ``foo`` is ``str``.
-
-In addition to parameter searching, this module also provides return
-annotations.
-"""
-
-import re
-from textwrap import dedent
-
+
+For example, the sphinx annotation ``:type foo: str`` clearly states that the
+type of ``foo`` is ``str``.
+
+In addition to parameter searching, this module also provides return
+annotations.
+"""
+
+import re
+from textwrap import dedent
+
from parso import parse, ParserSyntaxError
-
+
from jedi._compatibility import u
from jedi.evaluate.utils import indent_block
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \
NO_CONTEXTS
from jedi.evaluate.lazy_context import LazyKnownContexts
-
-DOCSTRING_PARAM_PATTERNS = [
- r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
+
+DOCSTRING_PARAM_PATTERNS = [
+ r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type
- r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
-]
-
-DOCSTRING_RETURN_PATTERNS = [
- re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
- re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
-]
-
-REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
-
-
+ r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
+]
+
+DOCSTRING_RETURN_PATTERNS = [
+ re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
+ re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
+]
+
+REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
+
+
_numpy_doc_string_cache = None
-
+
def _get_numpy_doc_string_cls():
global _numpy_doc_string_cache
@@ -65,7 +65,7 @@ def _search_param_in_numpydocstr(docstr, param_str):
# prepared and return gracefully.
params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
except (KeyError, AttributeError, ImportError):
- return []
+ return []
for p_name, p_type, p_descr in params:
if p_name == param_str:
m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
@@ -73,8 +73,8 @@ def _search_param_in_numpydocstr(docstr, param_str):
p_type = m.group(1)
return list(_expand_typestr(p_type))
return []
-
-
+
+
def _search_return_in_numpydocstr(docstr):
"""
Search `docstr` (in numpydoc format) for type(-s) of function returns.
@@ -131,59 +131,59 @@ def _expand_typestr(type_str):
yield type_str
-def _search_param_in_docstr(docstr, param_str):
- """
- Search `docstr` for type(-s) of `param_str`.
-
- >>> _search_param_in_docstr(':type param: int', 'param')
- ['int']
- >>> _search_param_in_docstr('@type param: int', 'param')
- ['int']
- >>> _search_param_in_docstr(
- ... ':type param: :class:`threading.Thread`', 'param')
- ['threading.Thread']
- >>> bool(_search_param_in_docstr('no document', 'param'))
- False
- >>> _search_param_in_docstr(':param int param: some description', 'param')
- ['int']
-
- """
- # look at #40 to see definitions of those params
- patterns = [re.compile(p % re.escape(param_str))
- for p in DOCSTRING_PARAM_PATTERNS]
- for pattern in patterns:
- match = pattern.search(docstr)
- if match:
- return [_strip_rst_role(match.group(1))]
-
+def _search_param_in_docstr(docstr, param_str):
+ """
+ Search `docstr` for type(-s) of `param_str`.
+
+ >>> _search_param_in_docstr(':type param: int', 'param')
+ ['int']
+ >>> _search_param_in_docstr('@type param: int', 'param')
+ ['int']
+ >>> _search_param_in_docstr(
+ ... ':type param: :class:`threading.Thread`', 'param')
+ ['threading.Thread']
+ >>> bool(_search_param_in_docstr('no document', 'param'))
+ False
+ >>> _search_param_in_docstr(':param int param: some description', 'param')
+ ['int']
+
+ """
+ # look at #40 to see definitions of those params
+ patterns = [re.compile(p % re.escape(param_str))
+ for p in DOCSTRING_PARAM_PATTERNS]
+ for pattern in patterns:
+ match = pattern.search(docstr)
+ if match:
+ return [_strip_rst_role(match.group(1))]
+
return _search_param_in_numpydocstr(docstr, param_str)
-
-
-def _strip_rst_role(type_str):
- """
-    Strip off the part that looks like a ReST role in `type_str`.
-
- >>> _strip_rst_role(':class:`ClassName`') # strip off :class:
- 'ClassName'
- >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
- 'module.Object'
- >>> _strip_rst_role('ClassName') # do nothing when not ReST role
- 'ClassName'
-
- See also:
- http://sphinx-doc.org/domains.html#cross-referencing-python-objects
-
- """
- match = REST_ROLE_PATTERN.match(type_str)
- if match:
- return match.group(1)
- else:
- return type_str
-
-
+
+
+def _strip_rst_role(type_str):
+ """
+    Strip off the part that looks like a ReST role in `type_str`.
+
+ >>> _strip_rst_role(':class:`ClassName`') # strip off :class:
+ 'ClassName'
+ >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
+ 'module.Object'
+ >>> _strip_rst_role('ClassName') # do nothing when not ReST role
+ 'ClassName'
+
+ See also:
+ http://sphinx-doc.org/domains.html#cross-referencing-python-objects
+
+ """
+ match = REST_ROLE_PATTERN.match(type_str)
+ if match:
+ return match.group(1)
+ else:
+ return type_str
+
+
def _evaluate_for_statement_string(module_context, string):
code = dedent(u("""
- def pseudo_docstring_stuff():
+ def pseudo_docstring_stuff():
'''
Create a pseudo function for docstring statements.
Need this docstring so that if the below part is not valid Python this
@@ -191,30 +191,30 @@ def _evaluate_for_statement_string(module_context, string):
'''
{}
"""))
- if string is None:
- return []
-
+ if string is None:
+ return []
+
for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
- # Try to import module part in dotted name.
- # (e.g., 'threading' in 'threading.Thread').
- string = 'import %s\n' % element + string
-
- # Take the default grammar here, if we load the Python 2.7 grammar here, it
- # will be impossible to use `...` (Ellipsis) as a token. Docstring types
- # don't need to conform with the current grammar.
+ # Try to import module part in dotted name.
+ # (e.g., 'threading' in 'threading.Thread').
+ string = 'import %s\n' % element + string
+
+ # Take the default grammar here, if we load the Python 2.7 grammar here, it
+ # will be impossible to use `...` (Ellipsis) as a token. Docstring types
+ # don't need to conform with the current grammar.
grammar = module_context.evaluator.latest_grammar
- try:
+ try:
module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
except ParserSyntaxError:
return []
try:
funcdef = next(module.iter_funcdefs())
# First pick suite, then simple_stmt and then the node,
- # which is also not the last item, because there's a newline.
+ # which is also not the last item, because there's a newline.
stmt = funcdef.children[-1].children[-1].children[-2]
- except (AttributeError, IndexError):
- return []
-
+ except (AttributeError, IndexError):
+ return []
+
if stmt.type not in ('name', 'atom', 'atom_expr'):
return []
@@ -225,34 +225,34 @@ def _evaluate_for_statement_string(module_context, string):
funcdef
)
func_execution_context = function_context.get_function_execution()
- # Use the module of the param.
- # TODO this module is not the module of the param in case of a function
- # call. In that case it's the module of the function call.
- # stuffed with content from a function call.
+ # Use the module of the param.
+ # TODO this module is not the module of the param in case of a function
+ # call. In that case it's the module of the function call.
+ # stuffed with content from a function call.
return list(_execute_types_in_stmt(func_execution_context, stmt))
-
-
+
+
def _execute_types_in_stmt(module_context, stmt):
- """
- Executing all types or general elements that we find in a statement. This
- doesn't include tuple, list and dict literals, because the stuff they
- contain is executed. (Used as type information).
- """
+ """
+ Executing all types or general elements that we find in a statement. This
+ doesn't include tuple, list and dict literals, because the stuff they
+ contain is executed. (Used as type information).
+ """
definitions = module_context.eval_node(stmt)
return ContextSet.from_sets(
_execute_array_values(module_context.evaluator, d)
for d in definitions
)
-
-
-def _execute_array_values(evaluator, array):
- """
- Tuples indicate that there's not just one return value, but the listed
- ones. `(str, int)` means that it returns a tuple with both types.
- """
+
+
+def _execute_array_values(evaluator, array):
+ """
+ Tuples indicate that there's not just one return value, but the listed
+ ones. `(str, int)` means that it returns a tuple with both types.
+ """
from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
if isinstance(array, SequenceLiteralContext):
- values = []
+ values = []
for lazy_context in array.py__iter__():
objects = ContextSet.from_sets(
_execute_array_values(evaluator, typ)
@@ -260,15 +260,15 @@ def _execute_array_values(evaluator, array):
)
values.append(LazyKnownContexts(objects))
return {FakeSequence(evaluator, array.array_type, values)}
- else:
+ else:
return array.execute_evaluated()
-
-
+
+
@evaluator_method_cache()
def infer_param(execution_context, param):
from jedi.evaluate.context.instance import InstanceArguments
from jedi.evaluate.context import FunctionExecutionContext
-
+
def eval_docstring(docstring):
return ContextSet.from_iterable(
p
@@ -279,29 +279,29 @@ def infer_param(execution_context, param):
func = param.get_parent_function()
if func.type == 'lambdef':
return NO_CONTEXTS
-
+
types = eval_docstring(execution_context.py__doc__())
if isinstance(execution_context, FunctionExecutionContext) \
and isinstance(execution_context.var_args, InstanceArguments) \
and execution_context.function_context.py__name__() == '__init__':
class_context = execution_context.var_args.instance.class_context
types |= eval_docstring(class_context.py__doc__())
-
+
return types
@evaluator_method_cache()
@iterator_to_context_set
def infer_return_types(function_context):
- def search_return_in_docstr(code):
- for p in DOCSTRING_RETURN_PATTERNS:
- match = p.search(code)
- if match:
+ def search_return_in_docstr(code):
+ for p in DOCSTRING_RETURN_PATTERNS:
+ match = p.search(code)
+ if match:
yield _strip_rst_role(match.group(1))
# Check for numpy style return hint
for type_ in _search_return_in_numpydocstr(code):
yield type_
-
+
for type_str in search_return_in_docstr(function_context.py__doc__()):
for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str):
yield type_eval
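The Sphinx/Epydoc parameter patterns and REST_ROLE_PATTERN defined above compose like this; a self-contained sketch reusing the same regular expressions outside of jedi:

import re

TYPE_PATTERNS = [
    r'\s*:type\s+%s:\s*([^\n]+)',   # Sphinx
    r'\s*@type\s+%s:\s*([^\n]+)',   # Epydoc
]
REST_ROLE = re.compile(r':[^`]+:`([^`]+)`')

docstring = ':type widget: :class:`threading.Thread`'

for template in TYPE_PATTERNS:
    match = re.search(template % re.escape('widget'), docstring)
    if match:
        annotation = match.group(1)
        role = REST_ROLE.match(annotation)
        print(role.group(1) if role else annotation)   # threading.Thread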
diff --git a/contrib/python/jedi/jedi/evaluate/dynamic.py b/contrib/python/jedi/jedi/evaluate/dynamic.py
index dcf7796d3b..7f7b0d87bd 100644
--- a/contrib/python/jedi/jedi/evaluate/dynamic.py
+++ b/contrib/python/jedi/jedi/evaluate/dynamic.py
@@ -1,27 +1,27 @@
-"""
-One of the really important features of |jedi| is to have an option to
-understand code like this::
-
- def foo(bar):
- bar. # completion here
- foo(1)
-
-There's no doubt whether bar is an ``int`` or not, but if there's also a call
-like ``foo('str')``, what would happen? Well, we'll just show both. Because
-that's what a human would expect.
-
-It works as follows:
-
-- |Jedi| sees a param
-- search for function calls named ``foo``
+"""
+One of the really important features of |jedi| is to have an option to
+understand code like this::
+
+ def foo(bar):
+ bar. # completion here
+ foo(1)
+
+There's no doubt whether bar is an ``int`` or not, but if there's also a call
+like ``foo('str')``, what would happen? Well, we'll just show both. Because
+that's what a human would expect.
+
+It works as follows:
+
+- |Jedi| sees a param
+- search for function calls named ``foo``
- execute these calls and check the input.
-"""
-
+"""
+
from parso.python import tree
-from jedi import settings
-from jedi import debug
+from jedi import settings
+from jedi import debug
from jedi.evaluate.cache import evaluator_function_cache
-from jedi.evaluate import imports
+from jedi.evaluate import imports
from jedi.evaluate.arguments import TreeArguments
from jedi.evaluate.param import create_default_params
from jedi.evaluate.helpers import is_stdlib_path
@@ -30,20 +30,20 @@ from jedi.parser_utils import get_parent_scope
from jedi.evaluate.context import ModuleContext, instance
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate import recursion
-
-
+
+
MAX_PARAM_SEARCHES = 20
class DynamicExecutedParams(object):
- """
+ """
Simulates being a parameter while actually just being multiple params.
- """
-
+ """
+
def __init__(self, evaluator, executed_params):
self.evaluator = evaluator
self._executed_params = executed_params
-
+
def infer(self):
with recursion.execution_allowed(self.evaluator, self) as allowed:
# We need to catch recursions that may occur, because an
@@ -52,25 +52,25 @@ class DynamicExecutedParams(object):
if allowed:
return ContextSet.from_sets(p.infer() for p in self._executed_params)
return NO_CONTEXTS
-
-@debug.increase_indent
+
+@debug.increase_indent
def search_params(evaluator, execution_context, funcdef):
- """
- A dynamic search for param values. If you try to complete a type:
-
- >>> def func(foo):
- ... foo
- >>> func(1)
- >>> func("")
-
-    It is not known what type ``foo`` is without analysing the whole code. You
- have to look for all calls to ``func`` to find out what ``foo`` possibly
- is.
- """
- if not settings.dynamic_params:
+ """
+ A dynamic search for param values. If you try to complete a type:
+
+ >>> def func(foo):
+ ... foo
+ >>> func(1)
+ >>> func("")
+
+    It is not known what type ``foo`` is without analysing the whole code. You
+ have to look for all calls to ``func`` to find out what ``foo`` possibly
+ is.
+ """
+ if not settings.dynamic_params:
return create_default_params(execution_context, funcdef)
-
+
evaluator.dynamic_params_depth += 1
try:
path = execution_context.get_root_context().py__file__()
@@ -80,7 +80,7 @@ def search_params(evaluator, execution_context, funcdef):
# This makes everything slower. Just disable it and run the tests,
# you will see the slowdown, especially in 3.6.
return create_default_params(execution_context, funcdef)
-
+
if funcdef.type == 'lambdef':
string_name = _get_lambda_name(funcdef)
if string_name is None:
@@ -88,7 +88,7 @@ def search_params(evaluator, execution_context, funcdef):
else:
string_name = funcdef.name.value
debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
-
+
try:
module_context = execution_context.get_root_context()
function_executions = _search_function_executions(
@@ -116,16 +116,16 @@ def search_params(evaluator, execution_context, funcdef):
@evaluator_function_cache(default=None)
@to_list
def _search_function_executions(evaluator, module_context, funcdef, string_name):
- """
- Returns a list of param names.
- """
+ """
+ Returns a list of param names.
+ """
compare_node = funcdef
if string_name == '__init__':
cls = get_parent_scope(funcdef)
if isinstance(cls, tree.Class):
string_name = cls.name.value
compare_node = cls
-
+
found_executions = False
i = 0
for for_mod_context in imports.get_modules_containing_name(
@@ -134,25 +134,25 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name)
return
for name, trailer in _get_possible_nodes(for_mod_context, string_name):
i += 1
-
+
# This is a simple way to stop Jedi's dynamic param recursion
# from going wild: The deeper Jedi's in the recursion, the less
# code should be evaluated.
if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES:
return
-
+
random_context = evaluator.create_context(for_mod_context, name)
for function_execution in _check_name_for_execution(
evaluator, random_context, compare_node, name, trailer):
found_executions = True
yield function_execution
-
+
# If there are results after processing a module, we're probably
# good to process. This is a speed optimization.
if found_executions:
return
-
-
+
+
def _get_lambda_name(node):
stmt = node.parent
if stmt.type == 'expr_stmt':
@@ -161,16 +161,16 @@ def _get_lambda_name(node):
first = stmt.children[0]
if first.type == 'name':
return first.value
-
+
return None
-
+
def _get_possible_nodes(module_context, func_string_name):
- try:
+ try:
names = module_context.tree_node.get_used_names()[func_string_name]
except KeyError:
return
-
+
for name in names:
bracket = name.get_next_leaf()
trailer = bracket.parent
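Driving the behaviour the module docstring promises through the public API, again assuming this vendored copy's pre-0.16 Script(source, line, column) signature and settings.dynamic_params left at its default of True:

import jedi

source = '''\
def foo(bar):
    bar.
foo(1)
foo("str")
'''

# Completion after "bar." merges what both call sites passed in, so both
# int and str attributes show up.
script = jedi.Script(source, 2, 8)
names = {c.name for c in script.completions()}
print('real' in names, 'upper' in names)   # True True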
diff --git a/contrib/python/jedi/jedi/evaluate/finder.py b/contrib/python/jedi/jedi/evaluate/finder.py
index 1633e3ae65..5e7043f796 100644
--- a/contrib/python/jedi/jedi/evaluate/finder.py
+++ b/contrib/python/jedi/jedi/evaluate/finder.py
@@ -1,41 +1,41 @@
-"""
-Searching for names with a given scope and name. This is very central in Jedi and
-Python. The name resolution is quite complicated with descriptors,
-``__getattribute__``, ``__getattr__``, ``global``, etc.
-
+"""
+Searching for names with a given scope and name. This is very central in Jedi and
+Python. The name resolution is quite complicated with descriptors,
+``__getattribute__``, ``__getattr__``, ``global``, etc.
+
If you want to understand name resolution, please read the first few chapters
in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
-Flow checks
-+++++++++++
-
-Flow checks are not really mature. There's only a check for ``isinstance``. It
-would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
-Unfortunately everything else is ignored (e.g. a == '' would be easy to
-check for -> a is a string). There's big potential in these checks.
-"""
-
+Flow checks
++++++++++++
+
+Flow checks are not really mature. There's only a check for ``isinstance``. It
+would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
+Unfortunately everything else is ignored (e.g. a == '' would be easy to
+check for -> a is a string). There's big potential in these checks.
+"""
+
from parso.python import tree
from parso.tree import search_ancestor
-from jedi import debug
-from jedi import settings
+from jedi import debug
+from jedi import settings
from jedi.evaluate.context import AbstractInstanceContext
-from jedi.evaluate import compiled
-from jedi.evaluate import analysis
-from jedi.evaluate import flow_analysis
+from jedi.evaluate import compiled
+from jedi.evaluate import analysis
+from jedi.evaluate import flow_analysis
from jedi.evaluate.arguments import TreeArguments
-from jedi.evaluate import helpers
+from jedi.evaluate import helpers
from jedi.evaluate.context import iterable
from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
from jedi.evaluate.base_context import ContextSet
from jedi.parser_utils import is_scope, get_parent_scope
-
-
-class NameFinder(object):
+
+
+class NameFinder(object):
def __init__(self, evaluator, context, name_context, name_or_str,
position=None, analysis_errors=True):
- self._evaluator = evaluator
- # Make sure that it's not just a syntax tree node.
+ self._evaluator = evaluator
+ # Make sure that it's not just a syntax tree node.
self._context = context
self._name_context = name_context
self._name = name_or_str
@@ -46,8 +46,8 @@ class NameFinder(object):
self._position = position
self._found_predefined_types = None
self._analysis_errors = analysis_errors
-
- @debug.increase_indent
+
+ @debug.increase_indent
def find(self, filters, attribute_lookup):
"""
:params bool attribute_lookup: Tell to logic if we're accessing the
@@ -63,7 +63,7 @@ class NameFinder(object):
if check is flow_analysis.UNREACHABLE:
return ContextSet()
return self._found_predefined_types
-
+
types = self._names_to_types(names, attribute_lookup)
if not names and self._analysis_errors and not types \
@@ -74,12 +74,12 @@ class NameFinder(object):
analysis.add_attribute_error(
self._name_context, self._context, self._name)
else:
- message = ("NameError: name '%s' is not defined."
+ message = ("NameError: name '%s' is not defined."
% self._string_name)
analysis.add(self._name_context, 'name-error', self._name, message)
-
- return types
-
+
+ return types
+
def _get_origin_scope(self):
if isinstance(self._name, tree.Name):
scope = self._name
@@ -89,14 +89,14 @@ class NameFinder(object):
break
scope = scope.parent
return scope
- else:
+ else:
return None
-
+
def get_filters(self, search_global=False):
origin_scope = self._get_origin_scope()
if search_global:
position = self._position
-
+
# For functions and classes the defaults don't belong to the
# function and get evaluated in the context before the function. So
# make sure to exclude the function/class name.
@@ -113,17 +113,17 @@ class NameFinder(object):
if position < colon.start_pos:
if lambdef is None or position < lambdef.children[-2].start_pos:
position = ancestor.start_pos
-
+
return get_global_filters(self._evaluator, self._context, position, origin_scope)
else:
return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)
-
+
def filter_name(self, filters):
- """
- Searches names that are defined in a scope (the different
+ """
+ Searches names that are defined in a scope (the different
``filters``), until a name fits.
- """
- names = []
+ """
+ names = []
if self._context.predefined_names and isinstance(self._name, tree.Name):
node = self._name
while node is not None and not is_scope(node):
@@ -140,7 +140,7 @@ class NameFinder(object):
for filter in filters:
names = filter.get(self._string_name)
- if names:
+ if names:
if len(names) == 1:
n, = names
if isinstance(n, TreeNameDefinition):
@@ -152,17 +152,17 @@ class NameFinder(object):
if n.tree_name == self._name:
if self._name.get_definition().type == 'import_from':
continue
- break
-
+ break
+
debug.dbg('finder.filter_name %s in (%s): %s@%s',
self._string_name, self._context, names, self._position)
return list(names)
-
- def _check_getattr(self, inst):
- """Checks for both __getattr__ and __getattribute__ methods"""
- # str is important, because it shouldn't be `Name`!
+
+ def _check_getattr(self, inst):
+ """Checks for both __getattr__ and __getattribute__ methods"""
+ # str is important, because it shouldn't be `Name`!
name = compiled.create_simple_object(self._evaluator, self._string_name)
-
+
# This is a little bit special. `__getattribute__` is in Python
# executed before `__getattr__`. But: I know no use case, where
# this could be practical and where Jedi would return wrong types.
@@ -173,15 +173,15 @@ class NameFinder(object):
names = (inst.get_function_slot_names(u'__getattr__') or
inst.get_function_slot_names(u'__getattribute__'))
return inst.execute_function_slots(names, name)
-
+
def _names_to_types(self, names, attribute_lookup):
contexts = ContextSet.from_sets(name.infer() for name in names)
-
+
debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
if not names and isinstance(self._context, AbstractInstanceContext):
- # handling __getattr__ / __getattribute__
+ # handling __getattr__ / __getattribute__
return self._check_getattr(self._context)
-
+
# Add isinstance and other if/assert knowledge.
if not contexts and isinstance(self._name, tree.Name) and \
not isinstance(self._name_context, AbstractInstanceContext):
@@ -198,25 +198,25 @@ class NameFinder(object):
if flow_scope == base_node:
break
return contexts
-
-
+
+
def _check_flow_information(context, flow, search_name, pos):
- """ Try to find out the type of a variable just with the information that
- is given by the flows: e.g. It is also responsible for assert checks.::
-
- if isinstance(k, str):
- k. # <- completion here
-
- ensures that `k` is a string.
- """
- if not settings.dynamic_flow_information:
- return None
-
+ """ Try to find out the type of a variable just with the information that
+ is given by the flows: e.g. It is also responsible for assert checks.::
+
+ if isinstance(k, str):
+ k. # <- completion here
+
+ ensures that `k` is a string.
+ """
+ if not settings.dynamic_flow_information:
+ return None
+
result = None
if is_scope(flow):
- # Check for asserts.
+ # Check for asserts.
module_node = flow.get_root_node()
- try:
+ try:
names = module_node.get_used_names()[search_name.value]
except KeyError:
return None
@@ -224,49 +224,49 @@ def _check_flow_information(context, flow, search_name, pos):
n for n in names
if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
])
-
- for name in names:
+
+ for name in names:
ass = search_ancestor(name, 'assert_stmt')
if ass is not None:
result = _check_isinstance_type(context, ass.assertion, search_name)
if result is not None:
return result
-
+
if flow.type in ('if_stmt', 'while_stmt'):
potential_ifs = [c for c in flow.children[1::4] if c != ':']
for if_test in reversed(potential_ifs):
if search_name.start_pos > if_test.end_pos:
return _check_isinstance_type(context, if_test, search_name)
- return result
-
-
+ return result
+
+
def _check_isinstance_type(context, element, search_name):
- try:
+ try:
assert element.type in ('power', 'atom_expr')
- # this might be removed if we analyze and, etc
- assert len(element.children) == 2
- first, trailer = element.children
+ # this might be removed if we analyze and, etc
+ assert len(element.children) == 2
+ first, trailer = element.children
assert first.type == 'name' and first.value == 'isinstance'
- assert trailer.type == 'trailer' and trailer.children[0] == '('
- assert len(trailer.children) == 3
-
- # arglist stuff
- arglist = trailer.children[1]
+ assert trailer.type == 'trailer' and trailer.children[0] == '('
+ assert len(trailer.children) == 3
+
+ # arglist stuff
+ arglist = trailer.children[1]
args = TreeArguments(context.evaluator, context, arglist, trailer)
param_list = list(args.unpack())
- # Disallow keyword arguments
+ # Disallow keyword arguments
assert len(param_list) == 2
(key1, lazy_context_object), (key2, lazy_context_cls) = param_list
assert key1 is None and key2 is None
call = helpers.call_of_leaf(search_name)
is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
- # Do a simple get_code comparison. They should just have the same code,
- # and everything will be all right.
+ # Do a simple get_code comparison. They should just have the same code,
+ # and everything will be all right.
normalize = context.evaluator.grammar._normalize
assert normalize(is_instance_call) == normalize(call)
- except AssertionError:
+ except AssertionError:
return None
-
+
context_set = ContextSet()
for cls_or_tup in lazy_context_cls.infer():
if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
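
The ``_check_isinstance_type`` hunk above pattern-matches tests of the form
``isinstance(a, type_or_tuple)`` on parso trees. For readers without a parso
setup, here is a minimal sketch of the same idea over the stdlib ``ast``
module; ``isinstance_checks`` is a name invented for this illustration and is
not part of jedi:

    import ast

    def isinstance_checks(source, var):
        """Collect the type names that `var` is isinstance-checked against."""
        found = []
        for node in ast.walk(ast.parse(source)):
            # Match calls of the exact shape isinstance(var, SomeType);
            # anything more complex (tuples, and/or chains) is ignored,
            # just as the vendored code ignores it.
            if (isinstance(node, ast.Call)
                    and isinstance(node.func, ast.Name)
                    and node.func.id == 'isinstance'
                    and len(node.args) == 2
                    and isinstance(node.args[0], ast.Name)
                    and node.args[0].id == var
                    and isinstance(node.args[1], ast.Name)):
                found.append(node.args[1].id)
        return found

    print(isinstance_checks("if isinstance(k, str):\n    k\n", "k"))  # -> ['str']
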
diff --git a/contrib/python/jedi/jedi/evaluate/flow_analysis.py b/contrib/python/jedi/jedi/evaluate/flow_analysis.py
index 2f75cf2f87..474071f14c 100644
--- a/contrib/python/jedi/jedi/evaluate/flow_analysis.py
+++ b/contrib/python/jedi/jedi/evaluate/flow_analysis.py
@@ -1,52 +1,52 @@
from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
from jedi.evaluate.recursion import execution_allowed
-
-
-class Status(object):
- lookup_table = {}
-
- def __init__(self, value, name):
- self._value = value
- self._name = name
- Status.lookup_table[value] = self
-
- def invert(self):
- if self is REACHABLE:
- return UNREACHABLE
- elif self is UNREACHABLE:
- return REACHABLE
- else:
- return UNSURE
-
- def __and__(self, other):
- if UNSURE in (self, other):
- return UNSURE
- else:
- return REACHABLE if self._value and other._value else UNREACHABLE
-
- def __repr__(self):
- return '<%s: %s>' % (type(self).__name__, self._name)
-
-
-REACHABLE = Status(True, 'reachable')
-UNREACHABLE = Status(False, 'unreachable')
-UNSURE = Status(None, 'unsure')
-
-
+
+
+class Status(object):
+ lookup_table = {}
+
+ def __init__(self, value, name):
+ self._value = value
+ self._name = name
+ Status.lookup_table[value] = self
+
+ def invert(self):
+ if self is REACHABLE:
+ return UNREACHABLE
+ elif self is UNREACHABLE:
+ return REACHABLE
+ else:
+ return UNSURE
+
+ def __and__(self, other):
+ if UNSURE in (self, other):
+ return UNSURE
+ else:
+ return REACHABLE if self._value and other._value else UNREACHABLE
+
+ def __repr__(self):
+ return '<%s: %s>' % (type(self).__name__, self._name)
+
+
+REACHABLE = Status(True, 'reachable')
+UNREACHABLE = Status(False, 'unreachable')
+UNSURE = Status(None, 'unsure')
+
+
def _get_flow_scopes(node):
while True:
node = get_parent_scope(node, include_flows=True)
if node is None or is_scope(node):
return
yield node
-
-
+
+
def reachability_check(context, context_scope, node, origin_scope=None):
first_flow_scope = get_parent_scope(node, include_flows=True)
if origin_scope is not None:
origin_flow_scopes = list(_get_flow_scopes(origin_scope))
node_flow_scopes = list(_get_flow_scopes(node))
-
+
branch_matches = True
for flow_scope in origin_flow_scopes:
if flow_scope in node_flow_scopes:
@@ -79,32 +79,32 @@ def reachability_check(context, context_scope, node, origin_scope=None):
def _break_check(context, context_scope, flow_scope, node):
- reachable = REACHABLE
+ reachable = REACHABLE
if flow_scope.type == 'if_stmt':
if flow_scope.is_node_after_else(node):
for check_node in flow_scope.get_test_nodes():
reachable = _check_if(context, check_node)
- if reachable in (REACHABLE, UNSURE):
- break
- reachable = reachable.invert()
- else:
+ if reachable in (REACHABLE, UNSURE):
+ break
+ reachable = reachable.invert()
+ else:
flow_node = flow_scope.get_corresponding_test_node(node)
if flow_node is not None:
reachable = _check_if(context, flow_node)
elif flow_scope.type in ('try_stmt', 'while_stmt'):
- return UNSURE
-
- # Only reachable branches need to be examined further.
- if reachable in (UNREACHABLE, UNSURE):
- return reachable
-
+ return UNSURE
+
+ # Only reachable branches need to be examined further.
+ if reachable in (UNREACHABLE, UNSURE):
+ return reachable
+
if context_scope != flow_scope and context_scope != flow_scope.parent:
flow_scope = get_parent_scope(flow_scope, include_flows=True)
return reachable & _break_check(context, context_scope, flow_scope, node)
else:
return reachable
-
-
+
+
def _check_if(context, node):
with execution_allowed(context.evaluator, node) as allowed:
if not allowed:
diff --git a/contrib/python/jedi/jedi/evaluate/helpers.py b/contrib/python/jedi/jedi/evaluate/helpers.py
index 989e3676bd..c94a1fbe55 100644
--- a/contrib/python/jedi/jedi/evaluate/helpers.py
+++ b/contrib/python/jedi/jedi/evaluate/helpers.py
@@ -1,27 +1,27 @@
-import copy
+import copy
import sys
import re
import os
-from itertools import chain
+from itertools import chain
from contextlib import contextmanager
-
+
from parso.python import tree
-
+
from jedi._compatibility import unicode
from jedi.parser_utils import get_parent_scope
-
-
+
+
def is_stdlib_path(path):
# Python standard library paths look like this:
# /usr/lib/python3.5/...
# TODO The implementation below is probably incorrect and not complete.
if 'dist-packages' in path or 'site-packages' in path:
return False
-
+
base_path = os.path.join(sys.prefix, 'lib', 'python')
return bool(re.match(re.escape(base_path) + r'\d.\d', path))
-
-
+
+
def deep_ast_copy(obj):
"""
Much, much faster than copy.deepcopy, but just for parser tree nodes.
@@ -35,25 +35,25 @@ def deep_ast_copy(obj):
if isinstance(child, tree.Leaf):
new_child = copy.copy(child)
new_child.parent = new_obj
- else:
+ else:
new_child = deep_ast_copy(child)
new_child.parent = new_obj
new_children.append(new_child)
new_obj.children = new_children
-
- return new_obj
-
-
+
+ return new_obj
+
+
def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
- """
- Creates a "call" node that consist of all ``trailer`` and ``power``
- objects. E.g. if you call it with ``append``::
-
- list([]).append(3) or None
-
- You would get a node with the content ``list([]).append`` back.
-
- This generates a copy of the original ast node.
+ """
+ Creates a "call" node that consist of all ``trailer`` and ``power``
+ objects. E.g. if you call it with ``append``::
+
+ list([]).append(3) or None
+
+ You would get a node with the content ``list([]).append`` back.
+
+ This generates a copy of the original ast node.
If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
@@ -62,12 +62,12 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
- infer the type of ``foo`` to offer completions after foo
- infer the type of ``bar`` to be able to jump to the definition of foo
The option ``cut_own_trailer`` must be set to true for the second purpose.
- """
+ """
trailer = leaf.parent
if trailer.type == 'fstring':
from jedi.evaluate import compiled
return compiled.get_string_context_set(context.evaluator)
-
+
# The leaf may not be the last or first child, because there exist three
# different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
# we should not match anything more than x.
@@ -75,14 +75,14 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
if trailer.type == 'atom':
return context.eval_node(trailer)
return context.eval_node(leaf)
-
+
power = trailer.parent
index = power.children.index(trailer)
if cut_own_trailer:
cut = index
else:
cut = index + 1
-
+
if power.type == 'error_node':
start = index
while True:
@@ -94,7 +94,7 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
else:
base = power.children[0]
trailers = power.children[1:cut]
-
+
if base == 'await':
base = trailers[0]
trailers = trailers[1:]
@@ -161,11 +161,11 @@ def get_names_of_node(node):
return list(chain.from_iterable(get_names_of_node(c) for c in children))
-def get_module_names(module, all_scopes):
- """
- Returns a dictionary with name parts as keys and their call paths as
- values.
- """
+def get_module_names(module, all_scopes):
+ """
+ Returns a dictionary with name parts as keys and their call paths as
+ values.
+ """
names = chain.from_iterable(module.get_used_names().values())
if not all_scopes:
# We have to filter all the names that don't have the module as a
@@ -194,42 +194,42 @@ def is_compiled(context):
def is_string(context):
if context.evaluator.environment.version_info.major == 2:
str_classes = (unicode, bytes)
- else:
+ else:
str_classes = (unicode,)
return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes)
-
-
+
+
def is_literal(context):
return is_number(context) or is_string(context)
-
-
+
+
def _get_safe_value_or_none(context, accept):
if is_compiled(context):
value = context.get_safe_value(default=None)
if isinstance(value, accept):
return value
-
-
+
+
def get_int_or_none(context):
return _get_safe_value_or_none(context, int)
-
-
+
+
def is_number(context):
return _get_safe_value_or_none(context, (int, float)) is not None
-
-
+
+
class EvaluatorTypeError(Exception):
pass
-
-
+
+
class EvaluatorIndexError(Exception):
pass
-
-
+
+
class EvaluatorKeyError(Exception):
pass
-
-
+
+
@contextmanager
def reraise_as_evaluator(*exception_classes):
try:
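
For context on the ``is_stdlib_path`` hunk above, here is a standalone
restatement of the heuristic that runs outside jedi. Note the vendored
pattern ``\d.\d`` leaves the dot unescaped and matches single digits only;
the sketch tightens it to ``\d+\.\d+``, which is an editorial assumption
rather than jedi's exact behavior:

    import os
    import re
    import sys

    def is_stdlib_path_sketch(path):
        # site-packages / dist-packages always means third-party code.
        if 'dist-packages' in path or 'site-packages' in path:
            return False
        # Anything under <sys.prefix>/lib/pythonX.Y is treated as stdlib.
        base_path = os.path.join(sys.prefix, 'lib', 'python')
        return bool(re.match(re.escape(base_path) + r'\d+\.\d+', path))

    # True when sys.prefix is '/usr', as on most Linux system Pythons.
    print(is_stdlib_path_sketch('/usr/lib/python3.5/os.py'))
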
diff --git a/contrib/python/jedi/jedi/evaluate/imports.py b/contrib/python/jedi/jedi/evaluate/imports.py
index 690d93cd1e..3b1df31a44 100644
--- a/contrib/python/jedi/jedi/evaluate/imports.py
+++ b/contrib/python/jedi/jedi/evaluate/imports.py
@@ -1,57 +1,57 @@
-"""
-:mod:`jedi.evaluate.imports` is here to resolve import statements and return
-the modules/classes/functions/whatever, which they stand for. However there's
-not any actual importing done. This module is about finding modules in the
-filesystem. This can be quite tricky sometimes, because Python imports are not
-always that simple.
-
-This module uses imp for python up to 3.2 and importlib for python 3.3 on; the
-correct implementation is delegated to _compatibility.
-
-This module also supports import autocompletion, which means to complete
+"""
+:mod:`jedi.evaluate.imports` is here to resolve import statements and return
+the modules/classes/functions/whatever, which they stand for. However there's
+not any actual importing done. This module is about finding modules in the
+filesystem. This can be quite tricky sometimes, because Python imports are not
+always that simple.
+
+This module uses imp for python up to 3.2 and importlib for python 3.3 on; the
+correct implementation is delegated to _compatibility.
+
+This module also supports import autocompletion, which means to complete
statements like ``from datetim`` (cursor at the end would return ``datetime``).
-"""
-import os
-
+"""
+import os
+
from parso.python import tree
from parso.tree import search_ancestor
from parso import python_bytes_to_unicode
from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo,
force_unicode, unicode)
-from jedi import debug
+from jedi import debug
from jedi import settings
from jedi.parser_utils import get_cached_code_lines
-from jedi.evaluate import sys_path
-from jedi.evaluate import helpers
-from jedi.evaluate import compiled
-from jedi.evaluate import analysis
+from jedi.evaluate import sys_path
+from jedi.evaluate import helpers
+from jedi.evaluate import compiled
+from jedi.evaluate import analysis
from jedi.evaluate.utils import unite
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import AbstractNameDefinition
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
-
-
+
+
class ModuleCache(object):
def __init__(self):
self._path_cache = {}
self._name_cache = {}
-
+
def add(self, module, name):
path = module.py__file__()
self._path_cache[path] = module
self._name_cache[name] = module
-
+
def iterate_modules_with_names(self):
return self._name_cache.items()
-
+
def get(self, name):
return self._name_cache[name]
-
+
def get_from_path(self, path):
return self._path_cache[path]
-
-
+
+
# This memoization is needed, because otherwise we will infinitely loop on
# certain imports.
@evaluator_method_cache(default=NO_CONTEXTS)
@@ -72,18 +72,18 @@ def infer_import(context, tree_name, is_goto=False):
# if from_names exists in the modules.
from_import_name = import_path[-1]
import_path = from_names
-
+
importer = Importer(evaluator, tuple(import_path),
module_context, import_node.level)
-
+
types = importer.follow()
-
+
#if import_node.is_nested() and not self.nested_resolve:
# scopes = [NestedImportModule(module, import_node)]
-
+
if not types:
return NO_CONTEXTS
-
+
if from_import_name is not None:
types = unite(
t.py__getattribute__(
@@ -96,7 +96,7 @@ def infer_import(context, tree_name, is_goto=False):
)
if not is_goto:
types = ContextSet.from_set(types)
-
+
if not types:
path = import_path + [from_import_name]
importer = Importer(evaluator, tuple(path),
@@ -109,62 +109,62 @@ def infer_import(context, tree_name, is_goto=False):
# goto only accepts `Name`
if is_goto:
types = set(s.name for s in types)
-
+
debug.dbg('after import: %s', types)
return types
-
-
-class NestedImportModule(tree.Module):
- """
- TODO while there's no use case for nested import module right now, we might
- be able to use them for static analysis checks later on.
- """
- def __init__(self, module, nested_import):
- self._module = module
- self._nested_import = nested_import
-
- def _get_nested_import_name(self):
- """
- Generates an Import statement, that can be used to fake nested imports.
- """
- i = self._nested_import
- # This is not an existing Import statement. Therefore, set position to
- # 0 (0 is not a valid line number).
- zero = (0, 0)
- names = [unicode(name) for name in i.namespace_names[1:]]
- name = helpers.FakeName(names, self._nested_import)
- new = tree.Import(i._sub_module, zero, zero, name)
- new.parent = self._module
- debug.dbg('Generated a nested import: %s', new)
- return helpers.FakeName(str(i.namespace_names[1]), new)
-
- def __getattr__(self, name):
- return getattr(self._module, name)
-
- def __repr__(self):
- return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
- self._nested_import)
-
-
+
+
+class NestedImportModule(tree.Module):
+ """
+ TODO while there's no use case for nested import module right now, we might
+ be able to use them for static analysis checks later on.
+ """
+ def __init__(self, module, nested_import):
+ self._module = module
+ self._nested_import = nested_import
+
+ def _get_nested_import_name(self):
+ """
+ Generates an Import statement, that can be used to fake nested imports.
+ """
+ i = self._nested_import
+ # This is not an existing Import statement. Therefore, set position to
+ # 0 (0 is not a valid line number).
+ zero = (0, 0)
+ names = [unicode(name) for name in i.namespace_names[1:]]
+ name = helpers.FakeName(names, self._nested_import)
+ new = tree.Import(i._sub_module, zero, zero, name)
+ new.parent = self._module
+ debug.dbg('Generated a nested import: %s', new)
+ return helpers.FakeName(str(i.namespace_names[1]), new)
+
+ def __getattr__(self, name):
+ return getattr(self._module, name)
+
+ def __repr__(self):
+ return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
+ self._nested_import)
+
+
def _add_error(context, name, message=None):
# Should be a name, not a string!
if message is None:
name_str = str(name.value) if isinstance(name, tree.Name) else name
message = 'No module named ' + name_str
- if hasattr(name, 'parent'):
+ if hasattr(name, 'parent'):
analysis.add(context, 'import-error', name, message)
else:
debug.warning('ImportError without origin: ' + message)
-
-
+
+
class ImportName(AbstractNameDefinition):
start_pos = (1, 0)
_level = 0
-
+
def __init__(self, parent_context, string_name):
self.parent_context = parent_context
self.string_name = string_name
-
+
def infer(self):
return Importer(
self.parent_context.evaluator,
@@ -189,35 +189,35 @@ class SubModuleName(ImportName):
_level = 1
-class Importer(object):
+class Importer(object):
def __init__(self, evaluator, import_path, module_context, level=0):
- """
- An implementation similar to ``__import__``. Use `follow`
- to actually follow the imports.
-
- *level* specifies whether to use absolute or relative imports. 0 (the
- default) means only perform absolute imports. Positive values for level
- indicate the number of parent directories to search relative to the
- directory of the module calling ``__import__()`` (see PEP 328 for the
- details).
-
- :param import_path: List of namespaces (strings or Names).
- """
- debug.speed('import %s' % (import_path,))
- self._evaluator = evaluator
- self.level = level
+ """
+ An implementation similar to ``__import__``. Use `follow`
+ to actually follow the imports.
+
+ *level* specifies whether to use absolute or relative imports. 0 (the
+ default) means only perform absolute imports. Positive values for level
+ indicate the number of parent directories to search relative to the
+ directory of the module calling ``__import__()`` (see PEP 328 for the
+ details).
+
+ :param import_path: List of namespaces (strings or Names).
+ """
+ debug.speed('import %s' % (import_path,))
+ self._evaluator = evaluator
+ self.level = level
self.module_context = module_context
- try:
+ try:
self.file_path = module_context.py__file__()
- except AttributeError:
- # Can be None for certain compiled modules like 'builtins'.
- self.file_path = None
-
- if level:
+ except AttributeError:
+ # Can be None for certain compiled modules like 'builtins'.
+ self.file_path = None
+
+ if level:
base = module_context.py__package__().split('.')
if base == [''] or base == ['__main__']:
- base = []
- if level > len(base):
+ base = []
+ if level > len(base):
path = module_context.py__file__()
if path is not None:
import_path = list(import_path)
@@ -246,64 +246,64 @@ class Importer(object):
# are in the file system. Therefore we cannot know what to do.
# In this case we just let the path there and ignore that it's
# a relative path. Not sure if that's a good idea.
- else:
- # Here we basically rewrite the level to 0.
+ else:
+ # Here we basically rewrite the level to 0.
base = tuple(base)
if level > 1:
base = base[:-level + 1]
import_path = base + tuple(import_path)
- self.import_path = import_path
-
- @property
- def str_import_path(self):
- """Returns the import path as pure strings instead of `Name`."""
+ self.import_path = import_path
+
+ @property
+ def str_import_path(self):
+ """Returns the import path as pure strings instead of `Name`."""
return tuple(
name.value if isinstance(name, tree.Name) else name
for name in self.import_path
)
-
- def sys_path_with_modifications(self):
-
+
+ def sys_path_with_modifications(self):
+
sys_path_mod = (
self._evaluator.get_sys_path()
+ sys_path.check_sys_path_modifications(self.module_context)
)
-
+
if self.import_path and self.file_path is not None \
and self._evaluator.environment.version_info.major == 2:
# Python2 uses an old strange way of importing relative imports.
sys_path_mod.append(force_unicode(os.path.dirname(self.file_path)))
-
+
return sys_path_mod
- def follow(self):
+ def follow(self):
if not self.import_path or not self._evaluator.infer_enabled:
return NO_CONTEXTS
- return self._do_import(self.import_path, self.sys_path_with_modifications())
-
- def _do_import(self, import_path, sys_path):
- """
- This method is very similar to importlib's `_gcd_import`.
- """
+ return self._do_import(self.import_path, self.sys_path_with_modifications())
+
+ def _do_import(self, import_path, sys_path):
+ """
+ This method is very similar to importlib's `_gcd_import`.
+ """
import_parts = [
force_unicode(i.value if isinstance(i, tree.Name) else i)
for i in import_path
]
-
- # Handle "magic" Flask extension imports:
- # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
- if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
- # New style.
- ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
- modules = self._do_import(ipath, sys_path)
- if modules:
- return modules
- else:
- # Old style
- return self._do_import(('flaskext',) + import_path[2:], sys_path)
-
+
+ # Handle "magic" Flask extension imports:
+ # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
+ if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
+ # New style.
+ ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
+ modules = self._do_import(ipath, sys_path)
+ if modules:
+ return modules
+ else:
+ # Old style
+ return self._do_import(('flaskext',) + import_path[2:], sys_path)
+
if import_parts[0] in settings.auto_import_modules:
module = _load_module(
self._evaluator,
@@ -312,42 +312,42 @@ class Importer(object):
)
return ContextSet(module)
- module_name = '.'.join(import_parts)
- try:
+ module_name = '.'.join(import_parts)
+ try:
return ContextSet(self._evaluator.module_cache.get(module_name))
- except KeyError:
- pass
-
- if len(import_path) > 1:
- # This is a recursive way of importing that works great with
- # the module cache.
- bases = self._do_import(import_path[:-1], sys_path)
- if not bases:
+ except KeyError:
+ pass
+
+ if len(import_path) > 1:
+ # This is a recursive way of importing that works great with
+ # the module cache.
+ bases = self._do_import(import_path[:-1], sys_path)
+ if not bases:
return NO_CONTEXTS
- # We can take the first element, because only the os special
- # case yields multiple modules, which is not important for
- # further imports.
+ # We can take the first element, because only the os special
+ # case yields multiple modules, which is not important for
+ # further imports.
parent_module = list(bases)[0]
-
- # This is a huge exception, we follow a nested import
- # ``os.path``, because it's a very important one in Python
- # that is being achieved by messing with ``sys.modules`` in
- # ``os``.
+
+ # This is a huge exception, we follow a nested import
+ # ``os.path``, because it's a very important one in Python
+ # that is being achieved by messing with ``sys.modules`` in
+ # ``os``.
if import_parts == ['os', 'path']:
return parent_module.py__getattribute__('path')
-
- try:
+
+ try:
method = parent_module.py__path__
- except AttributeError:
- # The module is not a package.
+ except AttributeError:
+ # The module is not a package.
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
- else:
+ else:
paths = method()
- debug.dbg('search_module %s in paths %s', module_name, paths)
- for path in paths:
- # At the moment we are only using one path. So this is
- # not important to be correct.
+ debug.dbg('search_module %s in paths %s', module_name, paths)
+ for path in paths:
+ # At the moment we are only using one path. So this is
+ # not important to be correct.
if not isinstance(path, list):
path = [path]
code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
@@ -357,11 +357,11 @@ class Importer(object):
is_global_search=False,
)
if module_path is not None:
- break
+ break
else:
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
- else:
+ else:
debug.dbg('global search_module %s in %s', import_parts[-1], self.file_path)
            # Override the sys.path. It only works well that way.
# Injecting the path directly into `find_module` did not work.
@@ -372,111 +372,111 @@ class Importer(object):
is_global_search=True,
)
if module_path is None:
- # The module is not a package.
+ # The module is not a package.
_add_error(self.module_context, import_path[-1])
return NO_CONTEXTS
-
+
module = _load_module(
self._evaluator, module_path, code, sys_path,
import_names=import_parts,
safe_module_name=True,
)
-
+
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
return NO_CONTEXTS
-
+
return ContextSet(module)
-
+
def _generate_name(self, name, in_module=None):
# Create a pseudo import to be able to follow them.
if in_module is None:
return ImportName(self.module_context, name)
return SubModuleName(in_module, name)
-
+
def _get_module_names(self, search_path=None, in_module=None):
- """
- Get the names of all modules in the search_path. This means file names
- and not names defined in the files.
- """
+ """
+ Get the names of all modules in the search_path. This means file names
+ and not names defined in the files.
+ """
sub = self._evaluator.compiled_subprocess
-
- names = []
- # add builtin module names
+
+ names = []
+ # add builtin module names
if search_path is None and in_module is None:
names += [self._generate_name(name) for name in sub.get_builtin_module_names()]
-
- if search_path is None:
- search_path = self.sys_path_with_modifications()
+
+ if search_path is None:
+ search_path = self.sys_path_with_modifications()
for name in sub.list_module_names(search_path):
names.append(self._generate_name(name, in_module=in_module))
- return names
-
- def completion_names(self, evaluator, only_modules=False):
- """
-        :param only_modules: Indicates whether it's possible to import a
- definition that is not defined in a module.
- """
+ return names
+
+ def completion_names(self, evaluator, only_modules=False):
+ """
+        :param only_modules: Indicates whether it's possible to import a
+ definition that is not defined in a module.
+ """
from jedi.evaluate.context import ModuleContext
from jedi.evaluate.context.namespace import ImplicitNamespaceContext
- names = []
- if self.import_path:
- # flask
- if self.str_import_path == ('flask', 'ext'):
- # List Flask extensions like ``flask_foo``
- for mod in self._get_module_names():
+ names = []
+ if self.import_path:
+ # flask
+ if self.str_import_path == ('flask', 'ext'):
+ # List Flask extensions like ``flask_foo``
+ for mod in self._get_module_names():
modname = mod.string_name
- if modname.startswith('flask_'):
- extname = modname[len('flask_'):]
- names.append(self._generate_name(extname))
- # Now the old style: ``flaskext.foo``
- for dir in self.sys_path_with_modifications():
- flaskext = os.path.join(dir, 'flaskext')
- if os.path.isdir(flaskext):
- names += self._get_module_names([flaskext])
-
+ if modname.startswith('flask_'):
+ extname = modname[len('flask_'):]
+ names.append(self._generate_name(extname))
+ # Now the old style: ``flaskext.foo``
+ for dir in self.sys_path_with_modifications():
+ flaskext = os.path.join(dir, 'flaskext')
+ if os.path.isdir(flaskext):
+ names += self._get_module_names([flaskext])
+
for context in self.follow():
- # Non-modules are not completable.
+ # Non-modules are not completable.
if context.api_type != 'module': # not a module
- continue
- # namespace packages
+ continue
+ # namespace packages
if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'):
paths = context.py__path__()
names += self._get_module_names(paths, in_module=context)
-
+
# implicit namespace packages
elif isinstance(context, ImplicitNamespaceContext):
paths = context.paths
names += self._get_module_names(paths, in_module=context)
- if only_modules:
- # In the case of an import like `from x.` we don't need to
- # add all the variables.
- if ('os',) == self.str_import_path and not self.level:
- # os.path is a hardcoded exception, because it's a
- # ``sys.modules`` modification.
+ if only_modules:
+ # In the case of an import like `from x.` we don't need to
+ # add all the variables.
+ if ('os',) == self.str_import_path and not self.level:
+ # os.path is a hardcoded exception, because it's a
+ # ``sys.modules`` modification.
names.append(self._generate_name('path', context))
-
- continue
-
+
+ continue
+
for filter in context.get_filters(search_global=False):
names += filter.values()
- else:
- # Empty import path=completion after import
- if not self.level:
- names += self._get_module_names()
-
- if self.file_path is not None:
- path = os.path.abspath(self.file_path)
- for i in range(self.level - 1):
- path = os.path.dirname(path)
- names += self._get_module_names([path])
-
- return names
-
-
+ else:
+ # Empty import path=completion after import
+ if not self.level:
+ names += self._get_module_names()
+
+ if self.file_path is not None:
+ path = os.path.abspath(self.file_path)
+ for i in range(self.level - 1):
+ path = os.path.dirname(path)
+ names += self._get_module_names([path])
+
+ return names
+
+
def _load_module(evaluator, path=None, code=None, sys_path=None,
import_names=None, safe_module_name=False):
if import_names is None:
@@ -515,30 +515,30 @@ def _load_module(evaluator, path=None, code=None, sys_path=None,
path=path,
code_lines=get_cached_code_lines(evaluator.grammar, path),
)
- else:
+ else:
assert dotted_name is not None
module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)
-
+
if module is not None and dotted_name is not None:
add_module_to_cache(evaluator, dotted_name, module, safe=safe_module_name)
- return module
-
-
+ return module
+
+
def add_module_to_cache(evaluator, module_name, module, safe=False):
if not safe and '.' not in module_name:
- # We cannot add paths with dots, because that would collide with
- # the sepatator dots for nested packages. Therefore we return
- # `__main__` in ModuleWrapper.py__name__(), which is similar to
- # Python behavior.
+ # We cannot add paths with dots, because that would collide with
+        # the separator dots for nested packages. Therefore we return
+ # `__main__` in ModuleWrapper.py__name__(), which is similar to
+ # Python behavior.
return
evaluator.module_cache.add(module, module_name)
-
-
+
+
def get_modules_containing_name(evaluator, modules, name):
- """
- Search a name in the directories of modules.
- """
+ """
+ Search a name in the directories of modules.
+ """
def check_directories(paths):
for p in paths:
if p is not None:
@@ -549,8 +549,8 @@ def get_modules_containing_name(evaluator, modules, name):
path = os.path.join(d, file_name)
if file_name.endswith('.py'):
yield path
-
- def check_fs(path):
+
+ def check_fs(path):
try:
f = open(path, 'rb')
except FileNotFoundError:
@@ -565,9 +565,9 @@ def get_modules_containing_name(evaluator, modules, name):
sys_path=e_sys_path,
import_names=import_names,
)
- return module
-
- # skip non python modules
+ return module
+
+ # skip non python modules
used_mod_paths = set()
for m in modules:
try:
@@ -576,11 +576,11 @@ def get_modules_containing_name(evaluator, modules, name):
pass
else:
used_mod_paths.add(path)
- yield m
-
+ yield m
+
if not settings.dynamic_params_for_other_modules:
return
-
+
additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules)
# Check the directories of used modules.
paths = (additional | set(check_directories(used_mod_paths))) \
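
``Importer._do_import`` above resolves ``a.b.c`` by first resolving ``a.b``
and then looking ``c`` up inside it, memoizing results through the module
cache. A toy mirror of that recursion, where ``find_top_level`` and
``find_in_package`` are hypothetical stand-ins for the evaluator's
compiled-subprocess lookups:

    # Only the shape of the recursion is the point here; the two lookup
    # functions below do not exist in jedi and must be supplied.
    _module_cache = {}

    def resolve(import_parts):
        name = '.'.join(import_parts)
        if name in _module_cache:      # same role as evaluator.module_cache
            return _module_cache[name]
        if len(import_parts) == 1:
            module = find_top_level(import_parts[0])
        else:
            # Resolve the parent package first, then the child within it.
            parent = resolve(import_parts[:-1])
            module = find_in_package(parent, import_parts[-1]) if parent else None
        _module_cache[name] = module
        return module
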
diff --git a/contrib/python/jedi/jedi/evaluate/param.py b/contrib/python/jedi/jedi/evaluate/param.py
index c6bd8376f5..84f281e532 100644
--- a/contrib/python/jedi/jedi/evaluate/param.py
+++ b/contrib/python/jedi/jedi/evaluate/param.py
@@ -1,74 +1,74 @@
-from collections import defaultdict
-
+from collections import defaultdict
+
from jedi.evaluate.utils import PushBackIterator
-from jedi.evaluate import analysis
+from jedi.evaluate import analysis
from jedi.evaluate.lazy_context import LazyKnownContext, \
LazyTreeContext, LazyUnknownContext
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate.context import iterable
-
-
+
+
def _add_argument_issue(parent_context, error_name, lazy_context, message):
if isinstance(lazy_context, LazyTreeContext):
node = lazy_context.data
if node.parent.type == 'argument':
node = node.parent
analysis.add(parent_context, error_name, node, message)
-
-
+
+
class ExecutedParam(object):
- """Fake a param and give it values."""
+ """Fake a param and give it values."""
def __init__(self, execution_context, param_node, lazy_context):
self._execution_context = execution_context
self._param_node = param_node
self._lazy_context = lazy_context
self.string_name = param_node.name.value
-
+
def infer(self):
pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node)
doc_params = docstrings.infer_param(self._execution_context, self._param_node)
if pep0484_hints or doc_params:
return pep0484_hints | doc_params
-
+
return self._lazy_context.infer()
-
- @property
+
+ @property
def var_args(self):
return self._execution_context.var_args
-
+
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.string_name)
-
-
+
+
def get_executed_params(execution_context, var_args):
result_params = []
param_dict = {}
funcdef = execution_context.tree_node
parent_context = execution_context.parent_context
-
+
for param in funcdef.get_params():
param_dict[param.name.value] = param
unpacked_va = list(var_args.unpack(funcdef))
var_arg_iterator = PushBackIterator(iter(unpacked_va))
-
- non_matching_keys = defaultdict(lambda: [])
- keys_used = {}
- keys_only = False
- had_multiple_value_error = False
+
+ non_matching_keys = defaultdict(lambda: [])
+ keys_used = {}
+ keys_only = False
+ had_multiple_value_error = False
for param in funcdef.get_params():
- # The value and key can both be null. There, the defaults apply.
- # args / kwargs will just be empty arrays / dicts, respectively.
- # Wrong value count is just ignored. If you try to test cases that are
- # not allowed in Python, Jedi will maybe not show any completions.
+ # The value and key can both be null. There, the defaults apply.
+ # args / kwargs will just be empty arrays / dicts, respectively.
+ # Wrong value count is just ignored. If you try to test cases that are
+ # not allowed in Python, Jedi will maybe not show any completions.
key, argument = next(var_arg_iterator, (None, None))
- while key is not None:
- keys_only = True
- try:
+ while key is not None:
+ keys_only = True
+ try:
key_param = param_dict[key]
- except KeyError:
+ except KeyError:
non_matching_keys[key] = argument
- else:
+ else:
if key in keys_used:
had_multiple_value_error = True
m = ("TypeError: %s() got multiple values for keyword argument '%s'."
@@ -79,15 +79,15 @@ def get_executed_params(execution_context, var_args):
else:
keys_used[key] = ExecutedParam(execution_context, key_param, argument)
key, argument = next(var_arg_iterator, (None, None))
-
+
try:
result_params.append(keys_used[param.name.value])
continue
except KeyError:
pass
-
+
if param.star_count == 1:
- # *args param
+ # *args param
lazy_context_list = []
if argument is not None:
lazy_context_list.append(argument)
@@ -100,12 +100,12 @@ def get_executed_params(execution_context, var_args):
seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list)
result_arg = LazyKnownContext(seq)
elif param.star_count == 2:
- # **kwargs param
+ # **kwargs param
dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys))
result_arg = LazyKnownContext(dct)
- non_matching_keys = {}
- else:
- # normal param
+ non_matching_keys = {}
+ else:
+ # normal param
if argument is None:
# No value: Return an empty container
if param.default is None:
@@ -117,30 +117,30 @@ def get_executed_params(execution_context, var_args):
node, message=m)
else:
result_arg = LazyTreeContext(parent_context, param.default)
- else:
+ else:
result_arg = argument
-
+
result_params.append(ExecutedParam(execution_context, param, result_arg))
if not isinstance(result_arg, LazyUnknownContext):
keys_used[param.name.value] = result_params[-1]
-
- if keys_only:
- # All arguments should be handed over to the next function. It's not
-        # about the values inside, it's about the names. Jedi needs to know that
- # there's nothing to find for certain names.
- for k in set(param_dict) - set(keys_used):
- param = param_dict[k]
-
+
+ if keys_only:
+ # All arguments should be handed over to the next function. It's not
+        # about the values inside, it's about the names. Jedi needs to know that
+ # there's nothing to find for certain names.
+ for k in set(param_dict) - set(keys_used):
+ param = param_dict[k]
+
if not (non_matching_keys or had_multiple_value_error or
param.star_count or param.default):
- # add a warning only if there's not another one.
+ # add a warning only if there's not another one.
for node in var_args.get_calling_nodes():
m = _error_argument_count(funcdef, len(unpacked_va))
analysis.add(parent_context, 'type-error-too-few-arguments',
node, message=m)
-
+
for key, lazy_context in non_matching_keys.items():
- m = "TypeError: %s() got an unexpected keyword argument '%s'." \
+ m = "TypeError: %s() got an unexpected keyword argument '%s'." \
% (funcdef.name, key)
_add_argument_issue(
parent_context,
@@ -148,31 +148,31 @@ def get_executed_params(execution_context, var_args):
lazy_context,
message=m
)
-
+
remaining_arguments = list(var_arg_iterator)
if remaining_arguments:
m = _error_argument_count(funcdef, len(unpacked_va))
- # Just report an error for the first param that is not needed (like
- # cPython).
+ # Just report an error for the first param that is not needed (like
+ # cPython).
first_key, lazy_context = remaining_arguments[0]
if var_args.get_calling_nodes():
# There might not be a valid calling node so check for that first.
_add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m)
return result_params
-
-
+
+
def _error_argument_count(funcdef, actual_count):
params = funcdef.get_params()
default_arguments = sum(1 for p in params if p.default or p.star_count)
if default_arguments == 0:
before = 'exactly '
- else:
+ else:
before = 'from %s to ' % (len(params) - default_arguments)
return ('TypeError: %s() takes %s%s arguments (%s given).'
% (funcdef.name, before, len(params), actual_count))
-
-
+
+
def _create_default_param(execution_context, param):
if param.star_count == 1:
result_arg = LazyKnownContext(
@@ -184,11 +184,11 @@ def _create_default_param(execution_context, param):
)
elif param.default is None:
result_arg = LazyUnknownContext()
- else:
+ else:
result_arg = LazyTreeContext(execution_context.parent_context, param.default)
return ExecutedParam(execution_context, param, result_arg)
-
-
+
+
def create_default_params(execution_context, funcdef):
return [_create_default_param(execution_context, p)
for p in funcdef.get_params()]
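
``get_executed_params`` above walks the function's parameters against a
push-back iterator of unpacked arguments. A toy binder in the same spirit
(no defaults, no error reporting); every name here is invented for the
illustration:

    def bind_arguments(param_names, star_name, kwstar_name, args, kwargs):
        """Positionals fill named params in order, then keywords; the
        leftovers go to *star_name and **kwstar_name respectively."""
        bound, args, kwargs = {}, list(args), dict(kwargs)
        for name in param_names:
            if args:
                bound[name] = args.pop(0)
            elif name in kwargs:
                bound[name] = kwargs.pop(name)
        if star_name:
            bound[star_name] = tuple(args)   # like the FakeSequence tuple
        if kwstar_name:
            bound[kwstar_name] = kwargs      # like the FakeDict
        return bound

    print(bind_arguments(['a', 'b'], 'rest', 'opts', [1, 2, 3], {'x': 4}))
    # -> {'a': 1, 'b': 2, 'rest': (3,), 'opts': {'x': 4}}
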
diff --git a/contrib/python/jedi/jedi/evaluate/recursion.py b/contrib/python/jedi/jedi/evaluate/recursion.py
index 0223c47291..1f4f6384e9 100644
--- a/contrib/python/jedi/jedi/evaluate/recursion.py
+++ b/contrib/python/jedi/jedi/evaluate/recursion.py
@@ -1,37 +1,37 @@
-"""
-Recursions are the recipe of |jedi| to conquer Python code. However, someone
-must stop recursions going mad. Some settings are here to make |jedi| stop at
-the right time. You can read more about them :ref:`here <settings-recursion>`.
-
-Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not
-thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
-count the function calls.
-
+"""
+Recursions are the recipe of |jedi| to conquer Python code. However, someone
+must stop recursions going mad. Some settings are here to make |jedi| stop at
+the right time. You can read more about them :ref:`here <settings-recursion>`.
+
+Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not
+thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
+count the function calls.
+
.. _settings-recursion:
-
+
Settings
~~~~~~~~~~
-
+
Recursion settings are important if you don't want extremely
recursive python code to go absolutely crazy.
-
+
The default values are based on experiments while completing the |jedi| library
itself (inception!). But I don't think there's any other Python library that
uses recursion in a similarly extreme way. Completion should also be fast and
therefore the quality might not always be maximal.
-
+
.. autodata:: recursion_limit
.. autodata:: total_function_execution_limit
.. autodata:: per_function_execution_limit
.. autodata:: per_function_recursion_limit
"""
-
+
from contextlib import contextmanager
-
+
from jedi import debug
from jedi.evaluate.base_context import NO_CONTEXTS
-
-
+
+
recursion_limit = 15
"""
Like ``sys.getrecursionlimit()``, just for |jedi|.
@@ -48,13 +48,13 @@ per_function_recursion_limit = 2
"""
A function may not be executed more than this number of times recursively.
"""
-
-
+
+
class RecursionDetector(object):
def __init__(self):
self.pushed_nodes = []
-
-
+
+
@contextmanager
def execution_allowed(evaluator, node):
"""
@@ -62,7 +62,7 @@ def execution_allowed(evaluator, node):
at the same place, in the same module may not be executed two times.
"""
pushed_nodes = evaluator.recursion_detector.pushed_nodes
-
+
if node in pushed_nodes:
debug.warning('catched stmt recursion: %s @%s', node,
getattr(node, 'start_pos', None))
@@ -73,8 +73,8 @@ def execution_allowed(evaluator, node):
yield True
finally:
pushed_nodes.pop()
-
-
+
+
def execution_recursion_decorator(default=NO_CONTEXTS):
def decorator(func):
def wrapper(self, **kwargs):
@@ -90,43 +90,43 @@ def execution_recursion_decorator(default=NO_CONTEXTS):
return result
return wrapper
return decorator
-
-
-class ExecutionRecursionDetector(object):
- """
- Catches recursions of executions.
- """
+
+
+class ExecutionRecursionDetector(object):
+ """
+ Catches recursions of executions.
+ """
def __init__(self, evaluator):
self._evaluator = evaluator
-
+
self._recursion_level = 0
self._parent_execution_funcs = []
self._funcdef_execution_counts = {}
self._execution_count = 0
-
+
def pop_execution(self):
self._parent_execution_funcs.pop()
self._recursion_level -= 1
-
+
def push_execution(self, execution):
funcdef = execution.tree_node
-
+
# These two will be undone in pop_execution.
self._recursion_level += 1
self._parent_execution_funcs.append(funcdef)
-
+
module = execution.get_root_context()
if module == self._evaluator.builtins_module:
# We have control over builtins so we know they are not recursing
# like crazy. Therefore we just let them execute always, because
# they usually just help a lot with getting good results.
- return False
-
+ return False
+
if self._recursion_level > recursion_limit:
- return True
+ return True
if self._execution_count >= total_function_execution_limit:
- return True
+ return True
self._execution_count += 1
if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
@@ -135,4 +135,4 @@ class ExecutionRecursionDetector(object):
if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
return True
- return False
+ return False
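
``execution_allowed`` above is a context manager that refuses to re-evaluate
a node already on the stack. A self-contained sketch of that guard, with
``execution_allowed_sketch`` invented for this illustration:

    from contextlib import contextmanager

    @contextmanager
    def execution_allowed_sketch(pushed_nodes, node):
        # Refuse to evaluate a node that is already being evaluated
        # further up the stack; otherwise push it for the duration.
        if node in pushed_nodes:
            yield False
        else:
            pushed_nodes.append(node)
            try:
                yield True
            finally:
                pushed_nodes.pop()

    stack = []
    with execution_allowed_sketch(stack, 'node-1') as ok:
        assert ok
        with execution_allowed_sketch(stack, 'node-1') as ok_again:
            assert not ok_again   # recursion detected, caller bails out
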
diff --git a/contrib/python/jedi/jedi/evaluate/stdlib.py b/contrib/python/jedi/jedi/evaluate/stdlib.py
index 5303318235..52c223838b 100644
--- a/contrib/python/jedi/jedi/evaluate/stdlib.py
+++ b/contrib/python/jedi/jedi/evaluate/stdlib.py
@@ -1,21 +1,21 @@
-"""
-Implementations of standard library functions, because it's not possible to
-understand them with Jedi.
-
-To add a new implementation, create a function and add it to the
-``_implemented`` dict at the bottom of this module.
-
+"""
+Implementations of standard library functions, because it's not possible to
+understand them with Jedi.
+
+To add a new implementation, create a function and add it to the
+``_implemented`` dict at the bottom of this module.
+
Note that this module exists only to implement very specific functionality in
the standard library. The usual way to understand the standard library is the
compiled module that returns the types for C-builtins.
-"""
+"""
import parso
-
+
from jedi._compatibility import force_unicode
from jedi import debug
from jedi.evaluate.arguments import ValuesArguments, repack_with_argument_clinic
from jedi.evaluate import analysis
-from jedi.evaluate import compiled
+from jedi.evaluate import compiled
from jedi.evaluate.context.instance import \
AbstractInstanceContext, CompiledInstance, BoundMethod, InstanceArguments
from jedi.evaluate.base_context import ContextualizedNode, \
@@ -24,7 +24,7 @@ from jedi.evaluate.context import ClassContext, ModuleContext, FunctionExecution
from jedi.evaluate.context import iterable
from jedi.evaluate.lazy_context import LazyTreeContext
from jedi.evaluate.syntax_tree import is_string
-
+
# Now this is all part of fake tuples in Jedi. However super doesn't work on
# __init__ and __new__ doesn't work at all. So adding this to namedtuples is
# just the easiest way.
@@ -32,7 +32,7 @@ _NAMEDTUPLE_INIT = """
def __init__(_cls, {arg_list}):
'A helper function for namedtuple.'
self.__iterable = ({arg_list})
-
+
def __iter__(self):
for i in self.__iterable:
yield i
@@ -43,51 +43,51 @@ _NAMEDTUPLE_INIT = """
"""
-class NotInStdLib(LookupError):
- pass
-
-
+class NotInStdLib(LookupError):
+ pass
+
+
def execute(evaluator, obj, arguments):
if isinstance(obj, BoundMethod):
raise NotInStdLib()
- try:
+ try:
obj_name = obj.name.string_name
- except AttributeError:
- pass
- else:
+ except AttributeError:
+ pass
+ else:
if obj.parent_context == evaluator.builtins_module:
- module_name = 'builtins'
+ module_name = 'builtins'
elif isinstance(obj.parent_context, ModuleContext):
module_name = obj.parent_context.name.string_name
- else:
- module_name = ''
-
- # for now we just support builtin functions.
- try:
+ else:
+ module_name = ''
+
+ # for now we just support builtin functions.
+ try:
func = _implemented[module_name][obj_name]
- except KeyError:
- pass
+ except KeyError:
+ pass
else:
return func(evaluator, obj, arguments=arguments)
- raise NotInStdLib()
-
-
+ raise NotInStdLib()
+
+
def _follow_param(evaluator, arguments, index):
- try:
+ try:
key, lazy_context = list(arguments.unpack())[index]
- except IndexError:
+ except IndexError:
return NO_CONTEXTS
- else:
+ else:
return lazy_context.infer()
-
-
+
+
def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False):
- """
- Works like Argument Clinic (PEP 436), to validate function params.
- """
-
- def f(func):
+ """
+ Works like Argument Clinic (PEP 436), to validate function params.
+ """
+
+ def f(func):
@repack_with_argument_clinic(string, keep_arguments_param=True)
def wrapper(evaluator, obj, *args, **kwargs):
arguments = kwargs.pop('arguments')
@@ -103,11 +103,11 @@ def argument_clinic(string, want_obj=False, want_context=False, want_arguments=F
result = func(evaluator, *args, **kwargs)
debug.dbg('builtin end: %s', result, color='MAGENTA')
return result
-
- return wrapper
- return f
-
-
+
+ return wrapper
+ return f
+
+
@argument_clinic('iterator[, default], /')
def builtins_next(evaluator, iterators, defaults):
"""
@@ -132,45 +132,45 @@ def builtins_next(evaluator, iterators, defaults):
return defaults
-@argument_clinic('object, name[, default], /')
-def builtins_getattr(evaluator, objects, names, defaults=None):
- # follow the first param
- for obj in objects:
- for name in names:
+@argument_clinic('object, name[, default], /')
+def builtins_getattr(evaluator, objects, names, defaults=None):
+ # follow the first param
+ for obj in objects:
+ for name in names:
if is_string(name):
return obj.py__getattribute__(force_unicode(name.get_safe_value()))
- else:
- debug.warning('getattr called without str')
- continue
+ else:
+ debug.warning('getattr called without str')
+ continue
return NO_CONTEXTS
-
-
-@argument_clinic('object[, bases, dict], /')
-def builtins_type(evaluator, objects, bases, dicts):
- if bases or dicts:
+
+
+@argument_clinic('object[, bases, dict], /')
+def builtins_type(evaluator, objects, bases, dicts):
+ if bases or dicts:
# It's a type creation... maybe someday...
return NO_CONTEXTS
- else:
+ else:
return objects.py__class__()
-
-
+
+
class SuperInstance(AbstractInstanceContext):
- """To be used like the object ``super`` returns."""
- def __init__(self, evaluator, cls):
- su = cls.py_mro()[1]
- super().__init__(evaluator, su and su[0] or self)
-
-
+ """To be used like the object ``super`` returns."""
+ def __init__(self, evaluator, cls):
+ su = cls.py_mro()[1]
+ super().__init__(evaluator, su and su[0] or self)
+
+
@argument_clinic('[type[, obj]], /', want_context=True)
def builtins_super(evaluator, types, objects, context):
- # TODO make this able to detect multiple inheritance super
+ # TODO make this able to detect multiple inheritance super
if isinstance(context, FunctionExecutionContext):
if isinstance(context.var_args, InstanceArguments):
su = context.var_args.instance.py__class__().py__bases__()
return su[0].infer().execute_evaluated()
-
+
return NO_CONTEXTS
-
+
@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
def builtins_reversed(evaluator, sequences, obj, arguments):
@@ -185,42 +185,42 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
ordered = list(sequences.iterate(cn))
rev = list(reversed(ordered))
- # Repack iterator values and then run it the normal way. This is
- # necessary, because `reversed` is a function and autocompletion
- # would fail in certain cases like `reversed(x).__iter__` if we
- # just returned the result directly.
+ # Repack iterator values and then run it the normal way. This is
+ # necessary, because `reversed` is a function and autocompletion
+ # would fail in certain cases like `reversed(x).__iter__` if we
+ # just returned the result directly.
seq = iterable.FakeSequence(evaluator, u'list', rev)
arguments = ValuesArguments([ContextSet(seq)])
return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments))
-
-
+
+
@argument_clinic('obj, type, /', want_arguments=True)
def builtins_isinstance(evaluator, objects, types, arguments):
bool_results = set()
- for o in objects:
+ for o in objects:
cls = o.py__class__()
- try:
+ try:
mro_func = cls.py__mro__
- except AttributeError:
- # This is temporary. Everything should have a class attribute in
- # Python?! Maybe we'll leave it here, because some numpy objects or
- # whatever might not.
+ except AttributeError:
+ # This is temporary. Everything should have a class attribute in
+ # Python?! Maybe we'll leave it here, because some numpy objects or
+ # whatever might not.
bool_results = set([True, False])
break
-
+
mro = mro_func()
-
- for cls_or_tup in types:
- if cls_or_tup.is_class():
- bool_results.add(cls_or_tup in mro)
+
+ for cls_or_tup in types:
+ if cls_or_tup.is_class():
+ bool_results.add(cls_or_tup in mro)
elif cls_or_tup.name.string_name == 'tuple' \
and cls_or_tup.get_root_context() == evaluator.builtins_module:
- # Check for tuples.
+ # Check for tuples.
classes = ContextSet.from_sets(
lazy_context.infer()
for lazy_context in cls_or_tup.iterate()
)
- bool_results.add(any(cls in mro for cls in classes))
+ bool_results.add(any(cls in mro for cls in classes))
else:
_, lazy_context = list(arguments.unpack())[1]
if isinstance(lazy_context, LazyTreeContext):
@@ -229,34 +229,34 @@ def builtins_isinstance(evaluator, objects, types, arguments):
'class, type, or tuple of classes and types, ' \
'not %s.' % cls_or_tup
analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
-
+
return ContextSet.from_iterable(
compiled.builtin_from_name(evaluator, force_unicode(str(b)))
for b in bool_results
)
-
-
+
+
def collections_namedtuple(evaluator, obj, arguments):
- """
- Implementation of the namedtuple function.
-
- This has to be done by processing the namedtuple class template and
- evaluating the result.
-
- """
+ """
+ Implementation of the namedtuple function.
+
+ This has to be done by processing the namedtuple class template and
+ evaluating the result.
+
+ """
collections_context = obj.parent_context
_class_template_set = collections_context.py__getattribute__(u'_class_template')
if not _class_template_set:
# Namedtuples are not supported on Python 2.6, early 2.7, because the
# _class_template variable is not defined, there.
return NO_CONTEXTS
-
- # Process arguments
+
+ # Process arguments
# TODO here we only use one of the types, we should use all.
# TODO this is buggy, doesn't need to be a string
name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value()
_fields = list(_follow_param(evaluator, arguments, 1))[0]
- if isinstance(_fields, compiled.CompiledObject):
+ if isinstance(_fields, compiled.CompiledObject):
fields = _fields.get_safe_value().replace(',', ' ').split()
elif isinstance(_fields, iterable.Sequence):
fields = [
@@ -264,9 +264,9 @@ def collections_namedtuple(evaluator, obj, arguments):
for lazy_context in _fields.py__iter__()
for v in lazy_context.infer() if is_string(v)
]
- else:
+ else:
return NO_CONTEXTS
-
+
def get_var(name):
x, = collections_context.py__getattribute__(name)
return x.get_safe_value()
@@ -275,15 +275,15 @@ def collections_namedtuple(evaluator, obj, arguments):
base += _NAMEDTUPLE_INIT
# Build source code
code = base.format(
- typename=name,
+ typename=name,
field_names=tuple(fields),
- num_fields=len(fields),
+ num_fields=len(fields),
arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1],
repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields),
field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name)
- for index, name in enumerate(fields))
- )
-
+ for index, name in enumerate(fields))
+ )
+
# Parse source code
module = evaluator.grammar.parse(code)
generated_class = next(module.iter_classdefs())
@@ -292,30 +292,30 @@ def collections_namedtuple(evaluator, obj, arguments):
code_lines=parso.split_lines(code, keepends=True),
)
return ContextSet(ClassContext(evaluator, parent_context, generated_class))
-
-
-@argument_clinic('first, /')
-def _return_first_param(evaluator, firsts):
- return firsts
-
-
-_implemented = {
- 'builtins': {
- 'getattr': builtins_getattr,
- 'type': builtins_type,
- 'super': builtins_super,
- 'reversed': builtins_reversed,
- 'isinstance': builtins_isinstance,
- },
- 'copy': {
- 'copy': _return_first_param,
- 'deepcopy': _return_first_param,
- },
- 'json': {
+
+
+@argument_clinic('first, /')
+def _return_first_param(evaluator, firsts):
+ return firsts
+
+
+_implemented = {
+ 'builtins': {
+ 'getattr': builtins_getattr,
+ 'type': builtins_type,
+ 'super': builtins_super,
+ 'reversed': builtins_reversed,
+ 'isinstance': builtins_isinstance,
+ },
+ 'copy': {
+ 'copy': _return_first_param,
+ 'deepcopy': _return_first_param,
+ },
+ 'json': {
'load': lambda evaluator, obj, arguments: NO_CONTEXTS,
'loads': lambda evaluator, obj, arguments: NO_CONTEXTS,
- },
- 'collections': {
- 'namedtuple': collections_namedtuple,
- },
-}
+ },
+ 'collections': {
+ 'namedtuple': collections_namedtuple,
+ },
+}
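
The _implemented table above is a plain two-level dispatch: handlers are keyed by module name, then by function name, and anything unknown falls through to ordinary evaluation. A minimal standalone sketch of that pattern, with hypothetical names rather than jedi's real API:

NO_CONTEXTS = frozenset()

def _return_first_param(evaluator, firsts):
    # Mirrors copy.copy/copy.deepcopy above: the result type is the input type.
    return firsts

_handlers = {
    'copy': {
        'copy': _return_first_param,
        'deepcopy': _return_first_param,
    },
}

def execute_special_case(evaluator, module_name, func_name, *args):
    # Hypothetical entry point: look up module, then function; on a miss,
    # report "no opinion" so the caller can evaluate the call normally.
    try:
        handler = _handlers[module_name][func_name]
    except KeyError:
        return NO_CONTEXTS
    return handler(evaluator, *args)

print(execute_special_case(None, 'copy', 'deepcopy', ['ctx']))  # ['ctx']
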
diff --git a/contrib/python/jedi/jedi/evaluate/sys_path.py b/contrib/python/jedi/jedi/evaluate/sys_path.py
index a43d2ba177..8fb1843f05 100644
--- a/contrib/python/jedi/jedi/evaluate/sys_path.py
+++ b/contrib/python/jedi/jedi/evaluate/sys_path.py
@@ -1,5 +1,5 @@
-import os
-
+import os
+
from jedi._compatibility import unicode, force_unicode, all_suffixes
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextualizedNode
@@ -7,58 +7,58 @@ from jedi.evaluate.helpers import is_string
from jedi.common.utils import traverse_parents
from jedi.parser_utils import get_cached_code_lines
from jedi import settings
-from jedi import debug
-
-
+from jedi import debug
+
+
def _abs_path(module_context, path):
if os.path.isabs(path):
return path
-
+
module_path = module_context.py__file__()
if module_path is None:
# In this case we have no idea where we actually are in the file
# system.
return None
-
+
base_dir = os.path.dirname(module_path)
path = force_unicode(path)
return os.path.abspath(os.path.join(base_dir, path))
-
-
+
+
def _paths_from_assignment(module_context, expr_stmt):
- """
- Extracts the assigned strings from an assignment that looks as follows::
-
+ """
+ Extracts the assigned strings from an assignment that looks as follows::
+
sys.path[0:0] = ['module/path', 'another/module/path']
-
- This function is in general pretty tolerant (and therefore 'buggy').
- However, it's not a big issue usually to add more paths to Jedi's sys_path,
- because it will only affect Jedi in very random situations and by adding
- more paths than necessary, it usually benefits the general user.
- """
- for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
- try:
- assert operator in ['=', '+=']
+
+ This function is in general pretty tolerant (and therefore 'buggy').
+ However, it's not a big issue usually to add more paths to Jedi's sys_path,
+ because it will only affect Jedi in very random situations and by adding
+ more paths than necessary, it usually benefits the general user.
+ """
+ for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
+ try:
+ assert operator in ['=', '+=']
assert assignee.type in ('power', 'atom_expr') and \
len(assignee.children) > 1
- c = assignee.children
- assert c[0].type == 'name' and c[0].value == 'sys'
- trailer = c[1]
- assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
- # TODO Essentially we're not checking details on sys.path
-            # manipulation. Both assignment of the sys.path and changing/adding
+ c = assignee.children
+ assert c[0].type == 'name' and c[0].value == 'sys'
+ trailer = c[1]
+ assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
+ # TODO Essentially we're not checking details on sys.path
+            # manipulation. Both assignment of the sys.path and changing/adding
# parts of the sys.path are the same: They get added to the end of
# the current sys.path.
- """
- execution = c[2]
- assert execution.children[0] == '['
- subscript = execution.children[1]
- assert subscript.type == 'subscript'
- assert ':' in subscript.children
- """
- except AssertionError:
- continue
-
+ """
+ execution = c[2]
+ assert execution.children[0] == '['
+ subscript = execution.children[1]
+ assert subscript.type == 'subscript'
+ assert ':' in subscript.children
+ """
+ except AssertionError:
+ continue
+
cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
for lazy_context in cn.infer().iterate(cn):
for context in lazy_context.infer():
@@ -66,59 +66,59 @@ def _paths_from_assignment(module_context, expr_stmt):
abs_path = _abs_path(module_context, context.get_safe_value())
if abs_path is not None:
yield abs_path
-
-
+
+
def _paths_from_list_modifications(module_context, trailer1, trailer2):
- """ extract the path from either "sys.path.append" or "sys.path.insert" """
- # Guarantee that both are trailers, the first one a name and the second one
- # a function execution with at least one param.
+ """ extract the path from either "sys.path.append" or "sys.path.insert" """
+ # Guarantee that both are trailers, the first one a name and the second one
+ # a function execution with at least one param.
if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'
and trailer2.type == 'trailer' and trailer2.children[0] == '('
- and len(trailer2.children) == 3):
+ and len(trailer2.children) == 3):
return
-
- name = trailer1.children[1].value
- if name not in ['insert', 'append']:
+
+ name = trailer1.children[1].value
+ if name not in ['insert', 'append']:
return
- arg = trailer2.children[1]
- if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
- arg = arg.children[2]
-
+ arg = trailer2.children[1]
+ if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
+ arg = arg.children[2]
+
for context in module_context.create_context(arg).eval_node(arg):
if is_string(context):
abs_path = _abs_path(module_context, context.get_safe_value())
if abs_path is not None:
yield abs_path
-
+
@evaluator_method_cache(default=[])
def check_sys_path_modifications(module_context):
"""
Detect sys.path modifications within module.
"""
- def get_sys_path_powers(names):
- for name in names:
- power = name.parent.parent
+ def get_sys_path_powers(names):
+ for name in names:
+ power = name.parent.parent
if power.type in ('power', 'atom_expr'):
- c = power.children
+ c = power.children
if c[0].type == 'name' and c[0].value == 'sys' \
and c[1].type == 'trailer':
- n = c[1].children[1]
+ n = c[1].children[1]
if n.type == 'name' and n.value == 'path':
- yield name, power
-
+ yield name, power
+
if module_context.tree_node is None:
return []
added = []
- try:
+ try:
possible_names = module_context.tree_node.get_used_names()['path']
- except KeyError:
- pass
- else:
- for name, power in get_sys_path_powers(possible_names):
+ except KeyError:
+ pass
+ else:
+ for name, power in get_sys_path_powers(possible_names):
expr_stmt = power.parent
- if len(power.children) >= 4:
+ if len(power.children) >= 4:
added.extend(
_paths_from_list_modifications(
module_context, *power.children[2:4]
@@ -127,18 +127,18 @@ def check_sys_path_modifications(module_context):
elif expr_stmt is not None and expr_stmt.type == 'expr_stmt':
added.extend(_paths_from_assignment(module_context, expr_stmt))
return added
-
-
+
+
def discover_buildout_paths(evaluator, script_path):
- buildout_script_paths = set()
-
+ buildout_script_paths = set()
+
for buildout_script_path in _get_buildout_script_paths(script_path):
for path in _get_paths_from_buildout_script(evaluator, buildout_script_path):
- buildout_script_paths.add(path)
-
+ buildout_script_paths.add(path)
+
return buildout_script_paths
-
-
+
+
def _get_paths_from_buildout_script(evaluator, buildout_script_path):
try:
module_node = evaluator.parse(
@@ -148,52 +148,52 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path):
)
except IOError:
debug.warning('Error trying to read buildout_script: %s', buildout_script_path)
- return
-
+ return
+
from jedi.evaluate.context import ModuleContext
module = ModuleContext(
evaluator, module_node, buildout_script_path,
code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path),
)
for path in check_sys_path_modifications(module):
- yield path
-
-
-def _get_parent_dir_with_file(path, filename):
- for parent in traverse_parents(path):
- if os.path.isfile(os.path.join(parent, filename)):
- return parent
- return None
-
-
+ yield path
+
+
+def _get_parent_dir_with_file(path, filename):
+ for parent in traverse_parents(path):
+ if os.path.isfile(os.path.join(parent, filename)):
+ return parent
+ return None
+
+
def _get_buildout_script_paths(search_path):
- """
- if there is a 'buildout.cfg' file in one of the parent directories of the
- given module it will return a list of all files in the buildout bin
- directory that look like python files.
-
+ """
+ if there is a 'buildout.cfg' file in one of the parent directories of the
+ given module it will return a list of all files in the buildout bin
+ directory that look like python files.
+
:param search_path: absolute path to the module.
:type search_path: str
- """
+ """
project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
- if not project_root:
+ if not project_root:
return
- bin_path = os.path.join(project_root, 'bin')
- if not os.path.exists(bin_path):
+ bin_path = os.path.join(project_root, 'bin')
+ if not os.path.exists(bin_path):
return
- for filename in os.listdir(bin_path):
- try:
- filepath = os.path.join(bin_path, filename)
- with open(filepath, 'r') as f:
- firstline = f.readline()
- if firstline.startswith('#!') and 'python' in firstline:
+ for filename in os.listdir(bin_path):
+ try:
+ filepath = os.path.join(bin_path, filename)
+ with open(filepath, 'r') as f:
+ firstline = f.readline()
+ if firstline.startswith('#!') and 'python' in firstline:
yield filepath
except (UnicodeDecodeError, IOError) as e:
# Probably a binary file; permission error or race cond. because
# file got deleted. Ignore it.
- debug.warning(unicode(e))
- continue
+ debug.warning(unicode(e))
+ continue
def dotted_path_in_sys_path(sys_path, module_path):
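
check_sys_path_modifications above recognizes sys.path.append(...) and sys.path.insert(i, ...) calls by walking parso trees. The same shape of check, written against the stdlib ast module purely as an illustrative sketch (jedi itself never uses ast here):

import ast

def sys_path_additions(source):
    # Collect string literals passed to sys.path.append / sys.path.insert.
    found = []
    for node in ast.walk(ast.parse(source)):
        if not (isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute)):
            continue
        func = node.func
        if (func.attr in ('append', 'insert')
                and isinstance(func.value, ast.Attribute)
                and func.value.attr == 'path'
                and isinstance(func.value.value, ast.Name)
                and func.value.value.id == 'sys'
                and node.args):
            arg = node.args[-1]  # for insert(0, p) the path is the last argument
            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
                found.append(arg.value)
    return found

print(sys_path_additions("import sys\nsys.path.append('module/path')"))
# ['module/path']
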
diff --git a/contrib/python/jedi/jedi/refactoring.py b/contrib/python/jedi/jedi/refactoring.py
index d887a16d08..6c1d74d1bd 100644
--- a/contrib/python/jedi/jedi/refactoring.py
+++ b/contrib/python/jedi/jedi/refactoring.py
@@ -1,172 +1,172 @@
-"""
+"""
THIS is not in active development, please check
https://github.com/davidhalter/jedi/issues/667 first before editing.
-Introduce some basic refactoring functions to |jedi|. This module is still in a
-very early development stage and needs much testing and improvement.
-
-.. warning:: I won't do too much here, but if anyone wants to step in, please
- do. Refactoring is none of my priorities
-
+Introduce some basic refactoring functions to |jedi|. This module is still in a
+very early development stage and needs much testing and improvement.
+
+.. warning:: I won't do too much here, but if anyone wants to step in, please
+ do. Refactoring is none of my priorities
+
It uses the |jedi| `API <api.html>`_ and supports currently the
-following functions (sometimes bug-prone):
-
-- rename
-- extract variable
-- inline variable
-"""
-import difflib
-
+following functions (sometimes bug-prone):
+
+- rename
+- extract variable
+- inline variable
+"""
+import difflib
+
from parso import python_bytes_to_unicode, split_lines
-from jedi.evaluate import helpers
-
-
-class Refactoring(object):
- def __init__(self, change_dct):
- """
- :param change_dct: dict(old_path=(new_path, old_lines, new_lines))
- """
- self.change_dct = change_dct
-
- def old_files(self):
- dct = {}
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
+from jedi.evaluate import helpers
+
+
+class Refactoring(object):
+ def __init__(self, change_dct):
+ """
+ :param change_dct: dict(old_path=(new_path, old_lines, new_lines))
+ """
+ self.change_dct = change_dct
+
+ def old_files(self):
+ dct = {}
+ for old_path, (new_path, old_l, new_l) in self.change_dct.items():
dct[old_path] = '\n'.join(old_l)
- return dct
-
- def new_files(self):
- dct = {}
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
- dct[new_path] = '\n'.join(new_l)
- return dct
-
- def diff(self):
- texts = []
- for old_path, (new_path, old_l, new_l) in self.change_dct.items():
- if old_path:
- udiff = difflib.unified_diff(old_l, new_l)
- else:
- udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
- texts.append('\n'.join(udiff))
- return '\n'.join(texts)
-
-
-def rename(script, new_name):
- """ The `args` / `kwargs` params are the same as in `api.Script`.
+ return dct
+
+ def new_files(self):
+ dct = {}
+ for old_path, (new_path, old_l, new_l) in self.change_dct.items():
+ dct[new_path] = '\n'.join(new_l)
+ return dct
+
+ def diff(self):
+ texts = []
+ for old_path, (new_path, old_l, new_l) in self.change_dct.items():
+ if old_path:
+ udiff = difflib.unified_diff(old_l, new_l)
+ else:
+ udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
+ texts.append('\n'.join(udiff))
+ return '\n'.join(texts)
+
+
+def rename(script, new_name):
+ """ The `args` / `kwargs` params are the same as in `api.Script`.
:param new_name: The new name of the script.
:param script: The source Script object.
- :return: list of changed lines/changed files
- """
- return Refactoring(_rename(script.usages(), new_name))
-
-
-def _rename(names, replace_str):
- """ For both rename and inline. """
- order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
- reverse=True)
-
- def process(path, old_lines, new_lines):
- if new_lines is not None: # goto next file, save last
- dct[path] = path, old_lines, new_lines
-
- dct = {}
- current_path = object()
- new_lines = old_lines = None
- for name in order:
- if name.in_builtin_module():
- continue
- if current_path != name.module_path:
- current_path = name.module_path
-
- process(current_path, old_lines, new_lines)
- if current_path is not None:
- # None means take the source that is a normal param.
- with open(current_path) as f:
- source = f.read()
-
+ :return: list of changed lines/changed files
+ """
+ return Refactoring(_rename(script.usages(), new_name))
+
+
+def _rename(names, replace_str):
+ """ For both rename and inline. """
+ order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
+ reverse=True)
+
+ def process(path, old_lines, new_lines):
+ if new_lines is not None: # goto next file, save last
+ dct[path] = path, old_lines, new_lines
+
+ dct = {}
+ current_path = object()
+ new_lines = old_lines = None
+ for name in order:
+ if name.in_builtin_module():
+ continue
+ if current_path != name.module_path:
+ current_path = name.module_path
+
+ process(current_path, old_lines, new_lines)
+ if current_path is not None:
+ # None means take the source that is a normal param.
+ with open(current_path) as f:
+ source = f.read()
+
new_lines = split_lines(python_bytes_to_unicode(source))
- old_lines = new_lines[:]
-
- nr, indent = name.line, name.column
- line = new_lines[nr - 1]
- new_lines[nr - 1] = line[:indent] + replace_str + \
- line[indent + len(name.name):]
- process(current_path, old_lines, new_lines)
- return dct
-
-
-def extract(script, new_name):
- """ The `args` / `kwargs` params are the same as in `api.Script`.
- :param operation: The refactoring operation to execute.
- :type operation: str
- :type source: str
- :return: list of changed lines/changed files
- """
+ old_lines = new_lines[:]
+
+ nr, indent = name.line, name.column
+ line = new_lines[nr - 1]
+ new_lines[nr - 1] = line[:indent] + replace_str + \
+ line[indent + len(name.name):]
+ process(current_path, old_lines, new_lines)
+ return dct
+
+
+def extract(script, new_name):
+ """ The `args` / `kwargs` params are the same as in `api.Script`.
+ :param operation: The refactoring operation to execute.
+ :type operation: str
+ :type source: str
+ :return: list of changed lines/changed files
+ """
new_lines = split_lines(python_bytes_to_unicode(script.source))
- old_lines = new_lines[:]
-
- user_stmt = script._parser.user_stmt()
-
+ old_lines = new_lines[:]
+
+ user_stmt = script._parser.user_stmt()
+
# TODO care for multi-line extracts
- dct = {}
- if user_stmt:
- pos = script._pos
- line_index = pos[0] - 1
+ dct = {}
+ if user_stmt:
+ pos = script._pos
+ line_index = pos[0] - 1
- arr, index = helpers.array_for_pos(user_stmt, pos)
- if arr is not None:
- start_pos = arr[index].start_pos
- end_pos = arr[index].end_pos
-
- # take full line if the start line is different from end line
- e = end_pos[1] if end_pos[0] == start_pos[0] else None
- start_line = new_lines[start_pos[0] - 1]
- text = start_line[start_pos[1]:e]
- for l in range(start_pos[0], end_pos[0] - 1):
+ arr, index = helpers.array_for_pos(user_stmt, pos)
+ if arr is not None:
+ start_pos = arr[index].start_pos
+ end_pos = arr[index].end_pos
+
+ # take full line if the start line is different from end line
+ e = end_pos[1] if end_pos[0] == start_pos[0] else None
+ start_line = new_lines[start_pos[0] - 1]
+ text = start_line[start_pos[1]:e]
+ for l in range(start_pos[0], end_pos[0] - 1):
                text += '\n' + new_lines[l]
- if e is None:
- end_line = new_lines[end_pos[0] - 1]
- text += '\n' + end_line[:end_pos[1]]
-
- # remove code from new lines
- t = text.lstrip()
- del_start = start_pos[1] + len(text) - len(t)
-
- text = t.rstrip()
- del_end = len(t) - len(text)
- if e is None:
- new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:]
- e = len(start_line)
- else:
- e = e - del_end
- start_line = start_line[:del_start] + new_name + start_line[e:]
- new_lines[start_pos[0] - 1] = start_line
- new_lines[start_pos[0]:end_pos[0] - 1] = []
-
+ if e is None:
+ end_line = new_lines[end_pos[0] - 1]
+ text += '\n' + end_line[:end_pos[1]]
+
+ # remove code from new lines
+ t = text.lstrip()
+ del_start = start_pos[1] + len(text) - len(t)
+
+ text = t.rstrip()
+ del_end = len(t) - len(text)
+ if e is None:
+ new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:]
+ e = len(start_line)
+ else:
+ e = e - del_end
+ start_line = start_line[:del_start] + new_name + start_line[e:]
+ new_lines[start_pos[0] - 1] = start_line
+ new_lines[start_pos[0]:end_pos[0] - 1] = []
+
# add parentheses in multi-line case
- open_brackets = ['(', '[', '{']
- close_brackets = [')', ']', '}']
- if '\n' in text and not (text[0] in open_brackets and text[-1] ==
- close_brackets[open_brackets.index(text[0])]):
- text = '(%s)' % text
-
- # add new line before statement
- indent = user_stmt.start_pos[1]
- new = "%s%s = %s" % (' ' * indent, new_name, text)
- new_lines.insert(line_index, new)
- dct[script.path] = script.path, old_lines, new_lines
- return Refactoring(dct)
-
-
-def inline(script):
- """
- :type script: api.Script
- """
+ open_brackets = ['(', '[', '{']
+ close_brackets = [')', ']', '}']
+ if '\n' in text and not (text[0] in open_brackets and text[-1] ==
+ close_brackets[open_brackets.index(text[0])]):
+ text = '(%s)' % text
+
+ # add new line before statement
+ indent = user_stmt.start_pos[1]
+ new = "%s%s = %s" % (' ' * indent, new_name, text)
+ new_lines.insert(line_index, new)
+ dct[script.path] = script.path, old_lines, new_lines
+ return Refactoring(dct)
+
+
+def inline(script):
+ """
+ :type script: api.Script
+ """
new_lines = split_lines(python_bytes_to_unicode(script.source))
-
- dct = {}
-
- definitions = script.goto_assignments()
+
+ dct = {}
+
+ definitions = script.goto_assignments()
assert len(definitions) == 1
stmt = definitions[0]._definition
usages = script.usages()
@@ -178,7 +178,7 @@ def inline(script):
# don't allow multi-line refactorings for now.
assert stmt.start_pos[0] == stmt.end_pos[0]
index = stmt.start_pos[0] - 1
-
+
line = new_lines[index]
replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
replace_str = replace_str.strip()
@@ -187,11 +187,11 @@ def inline(script):
arr = expression_list[0]
if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
replace_str = '(%s)' % replace_str
-
+
# if it's the only assignment, remove the statement
if len(stmt.get_defined_names()) == 1:
line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]
-
+
dct = _rename(inlines, replace_str)
# remove the empty line
new_lines = dct[script.path][2]
@@ -199,5 +199,5 @@ def inline(script):
new_lines[index] = line
else:
new_lines.pop(index)
-
- return Refactoring(dct)
+
+ return Refactoring(dct)
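
Refactoring.diff() above is a thin wrapper over difflib.unified_diff, which takes two lists of lines plus optional from/to labels and yields diff lines. A self-contained example of that call:

import difflib

old = ['x = 1', 'print(x)']
new = ['x = 2', 'print(x)']
print('\n'.join(difflib.unified_diff(old, new, 'a.py', 'b.py', lineterm='')))
# --- a.py
# +++ b.py
# @@ -1,2 +1,2 @@
# -x = 1
# +x = 2
#  print(x)
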
diff --git a/contrib/python/jedi/jedi/settings.py b/contrib/python/jedi/jedi/settings.py
index 2ee60aaeb3..436fadfc0e 100644
--- a/contrib/python/jedi/jedi/settings.py
+++ b/contrib/python/jedi/jedi/settings.py
@@ -1,164 +1,164 @@
-"""
-This module contains variables with global |jedi| settings. To change the
-behavior of |jedi|, change the variables defined in :mod:`jedi.settings`.
-
-Plugins should expose an interface so that the user can adjust the
-configuration.
-
-
-Example usage::
-
- from jedi import settings
- settings.case_insensitive_completion = True
-
-
-Completion output
-~~~~~~~~~~~~~~~~~
-
-.. autodata:: case_insensitive_completion
-.. autodata:: add_bracket_after_function
-.. autodata:: no_completion_duplicates
-
-
-Filesystem cache
-~~~~~~~~~~~~~~~~
-
-.. autodata:: cache_directory
-.. autodata:: use_filesystem_cache
-
-
-Parser
-~~~~~~
-
-.. autodata:: fast_parser
-
-
-Dynamic stuff
-~~~~~~~~~~~~~
-
-.. autodata:: dynamic_array_additions
-.. autodata:: dynamic_params
-.. autodata:: dynamic_params_for_other_modules
-.. autodata:: additional_dynamic_modules
-.. autodata:: auto_import_modules
-
-
-Caching
-~~~~~~~
-
-.. autodata:: call_signatures_validity
-
-
-"""
-import os
-import platform
-
-# ----------------
-# completion output settings
-# ----------------
-
-case_insensitive_completion = True
-"""
-The completion is by default case insensitive.
-"""
-
-add_bracket_after_function = False
-"""
-Adds an opening bracket after a function, because that's normal behaviour.
-Removed it again, because in VIM that is not very practical.
-"""
-
-no_completion_duplicates = True
-"""
-If set, completions with the same name don't appear in the output anymore,
-but are in the `same_name_completions` attribute.
-"""
-
-# ----------------
-# Filesystem cache
-# ----------------
-
-use_filesystem_cache = True
-"""
-Use filesystem cache to save once parsed files with pickle.
-"""
-
-if platform.system().lower() == 'windows':
- _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
- 'Jedi')
-elif platform.system().lower() == 'darwin':
- _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
-else:
- _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
- 'jedi')
-cache_directory = os.path.expanduser(_cache_directory)
-"""
+"""
+This module contains variables with global |jedi| settings. To change the
+behavior of |jedi|, change the variables defined in :mod:`jedi.settings`.
+
+Plugins should expose an interface so that the user can adjust the
+configuration.
+
+
+Example usage::
+
+ from jedi import settings
+ settings.case_insensitive_completion = True
+
+
+Completion output
+~~~~~~~~~~~~~~~~~
+
+.. autodata:: case_insensitive_completion
+.. autodata:: add_bracket_after_function
+.. autodata:: no_completion_duplicates
+
+
+Filesystem cache
+~~~~~~~~~~~~~~~~
+
+.. autodata:: cache_directory
+.. autodata:: use_filesystem_cache
+
+
+Parser
+~~~~~~
+
+.. autodata:: fast_parser
+
+
+Dynamic stuff
+~~~~~~~~~~~~~
+
+.. autodata:: dynamic_array_additions
+.. autodata:: dynamic_params
+.. autodata:: dynamic_params_for_other_modules
+.. autodata:: additional_dynamic_modules
+.. autodata:: auto_import_modules
+
+
+Caching
+~~~~~~~
+
+.. autodata:: call_signatures_validity
+
+
+"""
+import os
+import platform
+
+# ----------------
+# completion output settings
+# ----------------
+
+case_insensitive_completion = True
+"""
+The completion is by default case insensitive.
+"""
+
+add_bracket_after_function = False
+"""
+Adds an opening bracket after a function, because that's normal behaviour.
+Removed it again, because in VIM that is not very practical.
+"""
+
+no_completion_duplicates = True
+"""
+If set, completions with the same name don't appear in the output anymore,
+but are in the `same_name_completions` attribute.
+"""
+
+# ----------------
+# Filesystem cache
+# ----------------
+
+use_filesystem_cache = True
+"""
+Use filesystem cache to save once parsed files with pickle.
+"""
+
+if platform.system().lower() == 'windows':
+ _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
+ 'Jedi')
+elif platform.system().lower() == 'darwin':
+ _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
+else:
+ _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
+ 'jedi')
+cache_directory = os.path.expanduser(_cache_directory)
+"""
The path where the cache is stored.
-
-On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
-``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``.
-On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
-``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
-"""
-
-# ----------------
-# parser
-# ----------------
-
-fast_parser = True
-"""
-Use the fast parser. This means that reparsing is only being done if
-something has been changed e.g. to a function. If this happens, only the
-function is being reparsed.
-"""
-
-# ----------------
-# dynamic stuff
-# ----------------
-
-dynamic_array_additions = True
-"""
-check for `append`, etc. on arrays: [], {}, () as well as list/set calls.
-"""
-
-dynamic_params = True
-"""
-A dynamic param completion, finds the callees of the function, which define
-the params of a function.
-"""
-
-dynamic_params_for_other_modules = True
-"""
-Do the same for other modules.
-"""
-
-additional_dynamic_modules = []
-"""
-Additional modules in which |jedi| checks if statements are to be found. This
-is practical for IDEs, that want to administrate their modules themselves.
-"""
-
-dynamic_flow_information = True
-"""
-Check for `isinstance` and other information to infer a type.
-"""
-
-auto_import_modules = [
+
+On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
+``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``.
+On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
+``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
+"""
+
+# ----------------
+# parser
+# ----------------
+
+fast_parser = True
+"""
+Use the fast parser. This means that reparsing is only being done if
+something has been changed e.g. to a function. If this happens, only the
+function is being reparsed.
+"""
+
+# ----------------
+# dynamic stuff
+# ----------------
+
+dynamic_array_additions = True
+"""
+check for `append`, etc. on arrays: [], {}, () as well as list/set calls.
+"""
+
+dynamic_params = True
+"""
+A dynamic param completion, finds the callees of the function, which define
+the params of a function.
+"""
+
+dynamic_params_for_other_modules = True
+"""
+Do the same for other modules.
+"""
+
+additional_dynamic_modules = []
+"""
+Additional modules in which |jedi| checks if statements are to be found. This
+is practical for IDEs, that want to administrate their modules themselves.
+"""
+
+dynamic_flow_information = True
+"""
+Check for `isinstance` and other information to infer a type.
+"""
+
+auto_import_modules = [
'hashlib', # hashlib is mostly using setattr, which jedi doesn't understand
'gi', # This third-party repository (GTK stuff) doesn't really work with jedi
-]
-"""
-Modules that are not analyzed but imported, although they contain Python code.
-This improves autocompletion for libraries that use ``setattr`` or
-``globals()`` modifications a lot.
-"""
-
-# ----------------
-# caching validity (time)
-# ----------------
-
-call_signatures_validity = 3.0
-"""
-Finding function calls might be slow (0.1-0.5s). This is not acceptable for
-normal writing. Therefore cache it for a short time.
-"""
+]
+"""
+Modules that are not analyzed but imported, although they contain Python code.
+This improves autocompletion for libraries that use ``setattr`` or
+``globals()`` modifications a lot.
+"""
+
+# ----------------
+# caching validity (time)
+# ----------------
+
+call_signatures_validity = 3.0
+"""
+Finding function calls might be slow (0.1-0.5s). This is not acceptable for
+normal writing. Therefore cache it for a short time.
+"""
diff --git a/contrib/python/jedi/jedi/utils.py b/contrib/python/jedi/jedi/utils.py
index 0a75afe984..04e87c301c 100644
--- a/contrib/python/jedi/jedi/utils.py
+++ b/contrib/python/jedi/jedi/utils.py
@@ -1,66 +1,66 @@
-"""
-Utilities for end-users.
-"""
-
-from __future__ import absolute_import
-import __main__
-from collections import namedtuple
+"""
+Utilities for end-users.
+"""
+
+from __future__ import absolute_import
+import __main__
+from collections import namedtuple
import logging
import traceback
-import re
-import os
-import sys
-
+import re
+import os
+import sys
+
from parso import split_lines
-from jedi import Interpreter
+from jedi import Interpreter
from jedi.api.helpers import get_on_completion_name
-
-
+
+
READLINE_DEBUG = False
-def setup_readline(namespace_module=__main__):
- """
- Install Jedi completer to :mod:`readline`.
-
-    This function sets up :mod:`readline` to use Jedi in Python interactive
- shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
- ``$HOME/.pythonrc.py``), you can add this piece of code::
-
- try:
- from jedi.utils import setup_readline
- setup_readline()
- except ImportError:
- # Fallback to the stdlib readline completer if it is installed.
- # Taken from http://docs.python.org/2/library/rlcompleter.html
- print("Jedi is not installed, falling back to readline")
- try:
- import readline
- import rlcompleter
- readline.parse_and_bind("tab: complete")
- except ImportError:
- print("Readline is not installed either. No tab completion is enabled.")
-
- This will fallback to the readline completer if Jedi is not installed.
- The readline completer will only complete names in the global namespace,
- so for example::
-
- ran<TAB>
-
- will complete to ``range``
-
- with both Jedi and readline, but::
-
- range(10).cou<TAB>
-
- will show complete to ``range(10).count`` only with Jedi.
-
- You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
- your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
- bash).
-
- """
+def setup_readline(namespace_module=__main__):
+ """
+ Install Jedi completer to :mod:`readline`.
+
+    This function sets up :mod:`readline` to use Jedi in Python interactive
+ shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
+ ``$HOME/.pythonrc.py``), you can add this piece of code::
+
+ try:
+ from jedi.utils import setup_readline
+ setup_readline()
+ except ImportError:
+ # Fallback to the stdlib readline completer if it is installed.
+ # Taken from http://docs.python.org/2/library/rlcompleter.html
+ print("Jedi is not installed, falling back to readline")
+ try:
+ import readline
+ import rlcompleter
+ readline.parse_and_bind("tab: complete")
+ except ImportError:
+ print("Readline is not installed either. No tab completion is enabled.")
+
+ This will fallback to the readline completer if Jedi is not installed.
+ The readline completer will only complete names in the global namespace,
+ so for example::
+
+ ran<TAB>
+
+ will complete to ``range``
+
+ with both Jedi and readline, but::
+
+ range(10).cou<TAB>
+
+ will show complete to ``range(10).count`` only with Jedi.
+
+ You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
+ your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
+ bash).
+
+ """
if READLINE_DEBUG:
logging.basicConfig(
filename='/tmp/jedi.log',
@@ -68,24 +68,24 @@ def setup_readline(namespace_module=__main__):
level=logging.DEBUG
)
- class JediRL(object):
- def complete(self, text, state):
- """
- This complete stuff is pretty weird, a generator would make
- a lot more sense, but probably due to backwards compatibility
- this is still the way how it works.
-
- The only important part is stuff in the ``state == 0`` flow,
- everything else has been copied from the ``rlcompleter`` std.
- library module.
- """
- if state == 0:
- sys.path.insert(0, os.getcwd())
- # Calling python doesn't have a path, so add to sys.path.
- try:
+ class JediRL(object):
+ def complete(self, text, state):
+ """
+ This complete stuff is pretty weird, a generator would make
+ a lot more sense, but probably due to backwards compatibility
+ this is still the way how it works.
+
+ The only important part is stuff in the ``state == 0`` flow,
+ everything else has been copied from the ``rlcompleter`` std.
+ library module.
+ """
+ if state == 0:
+ sys.path.insert(0, os.getcwd())
+ # Calling python doesn't have a path, so add to sys.path.
+ try:
logging.debug("Start REPL completion: " + repr(text))
- interpreter = Interpreter(text, [namespace_module.__dict__])
-
+ interpreter = Interpreter(text, [namespace_module.__dict__])
+
lines = split_lines(text)
position = (len(lines), len(lines[-1]))
name = get_on_completion_name(
@@ -94,49 +94,49 @@ def setup_readline(namespace_module=__main__):
position
)
before = text[:len(text) - len(name)]
- completions = interpreter.completions()
+ completions = interpreter.completions()
logging.debug("REPL completions: %s", completions)
except:
logging.error("REPL Completion error:\n" + traceback.format_exc())
raise
- finally:
- sys.path.pop(0)
-
- self.matches = [before + c.name_with_symbols for c in completions]
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- try:
+ finally:
+ sys.path.pop(0)
+
+ self.matches = [before + c.name_with_symbols for c in completions]
+ try:
+ return self.matches[state]
+ except IndexError:
+ return None
+
+ try:
# Need to import this one as well to make sure it's executed before
# this code. This didn't use to be an issue until 3.3. Starting with
# 3.4 this is different, it always overwrites the completer if it's not
# already imported here.
import rlcompleter # noqa: F401
- import readline
- except ImportError:
+ import readline
+ except ImportError:
print("Jedi: Module readline not available.")
- else:
- readline.set_completer(JediRL().complete)
- readline.parse_and_bind("tab: complete")
- # jedi itself does the case matching
- readline.parse_and_bind("set completion-ignore-case on")
- # because it's easier to hit the tab just once
- readline.parse_and_bind("set show-all-if-unmodified")
- readline.parse_and_bind("set show-all-if-ambiguous on")
- # don't repeat all the things written in the readline all the time
- readline.parse_and_bind("set completion-prefix-display-length 2")
- # No delimiters, Jedi handles that.
- readline.set_completer_delims('')
-
-
-def version_info():
- """
- Returns a namedtuple of Jedi's version, similar to Python's
- ``sys.version_info``.
- """
- Version = namedtuple('Version', 'major, minor, micro')
- from jedi import __version__
+ else:
+ readline.set_completer(JediRL().complete)
+ readline.parse_and_bind("tab: complete")
+ # jedi itself does the case matching
+ readline.parse_and_bind("set completion-ignore-case on")
+ # because it's easier to hit the tab just once
+ readline.parse_and_bind("set show-all-if-unmodified")
+ readline.parse_and_bind("set show-all-if-ambiguous on")
+ # don't repeat all the things written in the readline all the time
+ readline.parse_and_bind("set completion-prefix-display-length 2")
+ # No delimiters, Jedi handles that.
+ readline.set_completer_delims('')
+
+
+def version_info():
+ """
+ Returns a namedtuple of Jedi's version, similar to Python's
+ ``sys.version_info``.
+ """
+ Version = namedtuple('Version', 'major, minor, micro')
+ from jedi import __version__
tupl = re.findall(r'[a-z]+|\d+', __version__)
- return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
+ return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
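
The regex in version_info() above splits a version string into numeric fields plus an optional alphabetic suffix; rerunning its parsing standalone ('0.13.3' is the release vendored here):

import re
from collections import namedtuple

Version = namedtuple('Version', 'major, minor, micro')

tupl = re.findall(r'[a-z]+|\d+', '0.13.3')
print(tupl)  # ['0', '13', '3']
print(Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]))
# Version(major=0, minor=13, micro=3)
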
diff --git a/contrib/python/jedi/ya.make b/contrib/python/jedi/ya.make
index dfc0c7ecb8..eff2fef2a7 100644
--- a/contrib/python/jedi/ya.make
+++ b/contrib/python/jedi/ya.make
@@ -1,45 +1,45 @@
PY23_LIBRARY(jedi)
-
+
LICENSE(MIT)
OWNER(g:python-contrib borman)
-
+
VERSION(0.13.3)
-PEERDIR(
+PEERDIR(
contrib/python/parso
contrib/python/setuptools
-)
-
-PY_SRCS(
- TOP_LEVEL
- jedi/__init__.py
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ jedi/__init__.py
jedi/__main__.py
- jedi/_compatibility.py
- jedi/api/__init__.py
- jedi/api/classes.py
+ jedi/_compatibility.py
+ jedi/api/__init__.py
+ jedi/api/classes.py
jedi/api/completion.py
jedi/api/environment.py
jedi/api/exceptions.py
- jedi/api/helpers.py
- jedi/api/interpreter.py
- jedi/api/keywords.py
+ jedi/api/helpers.py
+ jedi/api/interpreter.py
+ jedi/api/keywords.py
jedi/api/project.py
- jedi/api/replstartup.py
- jedi/cache.py
+ jedi/api/replstartup.py
+ jedi/cache.py
jedi/common/__init__.py
jedi/common/context.py
jedi/common/utils.py
- jedi/debug.py
- jedi/evaluate/__init__.py
- jedi/evaluate/analysis.py
+ jedi/debug.py
+ jedi/evaluate/__init__.py
+ jedi/evaluate/analysis.py
jedi/evaluate/arguments.py
jedi/evaluate/base_context.py
- jedi/evaluate/cache.py
- jedi/evaluate/compiled/__init__.py
+ jedi/evaluate/cache.py
+ jedi/evaluate/compiled/__init__.py
jedi/evaluate/compiled/access.py
jedi/evaluate/compiled/context.py
- jedi/evaluate/compiled/fake.py
+ jedi/evaluate/compiled/fake.py
jedi/evaluate/compiled/getattr_static.py
jedi/evaluate/compiled/mixed.py
jedi/evaluate/compiled/subprocess/__init__.py
@@ -53,30 +53,30 @@ PY_SRCS(
jedi/evaluate/context/klass.py
jedi/evaluate/context/module.py
jedi/evaluate/context/namespace.py
- jedi/evaluate/docstrings.py
- jedi/evaluate/dynamic.py
+ jedi/evaluate/docstrings.py
+ jedi/evaluate/dynamic.py
jedi/evaluate/filters.py
- jedi/evaluate/finder.py
- jedi/evaluate/flow_analysis.py
- jedi/evaluate/helpers.py
- jedi/evaluate/imports.py
+ jedi/evaluate/finder.py
+ jedi/evaluate/flow_analysis.py
+ jedi/evaluate/helpers.py
+ jedi/evaluate/imports.py
jedi/evaluate/jedi_typing.py
jedi/evaluate/lazy_context.py
- jedi/evaluate/param.py
+ jedi/evaluate/param.py
jedi/evaluate/parser_cache.py
jedi/evaluate/pep0484.py
- jedi/evaluate/recursion.py
- jedi/evaluate/stdlib.py
+ jedi/evaluate/recursion.py
+ jedi/evaluate/stdlib.py
jedi/evaluate/syntax_tree.py
- jedi/evaluate/sys_path.py
+ jedi/evaluate/sys_path.py
jedi/evaluate/usages.py
jedi/evaluate/utils.py
jedi/parser_utils.py
- jedi/refactoring.py
- jedi/settings.py
- jedi/utils.py
-)
-
+ jedi/refactoring.py
+ jedi/settings.py
+ jedi/utils.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/jedi/
.dist-info/METADATA
@@ -94,4 +94,4 @@ RESOURCE_FILES(
NO_LINT()
-END()
+END()
diff --git a/contrib/python/pexpect/LICENSE b/contrib/python/pexpect/LICENSE
index 11f4b1686f..754db5afcb 100644
--- a/contrib/python/pexpect/LICENSE
+++ b/contrib/python/pexpect/LICENSE
@@ -1,20 +1,20 @@
ISC LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2013-2014, Pexpect development team
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
-
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2013-2014, Pexpect development team
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
diff --git a/contrib/python/pexpect/pexpect/ANSI.py b/contrib/python/pexpect/pexpect/ANSI.py
index 50bd7732f7..1cd2e90e7a 100644
--- a/contrib/python/pexpect/pexpect/ANSI.py
+++ b/contrib/python/pexpect/pexpect/ANSI.py
@@ -1,351 +1,351 @@
-'''This implements an ANSI (VT100) terminal emulator as a subclass of screen.
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-# references:
-# http://en.wikipedia.org/wiki/ANSI_escape_code
-# http://www.retards.org/terminals/vt102.html
-# http://vt100.net/docs/vt102-ug/contents.html
-# http://vt100.net/docs/vt220-rm/
-# http://www.termsys.demon.co.uk/vtansi.htm
-
-from . import screen
-from . import FSM
-import string
-
-#
-# The 'Do.*' functions are helper functions for the ANSI class.
-#
-def DoEmit (fsm):
-
- screen = fsm.memory[0]
- screen.write_ch(fsm.input_symbol)
-
-def DoStartNumber (fsm):
-
- fsm.memory.append (fsm.input_symbol)
-
-def DoBuildNumber (fsm):
-
- ns = fsm.memory.pop()
- ns = ns + fsm.input_symbol
- fsm.memory.append (ns)
-
-def DoBackOne (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_back ()
-
-def DoBack (fsm):
-
- count = int(fsm.memory.pop())
- screen = fsm.memory[0]
- screen.cursor_back (count)
-
-def DoDownOne (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_down ()
-
-def DoDown (fsm):
-
- count = int(fsm.memory.pop())
- screen = fsm.memory[0]
- screen.cursor_down (count)
-
-def DoForwardOne (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_forward ()
-
-def DoForward (fsm):
-
- count = int(fsm.memory.pop())
- screen = fsm.memory[0]
- screen.cursor_forward (count)
-
-def DoUpReverse (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_up_reverse()
-
-def DoUpOne (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_up ()
-
-def DoUp (fsm):
-
- count = int(fsm.memory.pop())
- screen = fsm.memory[0]
- screen.cursor_up (count)
-
-def DoHome (fsm):
-
- c = int(fsm.memory.pop())
- r = int(fsm.memory.pop())
- screen = fsm.memory[0]
- screen.cursor_home (r,c)
-
-def DoHomeOrigin (fsm):
-
- c = 1
- r = 1
- screen = fsm.memory[0]
- screen.cursor_home (r,c)
-
-def DoEraseDown (fsm):
-
- screen = fsm.memory[0]
- screen.erase_down()
-
-def DoErase (fsm):
-
- arg = int(fsm.memory.pop())
- screen = fsm.memory[0]
- if arg == 0:
- screen.erase_down()
- elif arg == 1:
- screen.erase_up()
- elif arg == 2:
- screen.erase_screen()
-
-def DoEraseEndOfLine (fsm):
-
- screen = fsm.memory[0]
- screen.erase_end_of_line()
-
-def DoEraseLine (fsm):
-
- arg = int(fsm.memory.pop())
- screen = fsm.memory[0]
- if arg == 0:
- screen.erase_end_of_line()
- elif arg == 1:
- screen.erase_start_of_line()
- elif arg == 2:
- screen.erase_line()
-
-def DoEnableScroll (fsm):
-
- screen = fsm.memory[0]
- screen.scroll_screen()
-
-def DoCursorSave (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_save_attrs()
-
-def DoCursorRestore (fsm):
-
- screen = fsm.memory[0]
- screen.cursor_restore_attrs()
-
-def DoScrollRegion (fsm):
-
- screen = fsm.memory[0]
- r2 = int(fsm.memory.pop())
- r1 = int(fsm.memory.pop())
- screen.scroll_screen_rows (r1,r2)
-
-def DoMode (fsm):
-
- screen = fsm.memory[0]
- mode = fsm.memory.pop() # Should be 4
- # screen.setReplaceMode ()
-
-def DoLog (fsm):
-
- screen = fsm.memory[0]
- fsm.memory = [screen]
- fout = open ('log', 'a')
- fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
- fout.close()
-
-class term (screen.screen):
-
- '''This class is an abstract, generic terminal.
- This does nothing. This is a placeholder that
- provides a common base class for other terminals
- such as an ANSI terminal. '''
-
- def __init__ (self, r=24, c=80, *args, **kwargs):
-
- screen.screen.__init__(self, r,c,*args,**kwargs)
-
-class ANSI (term):
- '''This class implements an ANSI (VT100) terminal.
- It is a stream filter that recognizes ANSI terminal
- escape sequences and maintains the state of a screen object. '''
-
- def __init__ (self, r=24,c=80,*args,**kwargs):
-
- term.__init__(self,r,c,*args,**kwargs)
-
- #self.screen = screen (24,80)
- self.state = FSM.FSM ('INIT',[self])
- self.state.set_default_transition (DoLog, 'INIT')
- self.state.add_transition_any ('INIT', DoEmit, 'INIT')
- self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
- self.state.add_transition_any ('ESC', DoLog, 'INIT')
- self.state.add_transition ('(', 'ESC', None, 'G0SCS')
- self.state.add_transition (')', 'ESC', None, 'G1SCS')
- self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
- self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
- self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
- self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
- self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
- self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
- self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
- self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
- self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
- self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
- self.state.add_transition ('[', 'ESC', None, 'ELB')
- # ELB means Escape Left Bracket. That is ^[[
- self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
- self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
- self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
- self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
- self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
- self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
- self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
- self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
- self.state.add_transition ('m', 'ELB', self.do_sgr, 'INIT')
- self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
- self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
- self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
- self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
- self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
- self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
- self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
- self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
- self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
- self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
- ### It gets worse... the 'm' code can have infinite number of
- ### number;number;number before it. I've never seen more than two,
- ### but the specs say it's allowed. crap!
- self.state.add_transition ('m', 'NUMBER_1', self.do_sgr, 'INIT')
- ### LED control. Same implementation problem as 'm' code.
- self.state.add_transition ('q', 'NUMBER_1', self.do_decsca, 'INIT')
-
- # \E[?47h switch to alternate screen
- # \E[?47l restores to normal screen from alternate screen.
- self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
- self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
- self.state.add_transition ('l', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
- self.state.add_transition ('h', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
-
-#RM Reset Mode Esc [ Ps l none
- self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
- self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
- self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
- self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
- self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
- self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
- self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
- self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
- ### It gets worse... the 'm' code can have infinite number of
- ### number;number;number before it. I've never seen more than two,
- ### but the specs say it's allowed. crap!
- self.state.add_transition ('m', 'NUMBER_2', self.do_sgr, 'INIT')
- ### LED control. Same problem as 'm' code.
- self.state.add_transition ('q', 'NUMBER_2', self.do_decsca, 'INIT')
- self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
-
- # Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
- self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
- self.state.add_transition_list (string.digits, 'SEMICOLON_X', DoStartNumber, 'NUMBER_X')
- self.state.add_transition_list (string.digits, 'NUMBER_X', DoBuildNumber, 'NUMBER_X')
- self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
- self.state.add_transition ('m', 'NUMBER_X', self.do_sgr, 'INIT')
- self.state.add_transition ('q', 'NUMBER_X', self.do_decsca, 'INIT')
- self.state.add_transition (';', 'NUMBER_X', None, 'SEMICOLON_X')
-
- def process (self, c):
- """Process a single character. Called by :meth:`write`."""
- if isinstance(c, bytes):
- c = self._decode(c)
- self.state.process(c)
-
- def process_list (self, l):
-
- self.write(l)
-
- def write (self, s):
- """Process text, writing it to the virtual screen while handling
- ANSI escape codes.
- """
- if isinstance(s, bytes):
- s = self._decode(s)
- for c in s:
- self.process(c)
-
- def flush (self):
- pass
-
- def write_ch (self, ch):
- '''This puts a character at the current cursor position. The cursor
- position is moved forward with wrap-around, but no scrolling is done if
- the cursor hits the lower-right corner of the screen. '''
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- #\r and \n both produce a call to cr() and lf(), respectively.
- ch = ch[0]
-
- if ch == u'\r':
- self.cr()
- return
- if ch == u'\n':
- self.crlf()
- return
- if ch == chr(screen.BS):
- self.cursor_back()
- return
- self.put_abs(self.cur_r, self.cur_c, ch)
- old_r = self.cur_r
- old_c = self.cur_c
- self.cursor_forward()
- if old_c == self.cur_c:
- self.cursor_down()
- if old_r != self.cur_r:
- self.cursor_home (self.cur_r, 1)
- else:
- self.scroll_up ()
- self.cursor_home (self.cur_r, 1)
- self.erase_line()
-
- def do_sgr (self, fsm):
- '''Select Graphic Rendition, e.g. color. '''
- screen = fsm.memory[0]
- fsm.memory = [screen]
-
- def do_decsca (self, fsm):
- '''Select character protection attribute. '''
- screen = fsm.memory[0]
- fsm.memory = [screen]
-
- def do_modecrap (self, fsm):
- '''Handler for \x1b[?<number>h and \x1b[?<number>l. If anyone
- wanted to actually use these, they'd need to add more states to the
- FSM rather than just improve or override this method. '''
- screen = fsm.memory[0]
- fsm.memory = [screen]
+'''This implements an ANSI (VT100) terminal emulator as a subclass of screen.
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+# references:
+# http://en.wikipedia.org/wiki/ANSI_escape_code
+# http://www.retards.org/terminals/vt102.html
+# http://vt100.net/docs/vt102-ug/contents.html
+# http://vt100.net/docs/vt220-rm/
+# http://www.termsys.demon.co.uk/vtansi.htm
+
+from . import screen
+from . import FSM
+import string
+
+#
+# The 'Do.*' functions are helper functions for the ANSI class.
+#
+def DoEmit (fsm):
+
+ screen = fsm.memory[0]
+ screen.write_ch(fsm.input_symbol)
+
+def DoStartNumber (fsm):
+
+ fsm.memory.append (fsm.input_symbol)
+
+def DoBuildNumber (fsm):
+
+ ns = fsm.memory.pop()
+ ns = ns + fsm.input_symbol
+ fsm.memory.append (ns)
+
+def DoBackOne (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_back ()
+
+def DoBack (fsm):
+
+ count = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ screen.cursor_back (count)
+
+def DoDownOne (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_down ()
+
+def DoDown (fsm):
+
+ count = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ screen.cursor_down (count)
+
+def DoForwardOne (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_forward ()
+
+def DoForward (fsm):
+
+ count = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ screen.cursor_forward (count)
+
+def DoUpReverse (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_up_reverse()
+
+def DoUpOne (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_up ()
+
+def DoUp (fsm):
+
+ count = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ screen.cursor_up (count)
+
+def DoHome (fsm):
+
+ c = int(fsm.memory.pop())
+ r = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ screen.cursor_home (r,c)
+
+def DoHomeOrigin (fsm):
+
+ c = 1
+ r = 1
+ screen = fsm.memory[0]
+ screen.cursor_home (r,c)
+
+def DoEraseDown (fsm):
+
+ screen = fsm.memory[0]
+ screen.erase_down()
+
+def DoErase (fsm):
+
+ arg = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ if arg == 0:
+ screen.erase_down()
+ elif arg == 1:
+ screen.erase_up()
+ elif arg == 2:
+ screen.erase_screen()
+
+def DoEraseEndOfLine (fsm):
+
+ screen = fsm.memory[0]
+ screen.erase_end_of_line()
+
+def DoEraseLine (fsm):
+
+ arg = int(fsm.memory.pop())
+ screen = fsm.memory[0]
+ if arg == 0:
+ screen.erase_end_of_line()
+ elif arg == 1:
+ screen.erase_start_of_line()
+ elif arg == 2:
+ screen.erase_line()
+
+def DoEnableScroll (fsm):
+
+ screen = fsm.memory[0]
+ screen.scroll_screen()
+
+def DoCursorSave (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_save_attrs()
+
+def DoCursorRestore (fsm):
+
+ screen = fsm.memory[0]
+ screen.cursor_restore_attrs()
+
+def DoScrollRegion (fsm):
+
+ screen = fsm.memory[0]
+ r2 = int(fsm.memory.pop())
+ r1 = int(fsm.memory.pop())
+ screen.scroll_screen_rows (r1,r2)
+
+def DoMode (fsm):
+
+ screen = fsm.memory[0]
+ mode = fsm.memory.pop() # Should be 4
+ # screen.setReplaceMode ()
+
+def DoLog (fsm):
+
+ screen = fsm.memory[0]
+ fsm.memory = [screen]
+ fout = open ('log', 'a')
+ fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
+ fout.close()
+
+class term (screen.screen):
+
+    '''This class is an abstract, generic terminal.
+    It does nothing by itself; it is a placeholder that
+    provides a common base class for concrete terminals
+    such as an ANSI terminal. '''
+
+ def __init__ (self, r=24, c=80, *args, **kwargs):
+
+ screen.screen.__init__(self, r,c,*args,**kwargs)
+
+class ANSI (term):
+ '''This class implements an ANSI (VT100) terminal.
+ It is a stream filter that recognizes ANSI terminal
+ escape sequences and maintains the state of a screen object. '''
+
+ def __init__ (self, r=24,c=80,*args,**kwargs):
+
+ term.__init__(self,r,c,*args,**kwargs)
+
+ #self.screen = screen (24,80)
+ self.state = FSM.FSM ('INIT',[self])
+ self.state.set_default_transition (DoLog, 'INIT')
+ self.state.add_transition_any ('INIT', DoEmit, 'INIT')
+ self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
+ self.state.add_transition_any ('ESC', DoLog, 'INIT')
+ self.state.add_transition ('(', 'ESC', None, 'G0SCS')
+ self.state.add_transition (')', 'ESC', None, 'G1SCS')
+ self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
+ self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
+ self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
+ self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
+ self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
+ self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
+ self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
+ self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
+ self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
+ self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
+ self.state.add_transition ('[', 'ESC', None, 'ELB')
+ # ELB means Escape Left Bracket. That is ^[[
+ self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
+ self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
+ self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
+ self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
+ self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
+ self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
+ self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
+ self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
+ self.state.add_transition ('m', 'ELB', self.do_sgr, 'INIT')
+ self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
+ self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
+ self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
+ self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
+ self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
+ self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
+ self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
+ self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
+ self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
+ self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
+        ### It gets worse... the 'm' code can have an infinite number of
+ ### number;number;number before it. I've never seen more than two,
+ ### but the specs say it's allowed. crap!
+ self.state.add_transition ('m', 'NUMBER_1', self.do_sgr, 'INIT')
+ ### LED control. Same implementation problem as 'm' code.
+ self.state.add_transition ('q', 'NUMBER_1', self.do_decsca, 'INIT')
+
+ # \E[?47h switch to alternate screen
+ # \E[?47l restores to normal screen from alternate screen.
+ self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
+ self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
+ self.state.add_transition ('l', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
+ self.state.add_transition ('h', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
+
+#RM Reset Mode Esc [ Ps l none
+ self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
+ self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
+ self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
+ self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
+ self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
+ self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
+ self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
+ self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
+        ### It gets worse... the 'm' code can have an infinite number of
+ ### number;number;number before it. I've never seen more than two,
+ ### but the specs say it's allowed. crap!
+ self.state.add_transition ('m', 'NUMBER_2', self.do_sgr, 'INIT')
+ ### LED control. Same problem as 'm' code.
+ self.state.add_transition ('q', 'NUMBER_2', self.do_decsca, 'INIT')
+ self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
+
+ # Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
+ self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
+ self.state.add_transition_list (string.digits, 'SEMICOLON_X', DoStartNumber, 'NUMBER_X')
+ self.state.add_transition_list (string.digits, 'NUMBER_X', DoBuildNumber, 'NUMBER_X')
+ self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
+ self.state.add_transition ('m', 'NUMBER_X', self.do_sgr, 'INIT')
+ self.state.add_transition ('q', 'NUMBER_X', self.do_decsca, 'INIT')
+ self.state.add_transition (';', 'NUMBER_X', None, 'SEMICOLON_X')
+
+ def process (self, c):
+ """Process a single character. Called by :meth:`write`."""
+ if isinstance(c, bytes):
+ c = self._decode(c)
+ self.state.process(c)
+
+ def process_list (self, l):
+
+ self.write(l)
+
+ def write (self, s):
+ """Process text, writing it to the virtual screen while handling
+ ANSI escape codes.
+ """
+ if isinstance(s, bytes):
+ s = self._decode(s)
+ for c in s:
+ self.process(c)
+
+ def flush (self):
+ pass
+
+ def write_ch (self, ch):
+ '''This puts a character at the current cursor position. The cursor
+ position is moved forward with wrap-around, but no scrolling is done if
+ the cursor hits the lower-right corner of the screen. '''
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+        # \r produces a call to cr(); \n produces a call to crlf().
+ ch = ch[0]
+
+ if ch == u'\r':
+ self.cr()
+ return
+ if ch == u'\n':
+ self.crlf()
+ return
+ if ch == chr(screen.BS):
+ self.cursor_back()
+ return
+ self.put_abs(self.cur_r, self.cur_c, ch)
+ old_r = self.cur_r
+ old_c = self.cur_c
+ self.cursor_forward()
+ if old_c == self.cur_c:
+ self.cursor_down()
+ if old_r != self.cur_r:
+ self.cursor_home (self.cur_r, 1)
+ else:
+ self.scroll_up ()
+ self.cursor_home (self.cur_r, 1)
+ self.erase_line()
+
+ def do_sgr (self, fsm):
+ '''Select Graphic Rendition, e.g. color. '''
+ screen = fsm.memory[0]
+ fsm.memory = [screen]
+
+ def do_decsca (self, fsm):
+ '''Select character protection attribute. '''
+ screen = fsm.memory[0]
+ fsm.memory = [screen]
+
+ def do_modecrap (self, fsm):
+ '''Handler for \x1b[?<number>h and \x1b[?<number>l. If anyone
+ wanted to actually use these, they'd need to add more states to the
+ FSM rather than just improve or override this method. '''
+ screen = fsm.memory[0]
+ fsm.memory = [screen]
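
For reference, driving the emulator is just a matter of feeding text through
write(): escape sequences are consumed by the FSM and everything else lands on
the virtual screen. A minimal sketch, assuming this module is importable as
pexpect.ANSI and that str() on the terminal renders the screen contents (as
the screen base class provides):

    from pexpect import ANSI

    term = ANSI.ANSI(r=4, c=20)     # 4 rows, 20 columns
    term.write('hello')             # plain text goes straight to the screen
    term.write('\x1b[2;1H')         # CSI 2;1H: move cursor to row 2, column 1
    term.write('\x1b[1mworld')      # CSI 1m is SGR (ignored here), then text
    print(str(term))                # dump the emulated screen contents
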
diff --git a/contrib/python/pexpect/pexpect/FSM.py b/contrib/python/pexpect/pexpect/FSM.py
index 45fcc1baa9..46b392ea08 100644
--- a/contrib/python/pexpect/pexpect/FSM.py
+++ b/contrib/python/pexpect/pexpect/FSM.py
@@ -1,334 +1,334 @@
-#!/usr/bin/env python
-
-'''This module implements a Finite State Machine (FSM). In addition to state
-this FSM also maintains a user-defined "memory". So this FSM can be used as a
-Push-down Automaton (PDA), since a PDA is an FSM plus memory.
-
-The following describes how the FSM works, but you will probably also need to
-see the example function to understand how the FSM is used in practice.
-
-You define an FSM by building tables of transitions. For a given input symbol
-the process() method uses these tables to decide what action to call and what
-the next state will be. The FSM has a table of transitions that associate:
-
- (input_symbol, current_state) --> (action, next_state)
-
-Where "action" is a function you define. The symbols and states can be any
-objects. You use the add_transition() and add_transition_list() methods to add
-to the transition table. The FSM also has a table of transitions that
-associate:
-
- (current_state) --> (action, next_state)
-
-You use the add_transition_any() method to add to this transition table. The
-FSM also has one default transition that is not associated with any specific
-input_symbol or state. You use the set_default_transition() method to set the
-default transition.
-
-When an action function is called it is passed a reference to the FSM. The
-action function may then access attributes of the FSM such as input_symbol,
-current_state, or "memory". The "memory" attribute can be any object that you
-want to pass along to the action functions. It is not used by the FSM itself.
-For parsing you would typically pass a list to be used as a stack.
-
-The processing sequence is as follows. The process() method is given an
-input_symbol to process. The FSM will search the table of transitions that
-associate:
-
- (input_symbol, current_state) --> (action, next_state)
-
-If the pair (input_symbol, current_state) is found then process() will call the
-associated action function and then set the current state to the next_state.
-
-If the FSM cannot find a match for (input_symbol, current_state) it will then
-search the table of transitions that associate:
-
- (current_state) --> (action, next_state)
-
-If the current_state is found then the process() method will call the
-associated action function and then set the current state to the next_state.
-Notice that this table lacks an input_symbol. It lets you define transitions
-for a current_state and ANY input_symbol. Hence, it is called the "any" table.
-Remember, it is always checked after first searching the table for a specific
-(input_symbol, current_state).
-
-For the case where the FSM did not match either of the previous two cases the
-FSM will try to use the default transition. If the default transition is
-defined then the process() method will call the associated action function and
-then set the current state to the next_state. This lets you define a default
-transition as a catch-all case. You can think of it as an exception handler.
-There can be only one default transition.
-
-Finally, if none of the previous cases are defined for an input_symbol and
-current_state then the FSM will raise an exception. This may be desirable, but
-you can always prevent this just by defining a default transition.
-
-Noah Spurrier 20020822
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-class ExceptionFSM(Exception):
-
- '''This is the FSM Exception class.'''
-
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return 'ExceptionFSM: ' + str(self.value)
-
-class FSM:
-
- '''This is a Finite State Machine (FSM).
- '''
-
- def __init__(self, initial_state, memory=None):
-
- '''This creates the FSM. You set the initial state here. The "memory"
- attribute is any object that you want to pass along to the action
- functions. It is not used by the FSM. For parsing you would typically
- pass a list to be used as a stack. '''
-
- # Map (input_symbol, current_state) --> (action, next_state).
- self.state_transitions = {}
- # Map (current_state) --> (action, next_state).
- self.state_transitions_any = {}
- self.default_transition = None
-
- self.input_symbol = None
- self.initial_state = initial_state
- self.current_state = self.initial_state
- self.next_state = None
- self.action = None
- self.memory = memory
-
- def reset (self):
-
- '''This sets the current_state to the initial_state and sets
- input_symbol to None. The initial state was set by the constructor
- __init__(). '''
-
- self.current_state = self.initial_state
- self.input_symbol = None
-
- def add_transition (self, input_symbol, state, action=None, next_state=None):
-
- '''This adds a transition that associates:
-
- (input_symbol, current_state) --> (action, next_state)
-
- The action may be set to None in which case the process() method will
- ignore the action and only set the next_state. The next_state may be
- set to None in which case the current state will be unchanged.
-
- You can also set transitions for a list of symbols by using
- add_transition_list(). '''
-
- if next_state is None:
- next_state = state
- self.state_transitions[(input_symbol, state)] = (action, next_state)
-
- def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
-
- '''This adds the same transition for a list of input symbols.
- You can pass a list or a string. Note that it is handy to use
-    string.digits, string.whitespace, string.ascii_letters, etc. to add
- transitions that match character classes.
-
- The action may be set to None in which case the process() method will
- ignore the action and only set the next_state. The next_state may be
- set to None in which case the current state will be unchanged. '''
-
- if next_state is None:
- next_state = state
- for input_symbol in list_input_symbols:
- self.add_transition (input_symbol, state, action, next_state)
-
- def add_transition_any (self, state, action=None, next_state=None):
-
- '''This adds a transition that associates:
-
- (current_state) --> (action, next_state)
-
- That is, any input symbol will match the current state.
- The process() method checks the "any" state associations after it first
- checks for an exact match of (input_symbol, current_state).
-
- The action may be set to None in which case the process() method will
- ignore the action and only set the next_state. The next_state may be
- set to None in which case the current state will be unchanged. '''
-
- if next_state is None:
- next_state = state
- self.state_transitions_any [state] = (action, next_state)
-
- def set_default_transition (self, action, next_state):
-
- '''This sets the default transition. This defines an action and
- next_state if the FSM cannot find the input symbol and the current
- state in the transition list and if the FSM cannot find the
- current_state in the transition_any list. This is useful as a final
- fall-through state for catching errors and undefined states.
-
- The default transition can be removed by setting the attribute
- default_transition to None. '''
-
- self.default_transition = (action, next_state)
-
- def get_transition (self, input_symbol, state):
-
- '''This returns (action, next state) given an input_symbol and state.
- This does not modify the FSM state, so calling this method has no side
- effects. Normally you do not call this method directly. It is called by
- process().
-
- The sequence of steps to check for a defined transition goes from the
- most specific to the least specific.
-
- 1. Check state_transitions[] that match exactly the tuple,
- (input_symbol, state)
-
- 2. Check state_transitions_any[] that match (state)
- In other words, match a specific state and ANY input_symbol.
-
- 3. Check if the default_transition is defined.
- This catches any input_symbol and any state.
- This is a handler for errors, undefined states, or defaults.
-
- 4. No transition was defined. If we get here then raise an exception.
- '''
-
- if (input_symbol, state) in self.state_transitions:
- return self.state_transitions[(input_symbol, state)]
- elif state in self.state_transitions_any:
- return self.state_transitions_any[state]
- elif self.default_transition is not None:
- return self.default_transition
- else:
- raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
- (str(input_symbol), str(state)) )
-
- def process (self, input_symbol):
-
- '''This is the main method that you call to process input. This may
- cause the FSM to change state and call an action. This method calls
- get_transition() to find the action and next_state associated with the
- input_symbol and current_state. If the action is None then the action
- is not called and only the current state is changed. This method
- processes one complete input symbol. You can process a list of symbols
- (or a string) by calling process_list(). '''
-
- self.input_symbol = input_symbol
- (self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
- if self.action is not None:
- self.action (self)
- self.current_state = self.next_state
- self.next_state = None
-
- def process_list (self, input_symbols):
-
- '''This takes a list and sends each element to process(). The list may
- be a string or any iterable object. '''
-
- for s in input_symbols:
- self.process (s)
-
-##############################################################################
-# The following is an example that demonstrates the use of the FSM class to
-# process an RPN expression. Run this module from the command line. You will
-# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
-# Operators are * / + - Use the = sign to evaluate and print the expression.
-# For example:
-#
-# 167 3 2 2 * * * 1 - =
-#
-# will print:
-#
-# 2003
-##############################################################################
-
-import sys
-import string
-
-PY3 = (sys.version_info[0] >= 3)
-
-#
-# These define the actions.
-# Note that "memory" is a list being used as a stack.
-#
-
-def BeginBuildNumber (fsm):
- fsm.memory.append (fsm.input_symbol)
-
-def BuildNumber (fsm):
- s = fsm.memory.pop ()
- s = s + fsm.input_symbol
- fsm.memory.append (s)
-
-def EndBuildNumber (fsm):
- s = fsm.memory.pop ()
- fsm.memory.append (int(s))
-
-def DoOperator (fsm):
- ar = fsm.memory.pop()
- al = fsm.memory.pop()
- if fsm.input_symbol == '+':
- fsm.memory.append (al + ar)
- elif fsm.input_symbol == '-':
- fsm.memory.append (al - ar)
- elif fsm.input_symbol == '*':
- fsm.memory.append (al * ar)
- elif fsm.input_symbol == '/':
- fsm.memory.append (al / ar)
-
-def DoEqual (fsm):
- print(str(fsm.memory.pop()))
-
-def Error (fsm):
- print('That does not compute.')
- print(str(fsm.input_symbol))
-
-def main():
-
- '''This is where the example starts and the FSM state transitions are
- defined. Note that states are strings (such as 'INIT'). This is not
- necessary, but it makes the example easier to read. '''
-
- f = FSM ('INIT', [])
- f.set_default_transition (Error, 'INIT')
- f.add_transition_any ('INIT', None, 'INIT')
- f.add_transition ('=', 'INIT', DoEqual, 'INIT')
- f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
- f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
- f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
- f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
-
- print()
- print('Enter an RPN Expression.')
- print('Numbers may be integers. Operators are * / + -')
- print('Use the = sign to evaluate and print the expression.')
- print('For example: ')
- print(' 167 3 2 2 * * * 1 - =')
- inputstr = (input if PY3 else raw_input)('> ') # analysis:ignore
- f.process_list(inputstr)
-
-
-if __name__ == '__main__':
- main()
+#!/usr/bin/env python
+
+'''This module implements a Finite State Machine (FSM). In addition to state
+this FSM also maintains a user-defined "memory". So this FSM can be used as a
+Push-down Automaton (PDA), since a PDA is an FSM plus memory.
+
+The following describes how the FSM works, but you will probably also need to
+see the example function to understand how the FSM is used in practice.
+
+You define an FSM by building tables of transitions. For a given input symbol
+the process() method uses these tables to decide what action to call and what
+the next state will be. The FSM has a table of transitions that associate:
+
+ (input_symbol, current_state) --> (action, next_state)
+
+Where "action" is a function you define. The symbols and states can be any
+objects. You use the add_transition() and add_transition_list() methods to add
+to the transition table. The FSM also has a table of transitions that
+associate:
+
+ (current_state) --> (action, next_state)
+
+You use the add_transition_any() method to add to this transition table. The
+FSM also has one default transition that is not associated with any specific
+input_symbol or state. You use the set_default_transition() method to set the
+default transition.
+
+When an action function is called it is passed a reference to the FSM. The
+action function may then access attributes of the FSM such as input_symbol,
+current_state, or "memory". The "memory" attribute can be any object that you
+want to pass along to the action functions. It is not used by the FSM itself.
+For parsing you would typically pass a list to be used as a stack.
+
+The processing sequence is as follows. The process() method is given an
+input_symbol to process. The FSM will search the table of transitions that
+associate:
+
+ (input_symbol, current_state) --> (action, next_state)
+
+If the pair (input_symbol, current_state) is found then process() will call the
+associated action function and then set the current state to the next_state.
+
+If the FSM cannot find a match for (input_symbol, current_state) it will then
+search the table of transitions that associate:
+
+ (current_state) --> (action, next_state)
+
+If the current_state is found then the process() method will call the
+associated action function and then set the current state to the next_state.
+Notice that this table lacks an input_symbol. It lets you define transitions
+for a current_state and ANY input_symbol. Hence, it is called the "any" table.
+Remember, it is always checked after first searching the table for a specific
+(input_symbol, current_state).
+
+For the case where the FSM did not match either of the previous two cases the
+FSM will try to use the default transition. If the default transition is
+defined then the process() method will call the associated action function and
+then set the current state to the next_state. This lets you define a default
+transition as a catch-all case. You can think of it as an exception handler.
+There can be only one default transition.
+
+Finally, if none of the previous cases are defined for an input_symbol and
+current_state then the FSM will raise an exception. This may be desirable, but
+you can always prevent this just by defining a default transition.
+
+Noah Spurrier 20020822
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+class ExceptionFSM(Exception):
+
+ '''This is the FSM Exception class.'''
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return 'ExceptionFSM: ' + str(self.value)
+
+class FSM:
+
+ '''This is a Finite State Machine (FSM).
+ '''
+
+ def __init__(self, initial_state, memory=None):
+
+ '''This creates the FSM. You set the initial state here. The "memory"
+ attribute is any object that you want to pass along to the action
+ functions. It is not used by the FSM. For parsing you would typically
+ pass a list to be used as a stack. '''
+
+ # Map (input_symbol, current_state) --> (action, next_state).
+ self.state_transitions = {}
+ # Map (current_state) --> (action, next_state).
+ self.state_transitions_any = {}
+ self.default_transition = None
+
+ self.input_symbol = None
+ self.initial_state = initial_state
+ self.current_state = self.initial_state
+ self.next_state = None
+ self.action = None
+ self.memory = memory
+
+ def reset (self):
+
+ '''This sets the current_state to the initial_state and sets
+ input_symbol to None. The initial state was set by the constructor
+ __init__(). '''
+
+ self.current_state = self.initial_state
+ self.input_symbol = None
+
+ def add_transition (self, input_symbol, state, action=None, next_state=None):
+
+ '''This adds a transition that associates:
+
+ (input_symbol, current_state) --> (action, next_state)
+
+ The action may be set to None in which case the process() method will
+ ignore the action and only set the next_state. The next_state may be
+ set to None in which case the current state will be unchanged.
+
+ You can also set transitions for a list of symbols by using
+ add_transition_list(). '''
+
+ if next_state is None:
+ next_state = state
+ self.state_transitions[(input_symbol, state)] = (action, next_state)
+
+ def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
+
+ '''This adds the same transition for a list of input symbols.
+ You can pass a list or a string. Note that it is handy to use
+    string.digits, string.whitespace, string.ascii_letters, etc. to add
+ transitions that match character classes.
+
+ The action may be set to None in which case the process() method will
+ ignore the action and only set the next_state. The next_state may be
+ set to None in which case the current state will be unchanged. '''
+
+ if next_state is None:
+ next_state = state
+ for input_symbol in list_input_symbols:
+ self.add_transition (input_symbol, state, action, next_state)
+
+ def add_transition_any (self, state, action=None, next_state=None):
+
+ '''This adds a transition that associates:
+
+ (current_state) --> (action, next_state)
+
+ That is, any input symbol will match the current state.
+ The process() method checks the "any" state associations after it first
+ checks for an exact match of (input_symbol, current_state).
+
+ The action may be set to None in which case the process() method will
+ ignore the action and only set the next_state. The next_state may be
+ set to None in which case the current state will be unchanged. '''
+
+ if next_state is None:
+ next_state = state
+ self.state_transitions_any [state] = (action, next_state)
+
+ def set_default_transition (self, action, next_state):
+
+ '''This sets the default transition. This defines an action and
+ next_state if the FSM cannot find the input symbol and the current
+ state in the transition list and if the FSM cannot find the
+ current_state in the transition_any list. This is useful as a final
+ fall-through state for catching errors and undefined states.
+
+ The default transition can be removed by setting the attribute
+ default_transition to None. '''
+
+ self.default_transition = (action, next_state)
+
+ def get_transition (self, input_symbol, state):
+
+ '''This returns (action, next state) given an input_symbol and state.
+ This does not modify the FSM state, so calling this method has no side
+ effects. Normally you do not call this method directly. It is called by
+ process().
+
+ The sequence of steps to check for a defined transition goes from the
+ most specific to the least specific.
+
+ 1. Check state_transitions[] that match exactly the tuple,
+ (input_symbol, state)
+
+ 2. Check state_transitions_any[] that match (state)
+ In other words, match a specific state and ANY input_symbol.
+
+ 3. Check if the default_transition is defined.
+ This catches any input_symbol and any state.
+ This is a handler for errors, undefined states, or defaults.
+
+ 4. No transition was defined. If we get here then raise an exception.
+ '''
+
+ if (input_symbol, state) in self.state_transitions:
+ return self.state_transitions[(input_symbol, state)]
+ elif state in self.state_transitions_any:
+ return self.state_transitions_any[state]
+ elif self.default_transition is not None:
+ return self.default_transition
+ else:
+ raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
+ (str(input_symbol), str(state)) )
+
+ def process (self, input_symbol):
+
+ '''This is the main method that you call to process input. This may
+ cause the FSM to change state and call an action. This method calls
+ get_transition() to find the action and next_state associated with the
+ input_symbol and current_state. If the action is None then the action
+ is not called and only the current state is changed. This method
+ processes one complete input symbol. You can process a list of symbols
+ (or a string) by calling process_list(). '''
+
+ self.input_symbol = input_symbol
+ (self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
+ if self.action is not None:
+ self.action (self)
+ self.current_state = self.next_state
+ self.next_state = None
+
+ def process_list (self, input_symbols):
+
+ '''This takes a list and sends each element to process(). The list may
+ be a string or any iterable object. '''
+
+ for s in input_symbols:
+ self.process (s)
+
+##############################################################################
+# The following is an example that demonstrates the use of the FSM class to
+# process an RPN expression. Run this module from the command line. You will
+# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
+# Operators are * / + - Use the = sign to evaluate and print the expression.
+# For example:
+#
+# 167 3 2 2 * * * 1 - =
+#
+# will print:
+#
+# 2003
+##############################################################################
+
+import sys
+import string
+
+PY3 = (sys.version_info[0] >= 3)
+
+#
+# These define the actions.
+# Note that "memory" is a list being used as a stack.
+#
+
+def BeginBuildNumber (fsm):
+ fsm.memory.append (fsm.input_symbol)
+
+def BuildNumber (fsm):
+ s = fsm.memory.pop ()
+ s = s + fsm.input_symbol
+ fsm.memory.append (s)
+
+def EndBuildNumber (fsm):
+ s = fsm.memory.pop ()
+ fsm.memory.append (int(s))
+
+def DoOperator (fsm):
+ ar = fsm.memory.pop()
+ al = fsm.memory.pop()
+ if fsm.input_symbol == '+':
+ fsm.memory.append (al + ar)
+ elif fsm.input_symbol == '-':
+ fsm.memory.append (al - ar)
+ elif fsm.input_symbol == '*':
+ fsm.memory.append (al * ar)
+ elif fsm.input_symbol == '/':
+ fsm.memory.append (al / ar)
+
+def DoEqual (fsm):
+ print(str(fsm.memory.pop()))
+
+def Error (fsm):
+ print('That does not compute.')
+ print(str(fsm.input_symbol))
+
+def main():
+
+ '''This is where the example starts and the FSM state transitions are
+ defined. Note that states are strings (such as 'INIT'). This is not
+ necessary, but it makes the example easier to read. '''
+
+ f = FSM ('INIT', [])
+ f.set_default_transition (Error, 'INIT')
+ f.add_transition_any ('INIT', None, 'INIT')
+ f.add_transition ('=', 'INIT', DoEqual, 'INIT')
+ f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
+ f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
+ f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
+ f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
+
+ print()
+ print('Enter an RPN Expression.')
+ print('Numbers may be integers. Operators are * / + -')
+ print('Use the = sign to evaluate and print the expression.')
+ print('For example: ')
+ print(' 167 3 2 2 * * * 1 - =')
+ inputstr = (input if PY3 else raw_input)('> ') # analysis:ignore
+ f.process_list(inputstr)
+
+
+if __name__ == '__main__':
+ main()
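
The transition tables described in the module docstring can be exercised in a
few lines. A minimal sketch, assuming the module is importable as pexpect.FSM;
the collect and complain callbacks are hypothetical names for this example:

    from pexpect.FSM import FSM

    def collect(fsm):
        # 'memory' is whatever was passed to the constructor; here, a list.
        fsm.memory.append(fsm.input_symbol)

    def complain(fsm):
        print('unexpected symbol: %r' % fsm.input_symbol)

    f = FSM('START', memory=[])
    f.add_transition_list('abc', 'START', collect, 'START')  # specific symbols
    f.add_transition_any('START', None, 'START')             # any other symbol
    f.set_default_transition(complain, 'START')              # catch-all
    f.process_list('a!b?c')
    print(f.memory)                                          # ['a', 'b', 'c']
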
diff --git a/contrib/python/pexpect/pexpect/__init__.py b/contrib/python/pexpect/pexpect/__init__.py
index 0e5f215e7c..7e30453787 100644
--- a/contrib/python/pexpect/pexpect/__init__.py
+++ b/contrib/python/pexpect/pexpect/__init__.py
@@ -1,85 +1,85 @@
-'''Pexpect is a Python module for spawning child applications and controlling
-them automatically. Pexpect can be used for automating interactive applications
-such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
-scripts for duplicating software package installations on different servers. It
-can be used for automated software testing. Pexpect is in the spirit of Don
-Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
-require TCL and Expect or require C extensions to be compiled. Pexpect does not
-use C, Expect, or TCL extensions. It should work on any platform that supports
-the standard Python pty module. The Pexpect interface focuses on ease of use so
-that simple tasks are easy.
-
-There are two main interfaces to the Pexpect system: the run() function
-and the spawn class. The spawn class is more powerful. The run()
-function is simpler than spawn, and is good for quickly calling a program. When
-you call the run() function it executes a given program and then returns the
-output. This is a handy replacement for os.system().
-
-For example::
-
- pexpect.run('ls -la')
-
-The spawn class is the more powerful interface to the Pexpect system. You can
-use this to spawn a child program then interact with it by sending input and
-expecting responses (waiting for patterns in the child's output).
-
-For example::
-
- child = pexpect.spawn('scp foo user@example.com:.')
- child.expect('Password:')
- child.sendline(mypassword)
-
-This works even for commands that ask for passwords or other input outside of
-the normal stdio streams. For example, ssh reads input directly from the TTY
-device which bypasses stdin.
-
-Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
-Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
-vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
-Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
-Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
-Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
-Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
-
-Pexpect is free, open source, and all that good stuff.
-http://pexpect.sourceforge.net/
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-import sys
-PY3 = (sys.version_info[0] >= 3)
-
-from .exceptions import ExceptionPexpect, EOF, TIMEOUT
-from .utils import split_command_line, which, is_executable_file
-from .expect import Expecter, searcher_re, searcher_string
-
-if sys.platform != 'win32':
- # On Unix, these are available at the top level for backwards compatibility
- from .pty_spawn import spawn, spawnu
- from .run import run, runu
-
+'''Pexpect is a Python module for spawning child applications and controlling
+them automatically. Pexpect can be used for automating interactive applications
+such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
+scripts for duplicating software package installations on different servers. It
+can be used for automated software testing. Pexpect is in the spirit of Don
+Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
+require TCL and Expect or require C extensions to be compiled. Pexpect does not
+use C, Expect, or TCL extensions. It should work on any platform that supports
+the standard Python pty module. The Pexpect interface focuses on ease of use so
+that simple tasks are easy.
+
+There are two main interfaces to the Pexpect system: the run() function
+and the spawn class. The spawn class is more powerful. The run()
+function is simpler than spawn, and is good for quickly calling a program. When
+you call the run() function it executes a given program and then returns the
+output. This is a handy replacement for os.system().
+
+For example::
+
+ pexpect.run('ls -la')
+
+The spawn class is the more powerful interface to the Pexpect system. You can
+use this to spawn a child program then interact with it by sending input and
+expecting responses (waiting for patterns in the child's output).
+
+For example::
+
+ child = pexpect.spawn('scp foo user@example.com:.')
+ child.expect('Password:')
+ child.sendline(mypassword)
+
+This works even for commands that ask for passwords or other input outside of
+the normal stdio streams. For example, ssh reads input directly from the TTY
+device which bypasses stdin.
+
+Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
+Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
+vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
+Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
+Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
+Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
+Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
+
+Pexpect is free, open source, and all that good stuff.
+http://pexpect.sourceforge.net/
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+import sys
+PY3 = (sys.version_info[0] >= 3)
+
+from .exceptions import ExceptionPexpect, EOF, TIMEOUT
+from .utils import split_command_line, which, is_executable_file
+from .expect import Expecter, searcher_re, searcher_string
+
+if sys.platform != 'win32':
+ # On Unix, these are available at the top level for backwards compatibility
+ from .pty_spawn import spawn, spawnu
+ from .run import run, runu
+
__version__ = '4.8.0'
-__revision__ = ''
-__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
- 'which', 'split_command_line', '__version__', '__revision__']
-
-
-
-# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
+__revision__ = ''
+__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
+ 'which', 'split_command_line', '__version__', '__revision__']
+
+
+
+# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
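
To complement the docstring examples, a minimal sketch of both interfaces;
withexitstatus is the documented keyword for getting the exit status back from
run(), and the child commands are illustrative:

    import pexpect

    # run() executes a program, waits for it to exit, and returns its output;
    # with withexitstatus=True it returns an (output, exitstatus) tuple.
    output, status = pexpect.run('ls -la', withexitstatus=True)
    print(status)

    # spawn() gives interactive control over a longer-lived child.
    child = pexpect.spawn('echo hello world')
    child.expect('hello')       # block until the pattern appears
    print(child.after)          # the text that matched the pattern
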
diff --git a/contrib/python/pexpect/pexpect/_async.py b/contrib/python/pexpect/pexpect/_async.py
index 4260ad6107..dfbfeef5fb 100644
--- a/contrib/python/pexpect/pexpect/_async.py
+++ b/contrib/python/pexpect/pexpect/_async.py
@@ -1,16 +1,16 @@
-import asyncio
-import errno
+import asyncio
+import errno
import signal
-
-from pexpect import EOF
-
-@asyncio.coroutine
-def expect_async(expecter, timeout=None):
-    # First process data that was previously read - if it matches, we don't need
- # async stuff.
+
+from pexpect import EOF
+
+@asyncio.coroutine
+def expect_async(expecter, timeout=None):
+    # First process data that was previously read - if it matches, we don't need
+ # async stuff.
idx = expecter.existing_data()
- if idx is not None:
- return idx
+ if idx is not None:
+ return idx
if not expecter.spawn.async_pw_transport:
pw = PatternWaiter()
pw.set_expecter(expecter)
@@ -21,12 +21,12 @@ def expect_async(expecter, timeout=None):
pw, transport = expecter.spawn.async_pw_transport
pw.set_expecter(expecter)
transport.resume_reading()
- try:
- return (yield from asyncio.wait_for(pw.fut, timeout))
- except asyncio.TimeoutError as e:
- transport.pause_reading()
- return expecter.timeout(e)
-
+ try:
+ return (yield from asyncio.wait_for(pw.fut, timeout))
+ except asyncio.TimeoutError as e:
+ transport.pause_reading()
+ return expecter.timeout(e)
+
@asyncio.coroutine
def repl_run_command_async(repl, cmdlines, timeout=-1):
res = []
@@ -45,59 +45,59 @@ def repl_run_command_async(repl, cmdlines, timeout=-1):
raise ValueError("Continuation prompt found - input was incomplete:")
return u''.join(res + [repl.child.before])
-class PatternWaiter(asyncio.Protocol):
+class PatternWaiter(asyncio.Protocol):
transport = None
def set_expecter(self, expecter):
- self.expecter = expecter
- self.fut = asyncio.Future()
+ self.expecter = expecter
+ self.fut = asyncio.Future()
- def found(self, result):
- if not self.fut.done():
- self.fut.set_result(result)
+ def found(self, result):
+ if not self.fut.done():
+ self.fut.set_result(result)
self.transport.pause_reading()
- def error(self, exc):
- if not self.fut.done():
- self.fut.set_exception(exc)
+ def error(self, exc):
+ if not self.fut.done():
+ self.fut.set_exception(exc)
self.transport.pause_reading()
def connection_made(self, transport):
self.transport = transport
- def data_received(self, data):
- spawn = self.expecter.spawn
- s = spawn._decoder.decode(data)
- spawn._log(s, 'read')
-
- if self.fut.done():
+ def data_received(self, data):
+ spawn = self.expecter.spawn
+ s = spawn._decoder.decode(data)
+ spawn._log(s, 'read')
+
+ if self.fut.done():
spawn._before.write(s)
spawn._buffer.write(s)
- return
-
- try:
+ return
+
+ try:
index = self.expecter.new_data(s)
- if index is not None:
- # Found a match
- self.found(index)
- except Exception as e:
- self.expecter.errored()
- self.error(e)
+ if index is not None:
+ # Found a match
+ self.found(index)
+ except Exception as e:
+ self.expecter.errored()
+ self.error(e)
- def eof_received(self):
- # N.B. If this gets called, async will close the pipe (the spawn object)
- # for us
- try:
- self.expecter.spawn.flag_eof = True
- index = self.expecter.eof()
- except EOF as e:
- self.error(e)
- else:
- self.found(index)
+ def eof_received(self):
+ # N.B. If this gets called, async will close the pipe (the spawn object)
+ # for us
+ try:
+ self.expecter.spawn.flag_eof = True
+ index = self.expecter.eof()
+ except EOF as e:
+ self.error(e)
+ else:
+ self.found(index)
- def connection_lost(self, exc):
- if isinstance(exc, OSError) and exc.errno == errno.EIO:
-            # We may get here without eof_received being called, e.g. on Linux
- self.eof_received()
- elif exc is not None:
- self.error(exc)
+ def connection_lost(self, exc):
+ if isinstance(exc, OSError) and exc.errno == errno.EIO:
+            # We may get here without eof_received being called, e.g. on Linux
+ self.eof_received()
+ elif exc is not None:
+ self.error(exc)
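
expect_async() is not normally called directly; it is reached by passing
async_=True to spawn.expect(), which then returns a coroutine. A minimal
sketch in the same old-style coroutine idiom this module uses (the 'cat'
child is illustrative):

    import asyncio
    import pexpect

    @asyncio.coroutine
    def interact():
        child = pexpect.spawn('cat', echo=False)
        child.sendline('hello')
        # async_=True routes this expect() call through expect_async().
        index = yield from child.expect(['hello', pexpect.EOF], async_=True)
        print('matched pattern %d' % index)
        child.sendeof()

    asyncio.get_event_loop().run_until_complete(interact())
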
diff --git a/contrib/python/pexpect/pexpect/exceptions.py b/contrib/python/pexpect/pexpect/exceptions.py
index f1c10df2e1..cb360f0261 100644
--- a/contrib/python/pexpect/pexpect/exceptions.py
+++ b/contrib/python/pexpect/pexpect/exceptions.py
@@ -1,35 +1,35 @@
-"""Exception classes used by Pexpect"""
-
-import traceback
-import sys
-
-class ExceptionPexpect(Exception):
- '''Base class for all exceptions raised by this module.
- '''
-
- def __init__(self, value):
- super(ExceptionPexpect, self).__init__(value)
- self.value = value
-
- def __str__(self):
- return str(self.value)
-
- def get_trace(self):
- '''This returns an abbreviated stack trace with lines that only concern
- the caller. In other words, the stack trace inside the Pexpect module
- is not included. '''
-
- tblist = traceback.extract_tb(sys.exc_info()[2])
- tblist = [item for item in tblist if ('pexpect/__init__' not in item[0])
- and ('pexpect/expect' not in item[0])]
- tblist = traceback.format_list(tblist)
- return ''.join(tblist)
-
-
-class EOF(ExceptionPexpect):
- '''Raised when EOF is read from a child.
- This usually means the child has exited.'''
-
-
-class TIMEOUT(ExceptionPexpect):
-    '''Raised when a read exceeds the timeout. '''
+"""Exception classes used by Pexpect"""
+
+import traceback
+import sys
+
+class ExceptionPexpect(Exception):
+ '''Base class for all exceptions raised by this module.
+ '''
+
+ def __init__(self, value):
+ super(ExceptionPexpect, self).__init__(value)
+ self.value = value
+
+ def __str__(self):
+ return str(self.value)
+
+ def get_trace(self):
+ '''This returns an abbreviated stack trace with lines that only concern
+ the caller. In other words, the stack trace inside the Pexpect module
+ is not included. '''
+
+ tblist = traceback.extract_tb(sys.exc_info()[2])
+ tblist = [item for item in tblist if ('pexpect/__init__' not in item[0])
+ and ('pexpect/expect' not in item[0])]
+ tblist = traceback.format_list(tblist)
+ return ''.join(tblist)
+
+
+class EOF(ExceptionPexpect):
+ '''Raised when EOF is read from a child.
+ This usually means the child has exited.'''
+
+
+class TIMEOUT(ExceptionPexpect):
+    '''Raised when a read exceeds the timeout. '''
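
EOF and TIMEOUT are the two conditions any expect() call may raise; they can
also be listed as patterns, in which case expect() returns their index instead
of raising. A minimal sketch of both styles (the child commands are
illustrative):

    import pexpect

    child = pexpect.spawn('sleep 30')
    try:
        child.expect('never printed', timeout=1)
    except pexpect.TIMEOUT:
        print('no match within one second')
    except pexpect.EOF:
        print('the child exited first')

    # Alternatively, list the conditions as patterns and branch on the index.
    child = pexpect.spawn('true')
    index = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=5)
    if index == 1:
        print('child reached EOF')
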
diff --git a/contrib/python/pexpect/pexpect/expect.py b/contrib/python/pexpect/pexpect/expect.py
index 00ad4f0748..d3409db9d7 100644
--- a/contrib/python/pexpect/pexpect/expect.py
+++ b/contrib/python/pexpect/pexpect/expect.py
@@ -1,27 +1,27 @@
-import time
-
-from .exceptions import EOF, TIMEOUT
-
-class Expecter(object):
- def __init__(self, spawn, searcher, searchwindowsize=-1):
- self.spawn = spawn
- self.searcher = searcher
+import time
+
+from .exceptions import EOF, TIMEOUT
+
+class Expecter(object):
+ def __init__(self, spawn, searcher, searchwindowsize=-1):
+ self.spawn = spawn
+ self.searcher = searcher
# A value of -1 means to use the figure from spawn, which should
# be None or a positive number.
- if searchwindowsize == -1:
- searchwindowsize = spawn.searchwindowsize
- self.searchwindowsize = searchwindowsize
+ if searchwindowsize == -1:
+ searchwindowsize = spawn.searchwindowsize
+ self.searchwindowsize = searchwindowsize
self.lookback = None
if hasattr(searcher, 'longest_string'):
self.lookback = searcher.longest_string
def do_search(self, window, freshlen):
- spawn = self.spawn
- searcher = self.searcher
+ spawn = self.spawn
+ searcher = self.searcher
if freshlen > len(window):
freshlen = len(window)
index = searcher.search(window, freshlen, self.searchwindowsize)
- if index >= 0:
+ if index >= 0:
spawn._buffer = spawn.buffer_type()
spawn._buffer.write(window[searcher.end:])
spawn.before = spawn._before.getvalue()[
@@ -29,10 +29,10 @@ class Expecter(object):
spawn._before = spawn.buffer_type()
spawn._before.write(window[searcher.end:])
spawn.after = window[searcher.start:searcher.end]
- spawn.match = searcher.match
- spawn.match_index = index
- # Found a match
- return index
+ spawn.match = searcher.match
+ spawn.match_index = index
+ # Found a match
+ return index
elif self.searchwindowsize or self.lookback:
maintain = self.searchwindowsize or self.lookback
if spawn._buffer.tell() > maintain:
@@ -97,275 +97,275 @@ class Expecter(object):
window = spawn._buffer.read()
return self.do_search(window, freshlen)
- def eof(self, err=None):
- spawn = self.spawn
-
+ def eof(self, err=None):
+ spawn = self.spawn
+
spawn.before = spawn._before.getvalue()
spawn._buffer = spawn.buffer_type()
spawn._before = spawn.buffer_type()
- spawn.after = EOF
- index = self.searcher.eof_index
- if index >= 0:
- spawn.match = EOF
- spawn.match_index = index
- return index
- else:
- spawn.match = None
- spawn.match_index = None
- msg = str(spawn)
+ spawn.after = EOF
+ index = self.searcher.eof_index
+ if index >= 0:
+ spawn.match = EOF
+ spawn.match_index = index
+ return index
+ else:
+ spawn.match = None
+ spawn.match_index = None
+ msg = str(spawn)
msg += '\nsearcher: %s' % self.searcher
- if err is not None:
- msg = str(err) + '\n' + msg
+ if err is not None:
+ msg = str(err) + '\n' + msg
exc = EOF(msg)
exc.__cause__ = None # in Python 3.x we can use "raise exc from None"
raise exc
- def timeout(self, err=None):
- spawn = self.spawn
-
+ def timeout(self, err=None):
+ spawn = self.spawn
+
spawn.before = spawn._before.getvalue()
- spawn.after = TIMEOUT
- index = self.searcher.timeout_index
- if index >= 0:
- spawn.match = TIMEOUT
- spawn.match_index = index
- return index
- else:
- spawn.match = None
- spawn.match_index = None
- msg = str(spawn)
+ spawn.after = TIMEOUT
+ index = self.searcher.timeout_index
+ if index >= 0:
+ spawn.match = TIMEOUT
+ spawn.match_index = index
+ return index
+ else:
+ spawn.match = None
+ spawn.match_index = None
+ msg = str(spawn)
msg += '\nsearcher: %s' % self.searcher
- if err is not None:
- msg = str(err) + '\n' + msg
-
+ if err is not None:
+ msg = str(err) + '\n' + msg
+
exc = TIMEOUT(msg)
exc.__cause__ = None # in Python 3.x we can use "raise exc from None"
raise exc
- def errored(self):
- spawn = self.spawn
+ def errored(self):
+ spawn = self.spawn
spawn.before = spawn._before.getvalue()
- spawn.after = None
- spawn.match = None
- spawn.match_index = None
-
- def expect_loop(self, timeout=-1):
- """Blocking expect"""
- spawn = self.spawn
-
- if timeout is not None:
- end_time = time.time() + timeout
-
- try:
+ spawn.after = None
+ spawn.match = None
+ spawn.match_index = None
+
+ def expect_loop(self, timeout=-1):
+ """Blocking expect"""
+ spawn = self.spawn
+
+ if timeout is not None:
+ end_time = time.time() + timeout
+
+ try:
idx = self.existing_data()
if idx is not None:
return idx
- while True:
- # No match at this point
- if (timeout is not None) and (timeout < 0):
- return self.timeout()
- # Still have time left, so read more data
- incoming = spawn.read_nonblocking(spawn.maxread, timeout)
+ while True:
+ # No match at this point
+ if (timeout is not None) and (timeout < 0):
+ return self.timeout()
+ # Still have time left, so read more data
+ incoming = spawn.read_nonblocking(spawn.maxread, timeout)
if self.spawn.delayafterread is not None:
time.sleep(self.spawn.delayafterread)
idx = self.new_data(incoming)
# Keep reading until exception or return.
if idx is not None:
return idx
- if timeout is not None:
- timeout = end_time - time.time()
- except EOF as e:
- return self.eof(e)
- except TIMEOUT as e:
- return self.timeout(e)
- except:
- self.errored()
- raise
-
-
-class searcher_string(object):
- '''This is a plain string search helper for the spawn.expect_any() method.
- This helper class is for speed. For more powerful regex patterns
- see the helper class, searcher_re.
-
- Attributes:
-
- eof_index - index of EOF, or -1
- timeout_index - index of TIMEOUT, or -1
-
- After a successful match by the search() method the following attributes
- are available:
-
- start - index into the buffer, first byte of match
- end - index into the buffer, first byte after match
- match - the matching string itself
-
- '''
-
- def __init__(self, strings):
- '''This creates an instance of searcher_string. This argument 'strings'
- may be a list; a sequence of strings; or the EOF or TIMEOUT types. '''
-
- self.eof_index = -1
- self.timeout_index = -1
- self._strings = []
+ if timeout is not None:
+ timeout = end_time - time.time()
+ except EOF as e:
+ return self.eof(e)
+ except TIMEOUT as e:
+ return self.timeout(e)
+ except:
+ self.errored()
+ raise
+
+
+class searcher_string(object):
+ '''This is a plain string search helper for the spawn.expect_any() method.
+ This helper class is for speed. For more powerful regex patterns
+ see the helper class, searcher_re.
+
+ Attributes:
+
+ eof_index - index of EOF, or -1
+ timeout_index - index of TIMEOUT, or -1
+
+ After a successful match by the search() method the following attributes
+ are available:
+
+ start - index into the buffer, first byte of match
+ end - index into the buffer, first byte after match
+ match - the matching string itself
+
+ '''
+
+ def __init__(self, strings):
+ '''This creates an instance of searcher_string. This argument 'strings'
+ may be a list; a sequence of strings; or the EOF or TIMEOUT types. '''
+
+ self.eof_index = -1
+ self.timeout_index = -1
+ self._strings = []
self.longest_string = 0
- for n, s in enumerate(strings):
- if s is EOF:
- self.eof_index = n
- continue
- if s is TIMEOUT:
- self.timeout_index = n
- continue
- self._strings.append((n, s))
+ for n, s in enumerate(strings):
+ if s is EOF:
+ self.eof_index = n
+ continue
+ if s is TIMEOUT:
+ self.timeout_index = n
+ continue
+ self._strings.append((n, s))
if len(s) > self.longest_string:
self.longest_string = len(s)
-
- def __str__(self):
- '''This returns a human-readable string that represents the state of
- the object.'''
-
+
+ def __str__(self):
+ '''This returns a human-readable string that represents the state of
+ the object.'''
+
ss = [(ns[0], ' %d: %r' % ns) for ns in self._strings]
- ss.append((-1, 'searcher_string:'))
- if self.eof_index >= 0:
- ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
- if self.timeout_index >= 0:
- ss.append((self.timeout_index,
- ' %d: TIMEOUT' % self.timeout_index))
- ss.sort()
- ss = list(zip(*ss))[1]
- return '\n'.join(ss)
-
- def search(self, buffer, freshlen, searchwindowsize=None):
+ ss.append((-1, 'searcher_string:'))
+ if self.eof_index >= 0:
+ ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
+ if self.timeout_index >= 0:
+ ss.append((self.timeout_index,
+ ' %d: TIMEOUT' % self.timeout_index))
+ ss.sort()
+ ss = list(zip(*ss))[1]
+ return '\n'.join(ss)
+
+ def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the search
- strings. 'freshlen' must indicate the number of bytes at the end of
- 'buffer' which have not been searched before. It helps to avoid
- searching the same, possibly big, buffer over and over again.
-
- See class spawn for the 'searchwindowsize' argument.
-
- If there is a match this returns the index of that string, and sets
- 'start', 'end' and 'match'. Otherwise, this returns -1. '''
-
- first_match = None
-
- # 'freshlen' helps a lot here. Further optimizations could
- # possibly include:
- #
- # using something like the Boyer-Moore Fast String Searching
- # Algorithm; pre-compiling the search through a list of
- # strings into something that can scan the input once to
- # search for all N strings; realize that if we search for
- # ['bar', 'baz'] and the input is '...foo' we need not bother
- # rescanning until we've read three more bytes.
- #
- # Sadly, I don't know enough about this interesting topic. /grahn
-
- for index, s in self._strings:
- if searchwindowsize is None:
- # the match, if any, can only be in the fresh data,
- # or at the very end of the old data
- offset = -(freshlen + len(s))
- else:
- # better obey searchwindowsize
- offset = -searchwindowsize
- n = buffer.find(s, offset)
- if n >= 0 and (first_match is None or n < first_match):
- first_match = n
- best_index, best_match = index, s
- if first_match is None:
- return -1
- self.match = best_match
- self.start = first_match
- self.end = self.start + len(self.match)
- return best_index
-
-
-class searcher_re(object):
- '''This is regular expression string search helper for the
- spawn.expect_any() method. This helper class is for powerful
- pattern matching. For speed, see the helper class, searcher_string.
-
- Attributes:
-
- eof_index - index of EOF, or -1
- timeout_index - index of TIMEOUT, or -1
-
- After a successful match by the search() method the following attributes
- are available:
-
- start - index into the buffer, first byte of match
- end - index into the buffer, first byte after match
+ strings. 'freshlen' must indicate the number of bytes at the end of
+ 'buffer' which have not been searched before. It helps to avoid
+ searching the same, possibly big, buffer over and over again.
+
+ See class spawn for the 'searchwindowsize' argument.
+
+ If there is a match this returns the index of that string, and sets
+ 'start', 'end' and 'match'. Otherwise, this returns -1. '''
+
+ first_match = None
+
+ # 'freshlen' helps a lot here. Further optimizations could
+ # possibly include:
+ #
+ # using something like the Boyer-Moore Fast String Searching
+ # Algorithm; pre-compiling the search through a list of
+ # strings into something that can scan the input once to
+ # search for all N strings; realize that if we search for
+ # ['bar', 'baz'] and the input is '...foo' we need not bother
+ # rescanning until we've read three more bytes.
+ #
+ # Sadly, I don't know enough about this interesting topic. /grahn
+
+ for index, s in self._strings:
+ if searchwindowsize is None:
+ # the match, if any, can only be in the fresh data,
+ # or at the very end of the old data
+ offset = -(freshlen + len(s))
+ else:
+ # better obey searchwindowsize
+ offset = -searchwindowsize
+ n = buffer.find(s, offset)
+ if n >= 0 and (first_match is None or n < first_match):
+ first_match = n
+ best_index, best_match = index, s
+ if first_match is None:
+ return -1
+ self.match = best_match
+ self.start = first_match
+ self.end = self.start + len(self.match)
+ return best_index
+
+
+class searcher_re(object):
+    '''This is a regular expression search helper for the
+ spawn.expect_any() method. This helper class is for powerful
+ pattern matching. For speed, see the helper class, searcher_string.
+
+ Attributes:
+
+ eof_index - index of EOF, or -1
+ timeout_index - index of TIMEOUT, or -1
+
+ After a successful match by the search() method the following attributes
+ are available:
+
+ start - index into the buffer, first byte of match
+ end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
-
- '''
-
- def __init__(self, patterns):
- '''This creates an instance that searches for 'patterns' Where
- 'patterns' may be a list or other sequence of compiled regular
- expressions, or the EOF or TIMEOUT types.'''
-
- self.eof_index = -1
- self.timeout_index = -1
- self._searches = []
+
+ '''
+
+ def __init__(self, patterns):
+        '''This creates an instance that searches for 'patterns', where
+ 'patterns' may be a list or other sequence of compiled regular
+ expressions, or the EOF or TIMEOUT types.'''
+
+ self.eof_index = -1
+ self.timeout_index = -1
+ self._searches = []
for n, s in enumerate(patterns):
- if s is EOF:
- self.eof_index = n
- continue
- if s is TIMEOUT:
- self.timeout_index = n
- continue
- self._searches.append((n, s))
-
- def __str__(self):
- '''This returns a human-readable string that represents the state of
- the object.'''
-
- #ss = [(n, ' %d: re.compile("%s")' %
- # (n, repr(s.pattern))) for n, s in self._searches]
- ss = list()
- for n, s in self._searches:
+ if s is EOF:
+ self.eof_index = n
+ continue
+ if s is TIMEOUT:
+ self.timeout_index = n
+ continue
+ self._searches.append((n, s))
+
+ def __str__(self):
+ '''This returns a human-readable string that represents the state of
+ the object.'''
+
+ #ss = [(n, ' %d: re.compile("%s")' %
+ # (n, repr(s.pattern))) for n, s in self._searches]
+ ss = list()
+ for n, s in self._searches:
ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
- ss.append((-1, 'searcher_re:'))
- if self.eof_index >= 0:
- ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
- if self.timeout_index >= 0:
- ss.append((self.timeout_index, ' %d: TIMEOUT' %
- self.timeout_index))
- ss.sort()
- ss = list(zip(*ss))[1]
- return '\n'.join(ss)
-
- def search(self, buffer, freshlen, searchwindowsize=None):
+ ss.append((-1, 'searcher_re:'))
+ if self.eof_index >= 0:
+ ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
+ if self.timeout_index >= 0:
+ ss.append((self.timeout_index, ' %d: TIMEOUT' %
+ self.timeout_index))
+ ss.sort()
+ ss = list(zip(*ss))[1]
+ return '\n'.join(ss)
+
+ def search(self, buffer, freshlen, searchwindowsize=None):
'''This searches 'buffer' for the first occurrence of one of the regular
- expressions. 'freshlen' must indicate the number of bytes at the end of
- 'buffer' which have not been searched before.
-
- See class spawn for the 'searchwindowsize' argument.
-
- If there is a match this returns the index of that string, and sets
- 'start', 'end' and 'match'. Otherwise, returns -1.'''
-
- first_match = None
- # 'freshlen' doesn't help here -- we cannot predict the
- # length of a match, and the re module provides no help.
- if searchwindowsize is None:
- searchstart = 0
- else:
- searchstart = max(0, len(buffer) - searchwindowsize)
- for index, s in self._searches:
- match = s.search(buffer, searchstart)
- if match is None:
- continue
- n = match.start()
- if first_match is None or n < first_match:
- first_match = n
- the_match = match
- best_index = index
- if first_match is None:
- return -1
- self.start = first_match
- self.match = the_match
- self.end = self.match.end()
+ expressions. 'freshlen' must indicate the number of bytes at the end of
+ 'buffer' which have not been searched before.
+
+ See class spawn for the 'searchwindowsize' argument.
+
+ If there is a match this returns the index of that string, and sets
+ 'start', 'end' and 'match'. Otherwise, returns -1.'''
+
+ first_match = None
+ # 'freshlen' doesn't help here -- we cannot predict the
+ # length of a match, and the re module provides no help.
+ if searchwindowsize is None:
+ searchstart = 0
+ else:
+ searchstart = max(0, len(buffer) - searchwindowsize)
+ for index, s in self._searches:
+ match = s.search(buffer, searchstart)
+ if match is None:
+ continue
+ n = match.start()
+ if first_match is None or n < first_match:
+ first_match = n
+ the_match = match
+ best_index = index
+ if first_match is None:
+ return -1
+ self.start = first_match
+ self.match = the_match
+ self.end = self.match.end()
return best_index
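
To make the searcher protocol concrete: both classes are constructed with a pattern list, then search(buffer, freshlen[, searchwindowsize]) is called and start/end/match are read on success. A minimal, self-contained sketch of driving them by hand (the buffer and patterns here are invented; the imports assume pexpect's usual layout, with both classes defined in pexpect/expect.py)::

    import re
    from pexpect import EOF, TIMEOUT
    from pexpect.expect import searcher_string, searcher_re

    buf = b'login: guest\r\npassword: '

    # Plain-string search: returns the index of the pattern whose match
    # starts earliest in the buffer, and sets start/end/match.
    ss = searcher_string([b'password: ', b'login: ', EOF, TIMEOUT])
    assert ss.search(buf, freshlen=len(buf)) == 1   # b'login: ' wins at offset 0
    assert buf[ss.start:ss.end] == b'login: '

    # Regex search over the same buffer; match is an ordinary re match object.
    sr = searcher_re([re.compile(br'pass\w+: ')])
    assert sr.search(buf, freshlen=len(buf)) == 0
    assert sr.match.group(0) == b'password: '
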
diff --git a/contrib/python/pexpect/pexpect/fdpexpect.py b/contrib/python/pexpect/pexpect/fdpexpect.py
index a3096537f6..cddd50e100 100644
--- a/contrib/python/pexpect/pexpect/fdpexpect.py
+++ b/contrib/python/pexpect/pexpect/fdpexpect.py
@@ -1,119 +1,119 @@
-'''This is like pexpect, but it will work with any file descriptor that you
+'''This is like pexpect, but it will work with any file descriptor that you
pass it. You are responsible for opening and closing the file descriptor.
-This allows you to use Pexpect with sockets and named pipes (FIFOs).
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-from .spawnbase import SpawnBase
+This allows you to use Pexpect with sockets and named pipes (FIFOs).
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+from .spawnbase import SpawnBase
from .exceptions import ExceptionPexpect, TIMEOUT
from .utils import select_ignore_interrupts, poll_ignore_interrupts
-import os
-
-__all__ = ['fdspawn']
-
-class fdspawn(SpawnBase):
- '''This is like pexpect.spawn but allows you to supply your own open file
- descriptor. For example, you could use it to read through a file looking
- for patterns, or to control a modem or serial device. '''
-
- def __init__ (self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
+import os
+
+__all__ = ['fdspawn']
+
+class fdspawn(SpawnBase):
+ '''This is like pexpect.spawn but allows you to supply your own open file
+ descriptor. For example, you could use it to read through a file looking
+ for patterns, or to control a modem or serial device. '''
+
+ def __init__ (self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, encoding=None, codec_errors='strict', use_poll=False):
- '''This takes a file descriptor (an int) or an object that support the
- fileno() method (returning an int). All Python file-like objects
- support fileno(). '''
-
- if type(fd) != type(0) and hasattr(fd, 'fileno'):
- fd = fd.fileno()
-
- if type(fd) != type(0):
- raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')
-
- try: # make sure fd is a valid file descriptor
- os.fstat(fd)
- except OSError:
- raise ExceptionPexpect('The fd argument is not a valid file descriptor.')
-
- self.args = None
- self.command = None
- SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
- encoding=encoding, codec_errors=codec_errors)
- self.child_fd = fd
- self.own_fd = False
- self.closed = False
- self.name = '<file descriptor %d>' % fd
+        '''This takes a file descriptor (an int) or an object that supports the
+ fileno() method (returning an int). All Python file-like objects
+ support fileno(). '''
+
+ if type(fd) != type(0) and hasattr(fd, 'fileno'):
+ fd = fd.fileno()
+
+ if type(fd) != type(0):
+ raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')
+
+ try: # make sure fd is a valid file descriptor
+ os.fstat(fd)
+ except OSError:
+ raise ExceptionPexpect('The fd argument is not a valid file descriptor.')
+
+ self.args = None
+ self.command = None
+ SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
+ encoding=encoding, codec_errors=codec_errors)
+ self.child_fd = fd
+ self.own_fd = False
+ self.closed = False
+ self.name = '<file descriptor %d>' % fd
self.use_poll = use_poll
-
- def close (self):
- """Close the file descriptor.
-
- Calling this method a second time does nothing, but if the file
- descriptor was closed elsewhere, :class:`OSError` will be raised.
- """
- if self.child_fd == -1:
- return
-
- self.flush()
- os.close(self.child_fd)
- self.child_fd = -1
- self.closed = True
-
- def isalive (self):
- '''This checks if the file descriptor is still valid. If :func:`os.fstat`
- does not raise an exception then we assume it is alive. '''
-
- if self.child_fd == -1:
- return False
- try:
- os.fstat(self.child_fd)
- return True
- except:
- return False
-
- def terminate (self, force=False): # pragma: no cover
- '''Deprecated and invalid. Just raises an exception.'''
- raise ExceptionPexpect('This method is not valid for file descriptors.')
-
- # These four methods are left around for backwards compatibility, but not
- # documented as part of fdpexpect. You're encouraged to use os.write
- # directly.
- def send(self, s):
- "Write to fd, return number of bytes written"
- s = self._coerce_send_string(s)
- self._log(s, 'send')
-
- b = self._encoder.encode(s, final=False)
- return os.write(self.child_fd, b)
-
- def sendline(self, s):
- "Write to fd with trailing newline, return number of bytes written"
- s = self._coerce_send_string(s)
- return self.send(s + self.linesep)
-
- def write(self, s):
- "Write to fd, return None"
- self.send(s)
-
- def writelines(self, sequence):
- "Call self.write() for each item in sequence"
- for s in sequence:
- self.write(s)
+
+ def close (self):
+ """Close the file descriptor.
+
+ Calling this method a second time does nothing, but if the file
+ descriptor was closed elsewhere, :class:`OSError` will be raised.
+ """
+ if self.child_fd == -1:
+ return
+
+ self.flush()
+ os.close(self.child_fd)
+ self.child_fd = -1
+ self.closed = True
+
+ def isalive (self):
+ '''This checks if the file descriptor is still valid. If :func:`os.fstat`
+ does not raise an exception then we assume it is alive. '''
+
+ if self.child_fd == -1:
+ return False
+ try:
+ os.fstat(self.child_fd)
+ return True
+ except:
+ return False
+
+ def terminate (self, force=False): # pragma: no cover
+ '''Deprecated and invalid. Just raises an exception.'''
+ raise ExceptionPexpect('This method is not valid for file descriptors.')
+
+ # These four methods are left around for backwards compatibility, but not
+ # documented as part of fdpexpect. You're encouraged to use os.write
+ # directly.
+ def send(self, s):
+ "Write to fd, return number of bytes written"
+ s = self._coerce_send_string(s)
+ self._log(s, 'send')
+
+ b = self._encoder.encode(s, final=False)
+ return os.write(self.child_fd, b)
+
+ def sendline(self, s):
+ "Write to fd with trailing newline, return number of bytes written"
+ s = self._coerce_send_string(s)
+ return self.send(s + self.linesep)
+
+ def write(self, s):
+ "Write to fd, return None"
+ self.send(s)
+
+ def writelines(self, sequence):
+ "Call self.write() for each item in sequence"
+ for s in sequence:
+ self.write(s)
def read_nonblocking(self, size=1, timeout=-1):
"""
diff --git a/contrib/python/pexpect/pexpect/popen_spawn.py b/contrib/python/pexpect/pexpect/popen_spawn.py
index 59cd85738a..4bb58cfe76 100644
--- a/contrib/python/pexpect/pexpect/popen_spawn.py
+++ b/contrib/python/pexpect/pexpect/popen_spawn.py
@@ -1,30 +1,30 @@
-"""Provides an interface like pexpect.spawn interface using subprocess.Popen
-"""
-import os
-import threading
-import subprocess
-import sys
-import time
-import signal
-import shlex
-
-try:
- from queue import Queue, Empty # Python 3
-except ImportError:
- from Queue import Queue, Empty # Python 2
-
-from .spawnbase import SpawnBase, PY3
-from .exceptions import EOF
+"""Provides an interface like pexpect.spawn interface using subprocess.Popen
+"""
+import os
+import threading
+import subprocess
+import sys
+import time
+import signal
+import shlex
+
+try:
+ from queue import Queue, Empty # Python 3
+except ImportError:
+ from Queue import Queue, Empty # Python 2
+
+from .spawnbase import SpawnBase, PY3
+from .exceptions import EOF
from .utils import string_types
-
-class PopenSpawn(SpawnBase):
- def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
+
+class PopenSpawn(SpawnBase):
+ def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, encoding=None,
codec_errors='strict', preexec_fn=None):
- super(PopenSpawn, self).__init__(timeout=timeout, maxread=maxread,
- searchwindowsize=searchwindowsize, logfile=logfile,
- encoding=encoding, codec_errors=codec_errors)
-
+ super(PopenSpawn, self).__init__(timeout=timeout, maxread=maxread,
+ searchwindowsize=searchwindowsize, logfile=logfile,
+ encoding=encoding, codec_errors=codec_errors)
+
# Note that `SpawnBase` initializes `self.crlf` to `\r\n`
# because the default behaviour for a PTY is to convert
# incoming LF to `\r\n` (see the `onlcr` flag and
@@ -37,152 +37,152 @@ class PopenSpawn(SpawnBase):
else:
self.crlf = self.string_type (os.linesep)
- kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
- stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
+ kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
+ stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
cwd=cwd, preexec_fn=preexec_fn, env=env)
-
- if sys.platform == 'win32':
- startupinfo = subprocess.STARTUPINFO()
- startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
- kwargs['startupinfo'] = startupinfo
- kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
-
+
+ if sys.platform == 'win32':
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+ kwargs['startupinfo'] = startupinfo
+ kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
+
if isinstance(cmd, string_types) and sys.platform != 'win32':
cmd = shlex.split(cmd, posix=os.name == 'posix')
-
- self.proc = subprocess.Popen(cmd, **kwargs)
+
+ self.proc = subprocess.Popen(cmd, **kwargs)
self.pid = self.proc.pid
- self.closed = False
- self._buf = self.string_type()
-
- self._read_queue = Queue()
- self._read_thread = threading.Thread(target=self._read_incoming)
- self._read_thread.setDaemon(True)
- self._read_thread.start()
-
- _read_reached_eof = False
-
- def read_nonblocking(self, size, timeout):
- buf = self._buf
- if self._read_reached_eof:
- # We have already finished reading. Use up any buffered data,
- # then raise EOF
- if buf:
- self._buf = buf[size:]
- return buf[:size]
- else:
- self.flag_eof = True
- raise EOF('End Of File (EOF).')
-
- if timeout == -1:
- timeout = self.timeout
- elif timeout is None:
- timeout = 1e6
-
- t0 = time.time()
- while (time.time() - t0) < timeout and size and len(buf) < size:
- try:
- incoming = self._read_queue.get_nowait()
- except Empty:
- break
- else:
- if incoming is None:
- self._read_reached_eof = True
- break
-
- buf += self._decoder.decode(incoming, final=False)
-
- r, self._buf = buf[:size], buf[size:]
-
- self._log(r, 'read')
- return r
-
- def _read_incoming(self):
- """Run in a thread to move output from a pipe to a queue."""
- fileno = self.proc.stdout.fileno()
- while 1:
- buf = b''
- try:
- buf = os.read(fileno, 1024)
- except OSError as e:
- self._log(e, 'read')
-
- if not buf:
- # This indicates we have reached EOF
- self._read_queue.put(None)
- return
-
- self._read_queue.put(buf)
-
- def write(self, s):
- '''This is similar to send() except that there is no return value.
- '''
- self.send(s)
-
- def writelines(self, sequence):
- '''This calls write() for each element in the sequence.
-
- The sequence can be any iterable object producing strings, typically a
- list of strings. This does not add line separators. There is no return
- value.
- '''
- for s in sequence:
- self.send(s)
-
- def send(self, s):
- '''Send data to the subprocess' stdin.
-
- Returns the number of bytes written.
- '''
- s = self._coerce_send_string(s)
- self._log(s, 'send')
-
- b = self._encoder.encode(s, final=False)
- if PY3:
- return self.proc.stdin.write(b)
- else:
- # On Python 2, .write() returns None, so we return the length of
- # bytes written ourselves. This assumes they all got written.
- self.proc.stdin.write(b)
- return len(b)
-
- def sendline(self, s=''):
- '''Wraps send(), sending string ``s`` to child process, with os.linesep
- automatically appended. Returns number of bytes written. '''
-
- n = self.send(s)
- return n + self.send(self.linesep)
-
- def wait(self):
- '''Wait for the subprocess to finish.
-
- Returns the exit code.
- '''
- status = self.proc.wait()
- if status >= 0:
- self.exitstatus = status
- self.signalstatus = None
- else:
- self.exitstatus = None
- self.signalstatus = -status
- self.terminated = True
- return status
-
- def kill(self, sig):
- '''Sends a Unix signal to the subprocess.
-
- Use constants from the :mod:`signal` module to specify which signal.
- '''
- if sys.platform == 'win32':
- if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
- sig = signal.CTRL_C_EVENT
- elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
- sig = signal.CTRL_BREAK_EVENT
- else:
- sig = signal.SIGTERM
-
- os.kill(self.proc.pid, sig)
-
- def sendeof(self):
- '''Closes the stdin pipe from the writing end.'''
- self.proc.stdin.close()
+ self.closed = False
+ self._buf = self.string_type()
+
+ self._read_queue = Queue()
+ self._read_thread = threading.Thread(target=self._read_incoming)
+ self._read_thread.setDaemon(True)
+ self._read_thread.start()
+
+ _read_reached_eof = False
+
+ def read_nonblocking(self, size, timeout):
+ buf = self._buf
+ if self._read_reached_eof:
+ # We have already finished reading. Use up any buffered data,
+ # then raise EOF
+ if buf:
+ self._buf = buf[size:]
+ return buf[:size]
+ else:
+ self.flag_eof = True
+ raise EOF('End Of File (EOF).')
+
+ if timeout == -1:
+ timeout = self.timeout
+ elif timeout is None:
+ timeout = 1e6
+
+ t0 = time.time()
+ while (time.time() - t0) < timeout and size and len(buf) < size:
+ try:
+ incoming = self._read_queue.get_nowait()
+ except Empty:
+ break
+ else:
+ if incoming is None:
+ self._read_reached_eof = True
+ break
+
+ buf += self._decoder.decode(incoming, final=False)
+
+ r, self._buf = buf[:size], buf[size:]
+
+ self._log(r, 'read')
+ return r
+
+ def _read_incoming(self):
+ """Run in a thread to move output from a pipe to a queue."""
+ fileno = self.proc.stdout.fileno()
+ while 1:
+ buf = b''
+ try:
+ buf = os.read(fileno, 1024)
+ except OSError as e:
+ self._log(e, 'read')
+
+ if not buf:
+ # This indicates we have reached EOF
+ self._read_queue.put(None)
+ return
+
+ self._read_queue.put(buf)
+
+ def write(self, s):
+ '''This is similar to send() except that there is no return value.
+ '''
+ self.send(s)
+
+ def writelines(self, sequence):
+ '''This calls write() for each element in the sequence.
+
+ The sequence can be any iterable object producing strings, typically a
+ list of strings. This does not add line separators. There is no return
+ value.
+ '''
+ for s in sequence:
+ self.send(s)
+
+ def send(self, s):
+ '''Send data to the subprocess' stdin.
+
+ Returns the number of bytes written.
+ '''
+ s = self._coerce_send_string(s)
+ self._log(s, 'send')
+
+ b = self._encoder.encode(s, final=False)
+ if PY3:
+ return self.proc.stdin.write(b)
+ else:
+ # On Python 2, .write() returns None, so we return the length of
+ # bytes written ourselves. This assumes they all got written.
+ self.proc.stdin.write(b)
+ return len(b)
+
+ def sendline(self, s=''):
+ '''Wraps send(), sending string ``s`` to child process, with os.linesep
+ automatically appended. Returns number of bytes written. '''
+
+ n = self.send(s)
+ return n + self.send(self.linesep)
+
+ def wait(self):
+ '''Wait for the subprocess to finish.
+
+ Returns the exit code.
+ '''
+ status = self.proc.wait()
+ if status >= 0:
+ self.exitstatus = status
+ self.signalstatus = None
+ else:
+ self.exitstatus = None
+ self.signalstatus = -status
+ self.terminated = True
+ return status
+
+ def kill(self, sig):
+ '''Sends a Unix signal to the subprocess.
+
+ Use constants from the :mod:`signal` module to specify which signal.
+ '''
+ if sys.platform == 'win32':
+ if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
+ sig = signal.CTRL_C_EVENT
+ elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
+ sig = signal.CTRL_BREAK_EVENT
+ else:
+ sig = signal.SIGTERM
+
+ os.kill(self.proc.pid, sig)
+
+ def sendeof(self):
+ '''Closes the stdin pipe from the writing end.'''
+ self.proc.stdin.close()
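
A short PopenSpawn round trip ties these methods together. This is a usage sketch, not from the diff; the one-line echo child is an arbitrary choice and only the standard library is otherwise assumed::

    import sys
    from pexpect.popen_spawn import PopenSpawn

    # No pty is allocated, so this also works where pexpect.spawn is
    # unavailable (e.g. Windows); stdout is drained by the reader thread.
    child = PopenSpawn([sys.executable, '-u', '-c',
                        'import sys; sys.stdout.write(sys.stdin.readline())'])
    child.sendline('hello')       # encoded and written to proc.stdin
    child.expect('hello')         # matched against the queued stdout data
    child.sendeof()               # close stdin so the child can finish
    assert child.wait() == 0      # exitstatus == 0, signalstatus is None
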
diff --git a/contrib/python/pexpect/pexpect/pty_spawn.py b/contrib/python/pexpect/pexpect/pty_spawn.py
index 3c055cdda3..8e28ca7cd7 100644
--- a/contrib/python/pexpect/pexpect/pty_spawn.py
+++ b/contrib/python/pexpect/pexpect/pty_spawn.py
@@ -1,447 +1,447 @@
-import os
-import sys
-import time
-import pty
-import tty
-import errno
-import signal
-from contextlib import contextmanager
-
-import ptyprocess
-from ptyprocess.ptyprocess import use_native_pty_fork
-
-from .exceptions import ExceptionPexpect, EOF, TIMEOUT
-from .spawnbase import SpawnBase
+import os
+import sys
+import time
+import pty
+import tty
+import errno
+import signal
+from contextlib import contextmanager
+
+import ptyprocess
+from ptyprocess.ptyprocess import use_native_pty_fork
+
+from .exceptions import ExceptionPexpect, EOF, TIMEOUT
+from .spawnbase import SpawnBase
from .utils import (
which, split_command_line, select_ignore_interrupts, poll_ignore_interrupts
)
-
-@contextmanager
-def _wrap_ptyprocess_err():
- """Turn ptyprocess errors into our own ExceptionPexpect errors"""
- try:
- yield
- except ptyprocess.PtyProcessError as e:
- raise ExceptionPexpect(*e.args)
-
-PY3 = (sys.version_info[0] >= 3)
-
-class spawn(SpawnBase):
- '''This is the main class interface for Pexpect. Use this class to start
- and control child applications. '''
-
- # This is purely informational now - changing it has no effect
- use_native_pty_fork = use_native_pty_fork
-
- def __init__(self, command, args=[], timeout=30, maxread=2000,
- searchwindowsize=None, logfile=None, cwd=None, env=None,
- ignore_sighup=False, echo=True, preexec_fn=None,
+
+@contextmanager
+def _wrap_ptyprocess_err():
+ """Turn ptyprocess errors into our own ExceptionPexpect errors"""
+ try:
+ yield
+ except ptyprocess.PtyProcessError as e:
+ raise ExceptionPexpect(*e.args)
+
+PY3 = (sys.version_info[0] >= 3)
+
+class spawn(SpawnBase):
+ '''This is the main class interface for Pexpect. Use this class to start
+ and control child applications. '''
+
+ # This is purely informational now - changing it has no effect
+ use_native_pty_fork = use_native_pty_fork
+
+ def __init__(self, command, args=[], timeout=30, maxread=2000,
+ searchwindowsize=None, logfile=None, cwd=None, env=None,
+ ignore_sighup=False, echo=True, preexec_fn=None,
encoding=None, codec_errors='strict', dimensions=None,
use_poll=False):
- '''This is the constructor. The command parameter may be a string that
- includes a command and any arguments to the command. For example::
-
- child = pexpect.spawn('/usr/bin/ftp')
- child = pexpect.spawn('/usr/bin/ssh user@example.com')
- child = pexpect.spawn('ls -latr /tmp')
-
- You may also construct it with a list of arguments like so::
-
- child = pexpect.spawn('/usr/bin/ftp', [])
- child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
- child = pexpect.spawn('ls', ['-latr', '/tmp'])
-
- After this the child application will be created and will be ready to
- talk to. For normal use, see expect() and send() and sendline().
-
- Remember that Pexpect does NOT interpret shell meta characters such as
- redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
- common mistake. If you want to run a command and pipe it through
- another command then you must also start a shell. For example::
-
- child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
- child.expect(pexpect.EOF)
-
- The second form of spawn (where you pass a list of arguments) is useful
- in situations where you wish to spawn a command and pass it its own
- argument list. This can make syntax more clear. For example, the
- following is equivalent to the previous example::
-
- shell_cmd = 'ls -l | grep LOG > logs.txt'
- child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
- child.expect(pexpect.EOF)
-
- The maxread attribute sets the read buffer size. This is maximum number
- of bytes that Pexpect will try to read from a TTY at one time. Setting
- the maxread size to 1 will turn off buffering. Setting the maxread
- value higher may help performance in cases where large amounts of
- output are read back from the child. This feature is useful in
- conjunction with searchwindowsize.
-
- When the keyword argument *searchwindowsize* is None (default), the
- full buffer is searched at each iteration of receiving incoming data.
- The default number of bytes scanned at each iteration is very large
- and may be reduced to collaterally reduce search cost. After
- :meth:`~.expect` returns, the full buffer attribute remains up to
- size *maxread* irrespective of *searchwindowsize* value.
-
- When the keyword argument ``timeout`` is specified as a number,
- (default: *30*), then :class:`TIMEOUT` will be raised after the value
- specified has elapsed, in seconds, for any of the :meth:`~.expect`
- family of method calls. When None, TIMEOUT will not be raised, and
- :meth:`~.expect` may block indefinitely until match.
-
-
- The logfile member turns on or off logging. All input and output will
- be copied to the given file object. Set logfile to None to stop
- logging. This is the default. Set logfile to sys.stdout to echo
- everything to standard output. The logfile is flushed after each write.
-
- Example log input and output to a file::
-
- child = pexpect.spawn('some_command')
- fout = open('mylog.txt','wb')
- child.logfile = fout
-
- Example log to stdout::
-
- # In Python 2:
- child = pexpect.spawn('some_command')
- child.logfile = sys.stdout
-
+ '''This is the constructor. The command parameter may be a string that
+ includes a command and any arguments to the command. For example::
+
+ child = pexpect.spawn('/usr/bin/ftp')
+ child = pexpect.spawn('/usr/bin/ssh user@example.com')
+ child = pexpect.spawn('ls -latr /tmp')
+
+ You may also construct it with a list of arguments like so::
+
+ child = pexpect.spawn('/usr/bin/ftp', [])
+ child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
+ child = pexpect.spawn('ls', ['-latr', '/tmp'])
+
+ After this the child application will be created and will be ready to
+ talk to. For normal use, see expect() and send() and sendline().
+
+ Remember that Pexpect does NOT interpret shell meta characters such as
+ redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
+ common mistake. If you want to run a command and pipe it through
+ another command then you must also start a shell. For example::
+
+ child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
+ child.expect(pexpect.EOF)
+
+ The second form of spawn (where you pass a list of arguments) is useful
+ in situations where you wish to spawn a command and pass it its own
+        argument list. This can make the syntax clearer. For example, the
+ following is equivalent to the previous example::
+
+ shell_cmd = 'ls -l | grep LOG > logs.txt'
+ child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
+ child.expect(pexpect.EOF)
+
+        The maxread attribute sets the read buffer size. This is the maximum number
+ of bytes that Pexpect will try to read from a TTY at one time. Setting
+ the maxread size to 1 will turn off buffering. Setting the maxread
+ value higher may help performance in cases where large amounts of
+ output are read back from the child. This feature is useful in
+ conjunction with searchwindowsize.
+
+ When the keyword argument *searchwindowsize* is None (default), the
+ full buffer is searched at each iteration of receiving incoming data.
+ The default number of bytes scanned at each iteration is very large
+ and may be reduced to collaterally reduce search cost. After
+ :meth:`~.expect` returns, the full buffer attribute remains up to
+ size *maxread* irrespective of *searchwindowsize* value.
+
+        When the keyword argument ``timeout`` is specified as a number
+        (default: *30*), then :class:`TIMEOUT` will be raised after the value
+        specified has elapsed, in seconds, for any of the :meth:`~.expect`
+        family of method calls. When None, TIMEOUT will not be raised, and
+        :meth:`~.expect` may block indefinitely until a match.
+
+
+ The logfile member turns on or off logging. All input and output will
+ be copied to the given file object. Set logfile to None to stop
+ logging. This is the default. Set logfile to sys.stdout to echo
+ everything to standard output. The logfile is flushed after each write.
+
+ Example log input and output to a file::
+
+ child = pexpect.spawn('some_command')
+ fout = open('mylog.txt','wb')
+ child.logfile = fout
+
+ Example log to stdout::
+
+ # In Python 2:
+ child = pexpect.spawn('some_command')
+ child.logfile = sys.stdout
+
# In Python 3, we'll use the ``encoding`` argument to decode data
# from the subprocess and handle it as unicode:
child = pexpect.spawn('some_command', encoding='utf-8')
- child.logfile = sys.stdout
-
- The logfile_read and logfile_send members can be used to separately log
- the input from the child and output sent to the child. Sometimes you
- don't want to see everything you write to the child. You only want to
- log what the child sends back. For example::
-
- child = pexpect.spawn('some_command')
- child.logfile_read = sys.stdout
-
- You will need to pass an encoding to spawn in the above code if you are
- using Python 3.
-
- To separately log output sent to the child use logfile_send::
-
- child.logfile_send = fout
-
- If ``ignore_sighup`` is True, the child process will ignore SIGHUP
- signals. The default is False from Pexpect 4.0, meaning that SIGHUP
- will be handled normally by the child.
-
- The delaybeforesend helps overcome a weird behavior that many users
- were experiencing. The typical problem was that a user would expect() a
- "Password:" prompt and then immediately call sendline() to send the
- password. The user would then see that their password was echoed back
- to them. Passwords don't normally echo. The problem is caused by the
- fact that most applications print out the "Password" prompt and then
- turn off stdin echo, but if you send your password before the
- application turned off echo, then you get your password echoed.
- Normally this wouldn't be a problem when interacting with a human at a
- real keyboard. If you introduce a slight delay just before writing then
- this seems to clear up the problem. This was such a common problem for
- many users that I decided that the default pexpect behavior should be
- to sleep just before writing to the child application. 1/20th of a
- second (50 ms) seems to be enough to clear up the problem. You can set
+ child.logfile = sys.stdout
+
+ The logfile_read and logfile_send members can be used to separately log
+ the input from the child and output sent to the child. Sometimes you
+ don't want to see everything you write to the child. You only want to
+ log what the child sends back. For example::
+
+ child = pexpect.spawn('some_command')
+ child.logfile_read = sys.stdout
+
+ You will need to pass an encoding to spawn in the above code if you are
+ using Python 3.
+
+ To separately log output sent to the child use logfile_send::
+
+ child.logfile_send = fout
+
+ If ``ignore_sighup`` is True, the child process will ignore SIGHUP
+ signals. The default is False from Pexpect 4.0, meaning that SIGHUP
+ will be handled normally by the child.
+
+ The delaybeforesend helps overcome a weird behavior that many users
+ were experiencing. The typical problem was that a user would expect() a
+ "Password:" prompt and then immediately call sendline() to send the
+ password. The user would then see that their password was echoed back
+ to them. Passwords don't normally echo. The problem is caused by the
+ fact that most applications print out the "Password" prompt and then
+ turn off stdin echo, but if you send your password before the
+ application turned off echo, then you get your password echoed.
+ Normally this wouldn't be a problem when interacting with a human at a
+ real keyboard. If you introduce a slight delay just before writing then
+ this seems to clear up the problem. This was such a common problem for
+ many users that I decided that the default pexpect behavior should be
+ to sleep just before writing to the child application. 1/20th of a
+ second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to None to return to the old behavior.
-
- Note that spawn is clever about finding commands on your path.
- It uses the same logic that "which" uses to find executables.
-
- If you wish to get the exit status of the child you must call the
- close() method. The exit or signal status of the child will be stored
- in self.exitstatus or self.signalstatus. If the child exited normally
- then exitstatus will store the exit return code and signalstatus will
- be None. If the child was terminated abnormally with a signal then
+
+ Note that spawn is clever about finding commands on your path.
+ It uses the same logic that "which" uses to find executables.
+
+ If you wish to get the exit status of the child you must call the
+ close() method. The exit or signal status of the child will be stored
+ in self.exitstatus or self.signalstatus. If the child exited normally
+ then exitstatus will store the exit return code and signalstatus will
+ be None. If the child was terminated abnormally with a signal then
        signalstatus will store the signal value and exitstatus will be None::

            child = pexpect.spawn('some_command')
            child.close()
            print(child.exitstatus, child.signalstatus)
- If you need more detail you can also read the self.status member which
- stores the status returned by os.waitpid. You can interpret this using
- os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG.
-
- The echo attribute may be set to False to disable echoing of input.
- As a pseudo-terminal, all input echoed by the "keyboard" (send()
- or sendline()) will be repeated to output. For many cases, it is
- not desirable to have echo enabled, and it may be later disabled
- using setecho(False) followed by waitnoecho(). However, for some
- platforms such as Solaris, this is not possible, and should be
- disabled immediately on spawn.
-
- If preexec_fn is given, it will be called in the child process before
- launching the given command. This is useful to e.g. reset inherited
- signal handlers.
-
- The dimensions attribute specifies the size of the pseudo-terminal as
- seen by the subprocess, and is specified as a two-entry tuple (rows,
- columns). If this is unspecified, the defaults in ptyprocess will apply.
+ If you need more detail you can also read the self.status member which
+ stores the status returned by os.waitpid. You can interpret this using
+        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG.
+
+ The echo attribute may be set to False to disable echoing of input.
+ As a pseudo-terminal, all input echoed by the "keyboard" (send()
+ or sendline()) will be repeated to output. For many cases, it is
+ not desirable to have echo enabled, and it may be later disabled
+ using setecho(False) followed by waitnoecho(). However, for some
+ platforms such as Solaris, this is not possible, and should be
+ disabled immediately on spawn.
+
+ If preexec_fn is given, it will be called in the child process before
+ launching the given command. This is useful to e.g. reset inherited
+ signal handlers.
+
+ The dimensions attribute specifies the size of the pseudo-terminal as
+ seen by the subprocess, and is specified as a two-entry tuple (rows,
+ columns). If this is unspecified, the defaults in ptyprocess will apply.
The use_poll attribute enables using select.poll() over select.select()
        for socket handling. This is handy if your system could have > 1024 fds.
- '''
- super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize,
- logfile=logfile, encoding=encoding, codec_errors=codec_errors)
- self.STDIN_FILENO = pty.STDIN_FILENO
- self.STDOUT_FILENO = pty.STDOUT_FILENO
- self.STDERR_FILENO = pty.STDERR_FILENO
+ '''
+ super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize,
+ logfile=logfile, encoding=encoding, codec_errors=codec_errors)
+ self.STDIN_FILENO = pty.STDIN_FILENO
+ self.STDOUT_FILENO = pty.STDOUT_FILENO
+ self.STDERR_FILENO = pty.STDERR_FILENO
self.str_last_chars = 100
- self.cwd = cwd
- self.env = env
- self.echo = echo
- self.ignore_sighup = ignore_sighup
- self.__irix_hack = sys.platform.lower().startswith('irix')
- if command is None:
- self.command = None
- self.args = None
- self.name = '<pexpect factory incomplete>'
- else:
- self._spawn(command, args, preexec_fn, dimensions)
+ self.cwd = cwd
+ self.env = env
+ self.echo = echo
+ self.ignore_sighup = ignore_sighup
+ self.__irix_hack = sys.platform.lower().startswith('irix')
+ if command is None:
+ self.command = None
+ self.args = None
+ self.name = '<pexpect factory incomplete>'
+ else:
+ self._spawn(command, args, preexec_fn, dimensions)
self.use_poll = use_poll
-
- def __str__(self):
- '''This returns a human-readable string that represents the state of
- the object. '''
-
- s = []
- s.append(repr(self))
- s.append('command: ' + str(self.command))
- s.append('args: %r' % (self.args,))
+
+ def __str__(self):
+ '''This returns a human-readable string that represents the state of
+ the object. '''
+
+ s = []
+ s.append(repr(self))
+ s.append('command: ' + str(self.command))
+ s.append('args: %r' % (self.args,))
s.append('buffer (last %s chars): %r' % (self.str_last_chars,self.buffer[-self.str_last_chars:]))
s.append('before (last %s chars): %r' % (self.str_last_chars,self.before[-self.str_last_chars:] if self.before else ''))
- s.append('after: %r' % (self.after,))
- s.append('match: %r' % (self.match,))
- s.append('match_index: ' + str(self.match_index))
- s.append('exitstatus: ' + str(self.exitstatus))
+ s.append('after: %r' % (self.after,))
+ s.append('match: %r' % (self.match,))
+ s.append('match_index: ' + str(self.match_index))
+ s.append('exitstatus: ' + str(self.exitstatus))
if hasattr(self, 'ptyproc'):
s.append('flag_eof: ' + str(self.flag_eof))
- s.append('pid: ' + str(self.pid))
- s.append('child_fd: ' + str(self.child_fd))
- s.append('closed: ' + str(self.closed))
- s.append('timeout: ' + str(self.timeout))
- s.append('delimiter: ' + str(self.delimiter))
- s.append('logfile: ' + str(self.logfile))
- s.append('logfile_read: ' + str(self.logfile_read))
- s.append('logfile_send: ' + str(self.logfile_send))
- s.append('maxread: ' + str(self.maxread))
- s.append('ignorecase: ' + str(self.ignorecase))
- s.append('searchwindowsize: ' + str(self.searchwindowsize))
- s.append('delaybeforesend: ' + str(self.delaybeforesend))
- s.append('delayafterclose: ' + str(self.delayafterclose))
- s.append('delayafterterminate: ' + str(self.delayafterterminate))
- return '\n'.join(s)
-
- def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
- '''This starts the given command in a child process. This does all the
- fork/exec type of stuff for a pty. This is called by __init__. If args
- is empty then command will be parsed (split on spaces) and args will be
- set to parsed arguments. '''
-
- # The pid and child_fd of this object get set by this method.
- # Note that it is difficult for this method to fail.
- # You cannot detect if the child process cannot start.
- # So the only way you can tell if the child process started
- # or not is to try to read from the file descriptor. If you get
- # EOF immediately then it means that the child is already dead.
- # That may not necessarily be bad because you may have spawned a child
- # that performs some task; creates no stdout output; and then dies.
-
- # If command is an int type then it may represent a file descriptor.
- if isinstance(command, type(0)):
- raise ExceptionPexpect('Command is an int type. ' +
- 'If this is a file descriptor then maybe you want to ' +
- 'use fdpexpect.fdspawn which takes an existing ' +
- 'file descriptor instead of a command string.')
-
- if not isinstance(args, type([])):
- raise TypeError('The argument, args, must be a list.')
-
- if args == []:
- self.args = split_command_line(command)
- self.command = self.args[0]
- else:
- # Make a shallow copy of the args list.
- self.args = args[:]
- self.args.insert(0, command)
- self.command = command
-
+ s.append('pid: ' + str(self.pid))
+ s.append('child_fd: ' + str(self.child_fd))
+ s.append('closed: ' + str(self.closed))
+ s.append('timeout: ' + str(self.timeout))
+ s.append('delimiter: ' + str(self.delimiter))
+ s.append('logfile: ' + str(self.logfile))
+ s.append('logfile_read: ' + str(self.logfile_read))
+ s.append('logfile_send: ' + str(self.logfile_send))
+ s.append('maxread: ' + str(self.maxread))
+ s.append('ignorecase: ' + str(self.ignorecase))
+ s.append('searchwindowsize: ' + str(self.searchwindowsize))
+ s.append('delaybeforesend: ' + str(self.delaybeforesend))
+ s.append('delayafterclose: ' + str(self.delayafterclose))
+ s.append('delayafterterminate: ' + str(self.delayafterterminate))
+ return '\n'.join(s)
+
+ def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
+ '''This starts the given command in a child process. This does all the
+ fork/exec type of stuff for a pty. This is called by __init__. If args
+ is empty then command will be parsed (split on spaces) and args will be
+ set to parsed arguments. '''
+
+ # The pid and child_fd of this object get set by this method.
+ # Note that it is difficult for this method to fail.
+ # You cannot detect if the child process cannot start.
+ # So the only way you can tell if the child process started
+ # or not is to try to read from the file descriptor. If you get
+ # EOF immediately then it means that the child is already dead.
+ # That may not necessarily be bad because you may have spawned a child
+ # that performs some task; creates no stdout output; and then dies.
+
+ # If command is an int type then it may represent a file descriptor.
+ if isinstance(command, type(0)):
+ raise ExceptionPexpect('Command is an int type. ' +
+ 'If this is a file descriptor then maybe you want to ' +
+ 'use fdpexpect.fdspawn which takes an existing ' +
+ 'file descriptor instead of a command string.')
+
+ if not isinstance(args, type([])):
+ raise TypeError('The argument, args, must be a list.')
+
+ if args == []:
+ self.args = split_command_line(command)
+ self.command = self.args[0]
+ else:
+ # Make a shallow copy of the args list.
+ self.args = args[:]
+ self.args.insert(0, command)
+ self.command = command
+
command_with_path = which(self.command, env=self.env)
- if command_with_path is None:
- raise ExceptionPexpect('The command was not found or was not ' +
- 'executable: %s.' % self.command)
- self.command = command_with_path
- self.args[0] = self.command
-
- self.name = '<' + ' '.join(self.args) + '>'
-
- assert self.pid is None, 'The pid member must be None.'
- assert self.command is not None, 'The command member must not be None.'
-
- kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
- if self.ignore_sighup:
- def preexec_wrapper():
- "Set SIGHUP to be ignored, then call the real preexec_fn"
- signal.signal(signal.SIGHUP, signal.SIG_IGN)
- if preexec_fn is not None:
- preexec_fn()
- kwargs['preexec_fn'] = preexec_wrapper
-
- if dimensions is not None:
- kwargs['dimensions'] = dimensions
-
+ if command_with_path is None:
+ raise ExceptionPexpect('The command was not found or was not ' +
+ 'executable: %s.' % self.command)
+ self.command = command_with_path
+ self.args[0] = self.command
+
+ self.name = '<' + ' '.join(self.args) + '>'
+
+ assert self.pid is None, 'The pid member must be None.'
+ assert self.command is not None, 'The command member must not be None.'
+
+ kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
+ if self.ignore_sighup:
+ def preexec_wrapper():
+ "Set SIGHUP to be ignored, then call the real preexec_fn"
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ if preexec_fn is not None:
+ preexec_fn()
+ kwargs['preexec_fn'] = preexec_wrapper
+
+ if dimensions is not None:
+ kwargs['dimensions'] = dimensions
+
if self.encoding is not None:
# Encode command line using the specified encoding
self.args = [a if isinstance(a, bytes) else a.encode(self.encoding)
for a in self.args]
-
+
self.ptyproc = self._spawnpty(self.args, env=self.env,
cwd=self.cwd, **kwargs)
- self.pid = self.ptyproc.pid
- self.child_fd = self.ptyproc.fd
-
-
- self.terminated = False
- self.closed = False
-
+ self.pid = self.ptyproc.pid
+ self.child_fd = self.ptyproc.fd
+
+
+ self.terminated = False
+ self.closed = False
+
def _spawnpty(self, args, **kwargs):
'''Spawn a pty and return an instance of PtyProcess.'''
return ptyprocess.PtyProcess.spawn(args, **kwargs)
- def close(self, force=True):
- '''This closes the connection with the child application. Note that
- calling close() more than once is valid. This emulates standard Python
- behavior with files. Set force to True if you want to make sure that
- the child is terminated (SIGKILL is sent if the child ignores SIGHUP
- and SIGINT). '''
-
- self.flush()
+ def close(self, force=True):
+ '''This closes the connection with the child application. Note that
+ calling close() more than once is valid. This emulates standard Python
+ behavior with files. Set force to True if you want to make sure that
+ the child is terminated (SIGKILL is sent if the child ignores SIGHUP
+ and SIGINT). '''
+
+ self.flush()
with _wrap_ptyprocess_err():
# PtyProcessError may be raised if it is not possible to terminate
# the child.
self.ptyproc.close(force=force)
- self.isalive() # Update exit status from ptyproc
- self.child_fd = -1
+ self.isalive() # Update exit status from ptyproc
+ self.child_fd = -1
self.closed = True
-
- def isatty(self):
- '''This returns True if the file descriptor is open and connected to a
- tty(-like) device, else False.
-
- On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
- the child pty may not appear as a terminal device. This means
- methods such as setecho(), setwinsize(), getwinsize() may raise an
- IOError. '''
-
- return os.isatty(self.child_fd)
-
- def waitnoecho(self, timeout=-1):
- '''This waits until the terminal ECHO flag is set False. This returns
- True if the echo mode is off. This returns False if the ECHO flag was
- not set False before the timeout. This can be used to detect when the
- child is waiting for a password. Usually a child application will turn
- off echo mode when it is waiting for the user to enter a password. For
- example, instead of expecting the "password:" prompt you can wait for
- the child to set ECHO off::
-
- p = pexpect.spawn('ssh user@example.com')
- p.waitnoecho()
- p.sendline(mypassword)
-
- If timeout==-1 then this method will use the value in self.timeout.
- If timeout==None then this method to block until ECHO flag is False.
- '''
-
- if timeout == -1:
- timeout = self.timeout
- if timeout is not None:
- end_time = time.time() + timeout
- while True:
- if not self.getecho():
- return True
- if timeout < 0 and timeout is not None:
- return False
- if timeout is not None:
- timeout = end_time - time.time()
- time.sleep(0.1)
-
- def getecho(self):
- '''This returns the terminal echo mode. This returns True if echo is
- on or False if echo is off. Child applications that are expecting you
- to enter a password often set ECHO False. See waitnoecho().
-
- Not supported on platforms where ``isatty()`` returns False. '''
- return self.ptyproc.getecho()
-
- def setecho(self, state):
- '''This sets the terminal echo mode on or off. Note that anything the
- child sent before the echo will be lost, so you should be sure that
- your input buffer is empty before you call setecho(). For example, the
- following will work as expected::
-
- p = pexpect.spawn('cat') # Echo is on by default.
- p.sendline('1234') # We expect see this twice from the child...
- p.expect(['1234']) # ... once from the tty echo...
- p.expect(['1234']) # ... and again from cat itself.
- p.setecho(False) # Turn off tty echo
- p.sendline('abcd') # We will set this only once (echoed by cat).
- p.sendline('wxyz') # We will set this only once (echoed by cat)
- p.expect(['abcd'])
- p.expect(['wxyz'])
-
- The following WILL NOT WORK because the lines sent before the setecho
- will be lost::
-
- p = pexpect.spawn('cat')
- p.sendline('1234')
- p.setecho(False) # Turn off tty echo
- p.sendline('abcd') # We will set this only once (echoed by cat).
- p.sendline('wxyz') # We will set this only once (echoed by cat)
- p.expect(['1234'])
- p.expect(['1234'])
- p.expect(['abcd'])
- p.expect(['wxyz'])
-
-
- Not supported on platforms where ``isatty()`` returns False.
- '''
- return self.ptyproc.setecho(state)
-
- def read_nonblocking(self, size=1, timeout=-1):
- '''This reads at most size characters from the child application. It
- includes a timeout. If the read does not complete within the timeout
- period then a TIMEOUT exception is raised. If the end of file is read
- then an EOF exception will be raised. If a logfile is specified, a
- copy is written to that log.
-
- If timeout is None then the read may block indefinitely.
- If timeout is -1 then the self.timeout value is used. If timeout is 0
- then the child is polled and if there is no data immediately ready
- then this will raise a TIMEOUT exception.
-
- The timeout refers only to the amount of time to read at least one
- character. This is not affected by the 'size' parameter, so if you call
- read_nonblocking(size=100, timeout=30) and only one character is
- available right away then one character will be returned immediately.
- It will not wait for 30 seconds for another 99 characters to come in.
-
+
+ def isatty(self):
+ '''This returns True if the file descriptor is open and connected to a
+ tty(-like) device, else False.
+
+ On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
+ the child pty may not appear as a terminal device. This means
+ methods such as setecho(), setwinsize(), getwinsize() may raise an
+ IOError. '''
+
+ return os.isatty(self.child_fd)
+
+ def waitnoecho(self, timeout=-1):
+ '''This waits until the terminal ECHO flag is set False. This returns
+ True if the echo mode is off. This returns False if the ECHO flag was
+ not set False before the timeout. This can be used to detect when the
+ child is waiting for a password. Usually a child application will turn
+ off echo mode when it is waiting for the user to enter a password. For
+ example, instead of expecting the "password:" prompt you can wait for
+ the child to set ECHO off::
+
+ p = pexpect.spawn('ssh user@example.com')
+ p.waitnoecho()
+ p.sendline(mypassword)
+
+ If timeout==-1 then this method will use the value in self.timeout.
+        If timeout==None then this method will block until the ECHO flag is False.
+ '''
+
+ if timeout == -1:
+ timeout = self.timeout
+ if timeout is not None:
+ end_time = time.time() + timeout
+ while True:
+ if not self.getecho():
+ return True
+            if timeout is not None and timeout < 0:
+ return False
+ if timeout is not None:
+ timeout = end_time - time.time()
+ time.sleep(0.1)
+
+ def getecho(self):
+ '''This returns the terminal echo mode. This returns True if echo is
+ on or False if echo is off. Child applications that are expecting you
+ to enter a password often set ECHO False. See waitnoecho().
+
+ Not supported on platforms where ``isatty()`` returns False. '''
+ return self.ptyproc.getecho()
+
+ def setecho(self, state):
+ '''This sets the terminal echo mode on or off. Note that anything the
+ child sent before the echo will be lost, so you should be sure that
+ your input buffer is empty before you call setecho(). For example, the
+ following will work as expected::
+
+ p = pexpect.spawn('cat') # Echo is on by default.
+            p.sendline('1234') # We expect to see this twice from the child...
+ p.expect(['1234']) # ... once from the tty echo...
+ p.expect(['1234']) # ... and again from cat itself.
+ p.setecho(False) # Turn off tty echo
+ p.sendline('abcd') # We will see this only once (echoed by cat).
+ p.sendline('wxyz') # We will see this only once (echoed by cat).
+ p.expect(['abcd'])
+ p.expect(['wxyz'])
+
+ The following WILL NOT WORK because the lines sent before the setecho
+ will be lost::
+
+ p = pexpect.spawn('cat')
+ p.sendline('1234')
+ p.setecho(False) # Turn off tty echo
+ p.sendline('abcd') # We will see this only once (echoed by cat).
+ p.sendline('wxyz') # We will see this only once (echoed by cat).
+ p.expect(['1234'])
+ p.expect(['1234'])
+ p.expect(['abcd'])
+ p.expect(['wxyz'])
+
+
+ Not supported on platforms where ``isatty()`` returns False.
+ '''
+ return self.ptyproc.setecho(state)
+
+ def read_nonblocking(self, size=1, timeout=-1):
+ '''This reads at most size characters from the child application. It
+ includes a timeout. If the read does not complete within the timeout
+ period then a TIMEOUT exception is raised. If the end of file is read
+ then an EOF exception will be raised. If a logfile is specified, a
+ copy is written to that log.
+
+ If timeout is None then the read may block indefinitely.
+ If timeout is -1 then the self.timeout value is used. If timeout is 0
+ then the child is polled and if there is no data immediately ready
+ then this will raise a TIMEOUT exception.
+
+ The timeout refers only to the amount of time to read at least one
+ character. This is not affected by the 'size' parameter, so if you call
+ read_nonblocking(size=100, timeout=30) and only one character is
+ available right away then one character will be returned immediately.
+ It will not wait for 30 seconds for another 99 characters to come in.
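+
+ For example, a minimal sketch (``p`` is an already-spawned child)::
+
+ p = pexpect.spawn('cat')
+ p.sendline('hello')
+ p.read_nonblocking(size=100, timeout=2) # returns as soon as any data arrives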
+
On the other hand, if there are bytes available to read immediately,
all those bytes will be read (up to the buffer size). So, if the
buffer size is 1 megabyte and there is 1 megabyte of data available
to read, the buffer will be filled, regardless of timeout.
-
+
This is a wrapper around os.read(). It uses select.select() or
select.poll() to implement the timeout. '''
- if self.closed:
- raise ValueError('I/O operation on closed file.')
-
+ if self.closed:
+ raise ValueError('I/O operation on closed file.')
+
if self.use_poll:
def select(timeout):
return poll_ignore_interrupts([self.child_fd], timeout)
@@ -472,10 +472,10 @@ class spawn(SpawnBase):
return incoming
return incoming
- if timeout == -1:
- timeout = self.timeout
-
- if not self.isalive():
+ if timeout == -1:
+ timeout = self.timeout
+
+ if not self.isalive():
# The process is dead, but there may or may not be data
# available to read. Note that some systems such as Solaris
# do not give an EOF when the child dies. In fact, you can
@@ -486,20 +486,20 @@ class spawn(SpawnBase):
return super(spawn, self).read_nonblocking(size)
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
- elif self.__irix_hack:
- # Irix takes a long time before it realizes a child was terminated.
+ elif self.__irix_hack:
+ # Irix takes a long time before it realizes a child was terminated.
# Make sure that the timeout is at least 2 seconds.
- # FIXME So does this mean Irix systems are forced to always have
- # FIXME a 2 second delay when calling read_nonblocking? That sucks.
+ # FIXME So does this mean Irix systems are forced to always have
+ # FIXME a 2 second delay when calling read_nonblocking? That sucks.
if timeout is not None and timeout < 2:
timeout = 2
-
+
# Because of the select(0) check above, we know that no data
# is available right now. But if a non-zero timeout is given
# (possibly timeout=None), we call select() with a timeout.
if (timeout != 0) and select(timeout):
- return super(spawn, self).read_nonblocking(size)
-
+ return super(spawn, self).read_nonblocking(size)
+
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
@@ -508,353 +508,353 @@ class spawn(SpawnBase):
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
-
- def write(self, s):
- '''This is similar to send() except that there is no return value.
- '''
-
- self.send(s)
-
- def writelines(self, sequence):
- '''This calls write() for each element in the sequence. The sequence
- can be any iterable object producing strings, typically a list of
- strings. This does not add line separators. There is no return value.
- '''
-
- for s in sequence:
- self.write(s)
-
- def send(self, s):
- '''Sends string ``s`` to the child process, returning the number of
- bytes written. If a logfile is specified, a copy is written to that
- log.
-
- The default terminal input mode is canonical processing unless set
- otherwise by the child process. This allows backspace and other line
- processing to be performed prior to transmitting to the receiving
- program. As this is buffered, there is a limited size of such buffer.
-
- On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All
- other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024
- on OSX, 256 on OpenSolaris, and 1920 on FreeBSD.
-
- This value may be discovered using fpathconf(3)::
-
+
+ def write(self, s):
+ '''This is similar to send() except that there is no return value.
+ '''
+
+ self.send(s)
+
+ def writelines(self, sequence):
+ '''This calls write() for each element in the sequence. The sequence
+ can be any iterable object producing strings, typically a list of
+ strings. This does not add line separators. There is no return value.
+ '''
+
+ for s in sequence:
+ self.write(s)
+
+ def send(self, s):
+ '''Sends string ``s`` to the child process, returning the number of
+ bytes written. If a logfile is specified, a copy is written to that
+ log.
+
+ The default terminal input mode is canonical processing unless set
+ otherwise by the child process. This allows backspace and other line
+ processing to be performed prior to transmitting to the receiving
+ program. As this is buffered, there is a limited size of such buffer.
+
+ On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All
+ other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024
+ on OSX, 256 on OpenSolaris, and 1920 on FreeBSD.
+
+ This value may be discovered using fpathconf(3)::
+
>>> from os import fpathconf
>>> print(fpathconf(0, 'PC_MAX_CANON'))
256
-
- On such a system, only 256 bytes may be received per line. Any
- subsequent bytes received will be discarded. BEL (``'\a'``) is then
- sent to output if IMAXBEL (termios.h) is set by the tty driver.
- This is usually enabled by default. Linux does not honor this as
- an option -- it behaves as though it is always set on.
-
- Canonical input processing may be disabled altogether by executing
- a shell, then stty(1), before executing the final program::
-
+
+ On such a system, only 256 bytes may be received per line. Any
+ subsequent bytes received will be discarded. BEL (``'\a'``) is then
+ sent to output if IMAXBEL (termios.h) is set by the tty driver.
+ This is usually enabled by default. Linux does not honor this as
+ an option -- it behaves as though it is always set on.
+
+ Canonical input processing may be disabled altogether by executing
+ a shell, then stty(1), before executing the final program::
+
>>> bash = pexpect.spawn('/bin/bash', echo=False)
>>> bash.sendline('stty -icanon')
>>> bash.sendline('base64')
>>> bash.sendline('x' * 5000)
- '''
-
+ '''
+
if self.delaybeforesend is not None:
time.sleep(self.delaybeforesend)
-
- s = self._coerce_send_string(s)
- self._log(s, 'send')
-
- b = self._encoder.encode(s, final=False)
- return os.write(self.child_fd, b)
-
- def sendline(self, s=''):
- '''Wraps send(), sending string ``s`` to child process, with
- ``os.linesep`` automatically appended. Returns number of bytes
- written. Only a limited number of bytes may be sent for each
- line in the default terminal mode; see the docstring of :meth:`send`.
- '''
+
+ s = self._coerce_send_string(s)
+ self._log(s, 'send')
+
+ b = self._encoder.encode(s, final=False)
+ return os.write(self.child_fd, b)
+
+ def sendline(self, s=''):
+ '''Wraps send(), sending string ``s`` to child process, with
+ ``os.linesep`` automatically appended. Returns number of bytes
+ written. Only a limited number of bytes may be sent for each
+ line in the default terminal mode; see the docstring of :meth:`send`.
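+
+ A minimal sketch::
+
+ p = pexpect.spawn('cat')
+ n = p.sendline('hello') # n counts 'hello' plus os.linesep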
+ '''
s = self._coerce_send_string(s)
return self.send(s + self.linesep)
-
- def _log_control(self, s):
- """Write control characters to the appropriate log files"""
- if self.encoding is not None:
- s = s.decode(self.encoding, 'replace')
- self._log(s, 'send')
-
- def sendcontrol(self, char):
- '''Helper method that wraps send() with mnemonic access for sending control
- character to the child (such as Ctrl-C or Ctrl-D). For example, to send
- Ctrl-G (ASCII 7, bell, '\a')::
-
- child.sendcontrol('g')
-
- See also, sendintr() and sendeof().
- '''
- n, byte = self.ptyproc.sendcontrol(char)
- self._log_control(byte)
- return n
-
- def sendeof(self):
- '''This sends an EOF to the child. This sends a character which causes
- the pending parent output buffer to be sent to the waiting child
- program without waiting for end-of-line. If it is the first character
- of the line, the read() in the user program returns 0, which signifies
- end-of-file. This means that, to work as expected, sendeof() has to be
- called at the beginning of a line. This method does not send a newline.
- It is the responsibility of the caller to ensure the eof is sent at the
- beginning of a line. '''
-
- n, byte = self.ptyproc.sendeof()
- self._log_control(byte)
-
- def sendintr(self):
- '''This sends a SIGINT to the child. It does not require
- the SIGINT to be the first character on a line. '''
-
- n, byte = self.ptyproc.sendintr()
- self._log_control(byte)
-
- @property
- def flag_eof(self):
- return self.ptyproc.flag_eof
-
- @flag_eof.setter
- def flag_eof(self, value):
- self.ptyproc.flag_eof = value
-
- def eof(self):
- '''This returns True if the EOF exception was ever raised.
- '''
- return self.flag_eof
-
- def terminate(self, force=False):
- '''This forces a child process to terminate. It starts nicely with
- SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
- returns True if the child was terminated. This returns False if the
- child could not be terminated. '''
-
- if not self.isalive():
- return True
- try:
- self.kill(signal.SIGHUP)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGCONT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGINT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- if force:
- self.kill(signal.SIGKILL)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
- return False
- except OSError:
- # I think there are kernel timing issues that sometimes cause
- # this to happen. I think isalive() reports True, but the
- # process is dead to the kernel.
- # Make one last attempt to see if the kernel is up to date.
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
-
- def wait(self):
- '''This waits until the child exits. This is a blocking call. This will
- not read any data from the child, so this will block forever if the
- child has unread output and has terminated. In other words, the child
- may have printed output then called exit(), but the child is
- technically still alive until its output is read by the parent.
-
- This method is non-blocking if :meth:`wait` has already been called
- previously or the :meth:`isalive` method returns False. It simply returns
- the previously determined exit status.
- '''
-
- ptyproc = self.ptyproc
- with _wrap_ptyprocess_err():
- # exception may occur if "Is some other process attempting
- # job control with our child pid?"
- exitstatus = ptyproc.wait()
- self.status = ptyproc.status
- self.exitstatus = ptyproc.exitstatus
- self.signalstatus = ptyproc.signalstatus
- self.terminated = True
-
- return exitstatus
-
- def isalive(self):
- '''This tests if the child process is running or not. This is
- non-blocking. If the child was terminated then this will read the
- exitstatus or signalstatus of the child. This returns True if the child
- process appears to be running or False if not. It can take literally
- SECONDS for Solaris to return the right status. '''
-
- ptyproc = self.ptyproc
- with _wrap_ptyprocess_err():
- alive = ptyproc.isalive()
-
- if not alive:
- self.status = ptyproc.status
- self.exitstatus = ptyproc.exitstatus
- self.signalstatus = ptyproc.signalstatus
- self.terminated = True
-
- return alive
-
- def kill(self, sig):
-
- '''This sends the given signal to the child application. In keeping
- with UNIX tradition it has a misleading name. It does not necessarily
- kill the child unless you send the right signal. '''
-
- # Same as os.kill, but the pid is given for you.
- if self.isalive():
- os.kill(self.pid, sig)
-
- def getwinsize(self):
- '''This returns the terminal window size of the child tty. The return
- value is a tuple of (rows, cols). '''
- return self.ptyproc.getwinsize()
-
- def setwinsize(self, rows, cols):
- '''This sets the terminal window size of the child tty. This will cause
- a SIGWINCH signal to be sent to the child. This does not change the
- physical window size. It changes the size reported to TTY-aware
- applications like vi or curses -- applications that respond to the
- SIGWINCH signal. '''
- return self.ptyproc.setwinsize(rows, cols)
-
-
- def interact(self, escape_character=chr(29),
- input_filter=None, output_filter=None):
-
- '''This gives control of the child process to the interactive user (the
- human at the keyboard). Keystrokes are sent to the child process, and
- the stdout and stderr output of the child process is printed. This
- simply echoes the child stdout and child stderr to the real stdout and
- it echoes the real stdin to the child stdin. When the user types the
- escape_character this method will return None. The escape_character
- will not be transmitted. The default escape_character is
- ``Ctrl-]``, the same as used by BSD telnet. To prevent
- escaping, escape_character may be set to None.
-
- If a logfile is specified, then the data sent and received from the
- child process in interact mode is duplicated to the given log.
-
- You may pass in optional input and output filter functions. These
+
+ def _log_control(self, s):
+ """Write control characters to the appropriate log files"""
+ if self.encoding is not None:
+ s = s.decode(self.encoding, 'replace')
+ self._log(s, 'send')
+
+ def sendcontrol(self, char):
+ '''Helper method that wraps send() with mnemonic access for sending control
+ character to the child (such as Ctrl-C or Ctrl-D). For example, to send
+ Ctrl-G (ASCII 7, bell, '\a')::
+
+ child.sendcontrol('g')
+
+ See also, sendintr() and sendeof().
+ '''
+ n, byte = self.ptyproc.sendcontrol(char)
+ self._log_control(byte)
+ return n
+
+ def sendeof(self):
+ '''This sends an EOF to the child. This sends a character which causes
+ the pending parent output buffer to be sent to the waiting child
+ program without waiting for end-of-line. If it is the first character
+ of the line, the read() in the user program returns 0, which signifies
+ end-of-file. This means that, to work as expected, sendeof() has to be
+ called at the beginning of a line. This method does not send a newline.
+ It is the responsibility of the caller to ensure the eof is sent at the
+ beginning of a line. '''
+
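+ # A minimal usage sketch: feed one line to a line-counting child, then
+ # signal end-of-input at the start of the next line:
+ # p = pexpect.spawn('wc -l')
+ # p.sendline('one line')
+ # p.sendeof() # wc sees EOF and prints the line count
+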
+ n, byte = self.ptyproc.sendeof()
+ self._log_control(byte)
+
+ def sendintr(self):
+ '''This sends a SIGINT to the child. It does not require
+ the SIGINT to be the first character on a line. '''
+
+ n, byte = self.ptyproc.sendintr()
+ self._log_control(byte)
+
+ @property
+ def flag_eof(self):
+ return self.ptyproc.flag_eof
+
+ @flag_eof.setter
+ def flag_eof(self, value):
+ self.ptyproc.flag_eof = value
+
+ def eof(self):
+ '''This returns True if the EOF exception was ever raised.
+ '''
+ return self.flag_eof
+
+ def terminate(self, force=False):
+ '''This forces a child process to terminate. It starts nicely with
+ SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
+ returns True if the child was terminated. This returns False if the
+ child could not be terminated. '''
+
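+ # A hedged usage sketch: try the polite signals first and only fall
+ # back to SIGKILL if the child survives them:
+ # ok = child.terminate()
+ # ok = ok or child.terminate(force=True)
+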
+ if not self.isalive():
+ return True
+ try:
+ self.kill(signal.SIGHUP)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ self.kill(signal.SIGCONT)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ self.kill(signal.SIGINT)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ if force:
+ self.kill(signal.SIGKILL)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ else:
+ return False
+ return False
+ except OSError:
+ # I think there are kernel timing issues that sometimes cause
+ # this to happen. I think isalive() reports True, but the
+ # process is dead to the kernel.
+ # Make one last attempt to see if the kernel is up to date.
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ else:
+ return False
+
+ def wait(self):
+ '''This waits until the child exits. This is a blocking call. This will
+ not read any data from the child, so this will block forever if the
+ child has unread output and has terminated. In other words, the child
+ may have printed output then called exit(), but the child is
+ technically still alive until its output is read by the parent.
+
+ This method is non-blocking if :meth:`wait` has already been called
+ previously or the :meth:`isalive` method returns False. It simply returns
+ the previously determined exit status.
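+
+ A minimal sketch (drain the output first so wait() cannot block)::
+
+ p = pexpect.spawn('ls')
+ p.expect(pexpect.EOF) # read everything the child printed
+ status = p.wait()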
+ '''
+
+ ptyproc = self.ptyproc
+ with _wrap_ptyprocess_err():
+ # exception may occur if "Is some other process attempting
+ # job control with our child pid?"
+ exitstatus = ptyproc.wait()
+ self.status = ptyproc.status
+ self.exitstatus = ptyproc.exitstatus
+ self.signalstatus = ptyproc.signalstatus
+ self.terminated = True
+
+ return exitstatus
+
+ def isalive(self):
+ '''This tests if the child process is running or not. This is
+ non-blocking. If the child was terminated then this will read the
+ exitstatus or signalstatus of the child. This returns True if the child
+ process appears to be running or False if not. It can take literally
+ SECONDS for Solaris to return the right status. '''
+
+ ptyproc = self.ptyproc
+ with _wrap_ptyprocess_err():
+ alive = ptyproc.isalive()
+
+ if not alive:
+ self.status = ptyproc.status
+ self.exitstatus = ptyproc.exitstatus
+ self.signalstatus = ptyproc.signalstatus
+ self.terminated = True
+
+ return alive
+
+ def kill(self, sig):
+
+ '''This sends the given signal to the child application. In keeping
+ with UNIX tradition it has a misleading name. It does not necessarily
+ kill the child unless you send the right signal. '''
+
+ # Same as os.kill, but the pid is given for you.
+ if self.isalive():
+ os.kill(self.pid, sig)
+
+ def getwinsize(self):
+ '''This returns the terminal window size of the child tty. The return
+ value is a tuple of (rows, cols). '''
+ return self.ptyproc.getwinsize()
+
+ def setwinsize(self, rows, cols):
+ '''This sets the terminal window size of the child tty. This will cause
+ a SIGWINCH signal to be sent to the child. This does not change the
+ physical window size. It changes the size reported to TTY-aware
+ applications like vi or curses -- applications that respond to the
+ SIGWINCH signal. '''
+ return self.ptyproc.setwinsize(rows, cols)
+
+
+ def interact(self, escape_character=chr(29),
+ input_filter=None, output_filter=None):
+
+ '''This gives control of the child process to the interactive user (the
+ human at the keyboard). Keystrokes are sent to the child process, and
+ the stdout and stderr output of the child process is printed. This
+ simply echoes the child stdout and child stderr to the real stdout and
+ it echoes the real stdin to the child stdin. When the user types the
+ escape_character this method will return None. The escape_character
+ will not be transmitted. The default escape_character is
+ ``Ctrl-]``, the same as used by BSD telnet. To prevent
+ escaping, escape_character may be set to None.
+
+ If a logfile is specified, then the data sent and received from the
+ child process in interact mode is duplicated to the given log.
+
+ You may pass in optional input and output filter functions. These
        functions should take a bytes array and return a bytes array too. Even
        with ``encoding='utf-8'`` support, :meth:`interact` will always pass
input_filter and output_filter bytes. You may need to wrap your
function to decode and encode back to UTF-8.
-
+
The output_filter will be passed all the output from the child process.
The input_filter will be passed all the keyboard input from the user.
The input_filter is run BEFORE the check for the escape_character.
- Note that if you change the window size of the parent the SIGWINCH
- signal will not be passed through to the child. If you want the child
- window size to change when the parent's window size changes then do
- something like the following example::
-
- import pexpect, struct, fcntl, termios, signal, sys
- def sigwinch_passthrough (sig, data):
- s = struct.pack("HHHH", 0, 0, 0, 0)
- a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
- termios.TIOCGWINSZ , s))
+ Note that if you change the window size of the parent the SIGWINCH
+ signal will not be passed through to the child. If you want the child
+ window size to change when the parent's window size changes then do
+ something like the following example::
+
+ import pexpect, struct, fcntl, termios, signal, sys
+ def sigwinch_passthrough (sig, data):
+ s = struct.pack("HHHH", 0, 0, 0, 0)
+ a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
+ termios.TIOCGWINSZ , s))
if not p.closed:
p.setwinsize(a[0],a[1])
# Note this 'p' is global and used in sigwinch_passthrough.
- p = pexpect.spawn('/bin/bash')
- signal.signal(signal.SIGWINCH, sigwinch_passthrough)
- p.interact()
- '''
-
- # Flush the buffer.
- self.write_to_stdout(self.buffer)
- self.stdout.flush()
+ p = pexpect.spawn('/bin/bash')
+ signal.signal(signal.SIGWINCH, sigwinch_passthrough)
+ p.interact()
+ '''
+
+ # Flush the buffer.
+ self.write_to_stdout(self.buffer)
+ self.stdout.flush()
self._buffer = self.buffer_type()
- mode = tty.tcgetattr(self.STDIN_FILENO)
- tty.setraw(self.STDIN_FILENO)
- if escape_character is not None and PY3:
- escape_character = escape_character.encode('latin-1')
- try:
- self.__interact_copy(escape_character, input_filter, output_filter)
- finally:
- tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
-
- def __interact_writen(self, fd, data):
- '''This is used by the interact() method.
- '''
-
- while data != b'' and self.isalive():
- n = os.write(fd, data)
- data = data[n:]
-
- def __interact_read(self, fd):
- '''This is used by the interact() method.
- '''
-
- return os.read(fd, 1000)
-
+ mode = tty.tcgetattr(self.STDIN_FILENO)
+ tty.setraw(self.STDIN_FILENO)
+ if escape_character is not None and PY3:
+ escape_character = escape_character.encode('latin-1')
+ try:
+ self.__interact_copy(escape_character, input_filter, output_filter)
+ finally:
+ tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
+
+ def __interact_writen(self, fd, data):
+ '''This is used by the interact() method.
+ '''
+
+ while data != b'' and self.isalive():
+ n = os.write(fd, data)
+ data = data[n:]
+
+ def __interact_read(self, fd):
+ '''This is used by the interact() method.
+ '''
+
+ return os.read(fd, 1000)
+
def __interact_copy(
self, escape_character=None, input_filter=None, output_filter=None
):
-
- '''This is used by the interact() method.
- '''
-
- while self.isalive():
+
+ '''This is used by the interact() method.
+ '''
+
+ while self.isalive():
if self.use_poll:
r = poll_ignore_interrupts([self.child_fd, self.STDIN_FILENO])
else:
r, w, e = select_ignore_interrupts(
[self.child_fd, self.STDIN_FILENO], [], []
)
- if self.child_fd in r:
- try:
- data = self.__interact_read(self.child_fd)
- except OSError as err:
- if err.args[0] == errno.EIO:
- # Linux-style EOF
- break
- raise
- if data == b'':
- # BSD-style EOF
- break
- if output_filter:
- data = output_filter(data)
- self._log(data, 'read')
- os.write(self.STDOUT_FILENO, data)
- if self.STDIN_FILENO in r:
- data = self.__interact_read(self.STDIN_FILENO)
- if input_filter:
- data = input_filter(data)
- i = -1
- if escape_character is not None:
- i = data.rfind(escape_character)
- if i != -1:
- data = data[:i]
- if data:
- self._log(data, 'send')
- self.__interact_writen(self.child_fd, data)
- break
- self._log(data, 'send')
- self.__interact_writen(self.child_fd, data)
-
-
-def spawnu(*args, **kwargs):
- """Deprecated: pass encoding to spawn() instead."""
- kwargs.setdefault('encoding', 'utf-8')
- return spawn(*args, **kwargs)
+ if self.child_fd in r:
+ try:
+ data = self.__interact_read(self.child_fd)
+ except OSError as err:
+ if err.args[0] == errno.EIO:
+ # Linux-style EOF
+ break
+ raise
+ if data == b'':
+ # BSD-style EOF
+ break
+ if output_filter:
+ data = output_filter(data)
+ self._log(data, 'read')
+ os.write(self.STDOUT_FILENO, data)
+ if self.STDIN_FILENO in r:
+ data = self.__interact_read(self.STDIN_FILENO)
+ if input_filter:
+ data = input_filter(data)
+ i = -1
+ if escape_character is not None:
+ i = data.rfind(escape_character)
+ if i != -1:
+ data = data[:i]
+ if data:
+ self._log(data, 'send')
+ self.__interact_writen(self.child_fd, data)
+ break
+ self._log(data, 'send')
+ self.__interact_writen(self.child_fd, data)
+
+
+def spawnu(*args, **kwargs):
+ """Deprecated: pass encoding to spawn() instead."""
+ kwargs.setdefault('encoding', 'utf-8')
+ return spawn(*args, **kwargs)
diff --git a/contrib/python/pexpect/pexpect/pxssh.py b/contrib/python/pexpect/pexpect/pxssh.py
index 00e10aad07..3d53bd9746 100644
--- a/contrib/python/pexpect/pexpect/pxssh.py
+++ b/contrib/python/pexpect/pexpect/pxssh.py
@@ -1,38 +1,38 @@
-'''This class extends pexpect.spawn to specialize setting up SSH connections.
-This adds methods for login, logout, and expecting the shell prompt.
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-from pexpect import ExceptionPexpect, TIMEOUT, EOF, spawn
-import time
-import os
+'''This class extends pexpect.spawn to specialize setting up SSH connections.
+This adds methods for login, logout, and expecting the shell prompt.
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+from pexpect import ExceptionPexpect, TIMEOUT, EOF, spawn
+import time
+import os
import sys
import re
-
-__all__ = ['ExceptionPxssh', 'pxssh']
-
-# Exception classes used by this module.
-class ExceptionPxssh(ExceptionPexpect):
- '''Raised for pxssh exceptions.
- '''
-
+
+__all__ = ['ExceptionPxssh', 'pxssh']
+
+# Exception classes used by this module.
+class ExceptionPxssh(ExceptionPexpect):
+ '''Raised for pxssh exceptions.
+ '''
+
if sys.version_info > (3, 0):
from shlex import quote
else:
@@ -49,240 +49,240 @@ else:
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
-class pxssh (spawn):
- '''This class extends pexpect.spawn to specialize setting up SSH
- connections. This adds methods for login, logout, and expecting the shell
- prompt. It does various tricky things to handle many situations in the SSH
- login process. For example, if the session is your first login, then pxssh
- automatically accepts the remote certificate; or if you have public key
- authentication setup then pxssh won't wait for the password prompt.
-
- pxssh uses the shell prompt to synchronize output from the remote host. In
- order to make this more robust it sets the shell prompt to something more
- unique than just $ or #. This should work on most Bourne/Bash or Csh style
- shells.
-
- Example that runs a few commands on a remote server and prints the result::
-
+class pxssh (spawn):
+ '''This class extends pexpect.spawn to specialize setting up SSH
+ connections. This adds methods for login, logout, and expecting the shell
+ prompt. It does various tricky things to handle many situations in the SSH
+ login process. For example, if the session is your first login, then pxssh
+ automatically accepts the remote certificate; or if you have public key
+ authentication setup then pxssh won't wait for the password prompt.
+
+ pxssh uses the shell prompt to synchronize output from the remote host. In
+ order to make this more robust it sets the shell prompt to something more
+ unique than just $ or #. This should work on most Bourne/Bash or Csh style
+ shells.
+
+ Example that runs a few commands on a remote server and prints the result::
+
from pexpect import pxssh
- import getpass
- try:
- s = pxssh.pxssh()
- hostname = raw_input('hostname: ')
- username = raw_input('username: ')
- password = getpass.getpass('password: ')
- s.login(hostname, username, password)
- s.sendline('uptime') # run a command
- s.prompt() # match the prompt
- print(s.before) # print everything before the prompt.
- s.sendline('ls -l')
- s.prompt()
- print(s.before)
- s.sendline('df')
- s.prompt()
- print(s.before)
- s.logout()
- except pxssh.ExceptionPxssh as e:
- print("pxssh failed on login.")
- print(e)
-
- Example showing how to specify SSH options::
-
+ import getpass
+ try:
+ s = pxssh.pxssh()
+ hostname = raw_input('hostname: ')
+ username = raw_input('username: ')
+ password = getpass.getpass('password: ')
+ s.login(hostname, username, password)
+ s.sendline('uptime') # run a command
+ s.prompt() # match the prompt
+ print(s.before) # print everything before the prompt.
+ s.sendline('ls -l')
+ s.prompt()
+ print(s.before)
+ s.sendline('df')
+ s.prompt()
+ print(s.before)
+ s.logout()
+ except pxssh.ExceptionPxssh as e:
+ print("pxssh failed on login.")
+ print(e)
+
+ Example showing how to specify SSH options::
+
from pexpect import pxssh
- s = pxssh.pxssh(options={
- "StrictHostKeyChecking": "no",
- "UserKnownHostsFile": "/dev/null"})
- ...
-
- Note that if you have ssh-agent running while doing development with pxssh
- then this can lead to a lot of confusion. Many X display managers (xdm,
- gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
- dialog box popup asking for a password during development. You should turn
- off any key agents during testing. The 'force_password' attribute will turn
- off public key authentication. This will only work if the remote SSH server
- is configured to allow password logins. Example of using 'force_password'
- attribute::
-
- s = pxssh.pxssh()
- s.force_password = True
- hostname = raw_input('hostname: ')
- username = raw_input('username: ')
- password = getpass.getpass('password: ')
- s.login (hostname, username, password)
+ s = pxssh.pxssh(options={
+ "StrictHostKeyChecking": "no",
+ "UserKnownHostsFile": "/dev/null"})
+ ...
+
+ Note that if you have ssh-agent running while doing development with pxssh
+ then this can lead to a lot of confusion. Many X display managers (xdm,
+ gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
+ dialog box popup asking for a password during development. You should turn
+ off any key agents during testing. The 'force_password' attribute will turn
+ off public key authentication. This will only work if the remote SSH server
+ is configured to allow password logins. Example of using 'force_password'
+ attribute::
+
+ s = pxssh.pxssh()
+ s.force_password = True
+ hostname = raw_input('hostname: ')
+ username = raw_input('username: ')
+ password = getpass.getpass('password: ')
+ s.login (hostname, username, password)
`debug_command_string` is only for the test suite to confirm that the string
    generated for SSH is correct; using this will not allow you to do
anything other than get a string back from `pxssh.pxssh.login()`.
- '''
-
- def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None,
- logfile=None, cwd=None, env=None, ignore_sighup=True, echo=True,
+ '''
+
+ def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None,
+ logfile=None, cwd=None, env=None, ignore_sighup=True, echo=True,
options={}, encoding=None, codec_errors='strict',
debug_command_string=False, use_poll=False):
-
- spawn.__init__(self, None, timeout=timeout, maxread=maxread,
- searchwindowsize=searchwindowsize, logfile=logfile,
- cwd=cwd, env=env, ignore_sighup=ignore_sighup, echo=echo,
+
+ spawn.__init__(self, None, timeout=timeout, maxread=maxread,
+ searchwindowsize=searchwindowsize, logfile=logfile,
+ cwd=cwd, env=env, ignore_sighup=ignore_sighup, echo=echo,
encoding=encoding, codec_errors=codec_errors, use_poll=use_poll)
-
- self.name = '<pxssh>'
-
- #SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
- #slightly different string than the regular expression to match it. This
- #is because when you set the prompt the command will echo back, but we
- #don't want to match the echoed command. So if we make the set command
- #slightly different than the regex we eliminate the problem. To make the
- #set command different we add a backslash in front of $. The $ doesn't
- #need to be escaped, but it doesn't hurt and serves to make the set
- #prompt command different than the regex.
-
- # used to match the command-line prompt
+
+ self.name = '<pxssh>'
+
+ #SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
+ #slightly different string than the regular expression to match it. This
+ #is because when you set the prompt the command will echo back, but we
+ #don't want to match the echoed command. So if we make the set command
+ #slightly different than the regex we eliminate the problem. To make the
+ #set command different we add a backslash in front of $. The $ doesn't
+ #need to be escaped, but it doesn't hurt and serves to make the set
+ #prompt command different than the regex.
+
+ # used to match the command-line prompt
self.UNIQUE_PROMPT = r"\[PEXPECT\][\$\#] "
- self.PROMPT = self.UNIQUE_PROMPT
-
- # used to set shell command-line prompt to UNIQUE_PROMPT.
+ self.PROMPT = self.UNIQUE_PROMPT
+
+ # used to set shell command-line prompt to UNIQUE_PROMPT.
self.PROMPT_SET_SH = r"PS1='[PEXPECT]\$ '"
self.PROMPT_SET_CSH = r"set prompt='[PEXPECT]\$ '"
- self.SSH_OPTS = ("-o'RSAAuthentication=no'"
- + " -o 'PubkeyAuthentication=no'")
-# Disabling host key checking makes you vulnerable to MITM attacks.
-# + " -o 'StrictHostKeyChecking=no'"
-# + " -o 'UserKnownHostsFile /dev/null' ")
- # Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
- # displaying a GUI password dialog. I have not figured out how to
- # disable only SSH_ASKPASS without also disabling X11 forwarding.
- # Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
- #self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
- self.force_password = False
+ self.SSH_OPTS = ("-o'RSAAuthentication=no'"
+ + " -o 'PubkeyAuthentication=no'")
+# Disabling host key checking makes you vulnerable to MITM attacks.
+# + " -o 'StrictHostKeyChecking=no'"
+# + " -o 'UserKnownHostsFile /dev/null' ")
+ # Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
+ # displaying a GUI password dialog. I have not figured out how to
+ # disable only SSH_ASKPASS without also disabling X11 forwarding.
+ # Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
+ #self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
+ self.force_password = False
self.debug_command_string = debug_command_string
-
- # User-defined SSH options, e.g.,
- # ssh.options = dict(StrictHostKeyChecking="no",UserKnownHostsFile="/dev/null")
- self.options = options
-
- def levenshtein_distance(self, a, b):
- '''This calculates the Levenshtein distance between a and b.
- '''
-
- n, m = len(a), len(b)
- if n > m:
- a,b = b,a
- n,m = m,n
- current = range(n+1)
- for i in range(1,m+1):
- previous, current = current, [i]+[0]*n
- for j in range(1,n+1):
- add, delete = previous[j]+1, current[j-1]+1
- change = previous[j-1]
- if a[j-1] != b[i-1]:
- change = change + 1
- current[j] = min(add, delete, change)
- return current[n]
-
- def try_read_prompt(self, timeout_multiplier):
- '''This facilitates using communication timeouts to perform
- synchronization as quickly as possible, while supporting high latency
- connections with a tunable worst case performance. Fast connections
- should be read almost immediately. Worst case performance for this
- method is timeout_multiplier * 3 seconds.
- '''
-
- # maximum time allowed to read the first response
- first_char_timeout = timeout_multiplier * 0.5
-
- # maximum time allowed between subsequent characters
- inter_char_timeout = timeout_multiplier * 0.1
-
- # maximum time for reading the entire prompt
- total_timeout = timeout_multiplier * 3.0
-
- prompt = self.string_type()
- begin = time.time()
- expired = 0.0
- timeout = first_char_timeout
-
- while expired < total_timeout:
- try:
- prompt += self.read_nonblocking(size=1, timeout=timeout)
- expired = time.time() - begin # update total time expired
- timeout = inter_char_timeout
- except TIMEOUT:
- break
-
- return prompt
-
- def sync_original_prompt (self, sync_multiplier=1.0):
- '''This attempts to find the prompt. Basically, press enter and record
- the response; press enter again and record the response; if the two
- responses are similar then assume we are at the original prompt.
- This can be a slow function. Worst case with the default sync_multiplier
- can take 12 seconds. Low latency connections are more likely to fail
- with a low sync_multiplier. Best case sync time gets worse with a
- high sync multiplier (500 ms with default). '''
-
- # All of these timing pace values are magic.
- # I came up with these based on what seemed reliable for
- # connecting to a heavily loaded machine I have.
- self.sendline()
- time.sleep(0.1)
-
- try:
- # Clear the buffer before getting the prompt.
- self.try_read_prompt(sync_multiplier)
- except TIMEOUT:
- pass
-
- self.sendline()
- x = self.try_read_prompt(sync_multiplier)
-
- self.sendline()
- a = self.try_read_prompt(sync_multiplier)
-
- self.sendline()
- b = self.try_read_prompt(sync_multiplier)
-
- ld = self.levenshtein_distance(a,b)
- len_a = len(a)
- if len_a == 0:
- return False
- if float(ld)/len_a < 0.4:
- return True
- return False
-
- ### TODO: This is getting messy and I'm pretty sure this isn't perfect.
- ### TODO: I need to draw a flow chart for this.
+
+ # User-defined SSH options, e.g.,
+ # ssh.options = dict(StrictHostKeyChecking="no",UserKnownHostsFile="/dev/null")
+ self.options = options
+
+ def levenshtein_distance(self, a, b):
+ '''This calculates the Levenshtein distance between a and b.
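+
+ For example (a quick sanity check)::
+
+ s.levenshtein_distance('kitten', 'sitting') # -> 3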
+ '''
+
+ n, m = len(a), len(b)
+ if n > m:
+ a,b = b,a
+ n,m = m,n
+ current = range(n+1)
+ for i in range(1,m+1):
+ previous, current = current, [i]+[0]*n
+ for j in range(1,n+1):
+ add, delete = previous[j]+1, current[j-1]+1
+ change = previous[j-1]
+ if a[j-1] != b[i-1]:
+ change = change + 1
+ current[j] = min(add, delete, change)
+ return current[n]
+
+ def try_read_prompt(self, timeout_multiplier):
+ '''This facilitates using communication timeouts to perform
+ synchronization as quickly as possible, while supporting high latency
+ connections with a tunable worst case performance. Fast connections
+ should be read almost immediately. Worst case performance for this
+ method is timeout_multiplier * 3 seconds.
+ '''
+
+ # maximum time allowed to read the first response
+ first_char_timeout = timeout_multiplier * 0.5
+
+ # maximum time allowed between subsequent characters
+ inter_char_timeout = timeout_multiplier * 0.1
+
+ # maximum time for reading the entire prompt
+ total_timeout = timeout_multiplier * 3.0
+
+ prompt = self.string_type()
+ begin = time.time()
+ expired = 0.0
+ timeout = first_char_timeout
+
+ while expired < total_timeout:
+ try:
+ prompt += self.read_nonblocking(size=1, timeout=timeout)
+ expired = time.time() - begin # update total time expired
+ timeout = inter_char_timeout
+ except TIMEOUT:
+ break
+
+ return prompt
+
+ def sync_original_prompt (self, sync_multiplier=1.0):
+ '''This attempts to find the prompt. Basically, press enter and record
+ the response; press enter again and record the response; if the two
+ responses are similar then assume we are at the original prompt.
+ This can be a slow function. Worst case with the default sync_multiplier
+ can take 12 seconds. Low latency connections are more likely to fail
+ with a low sync_multiplier. Best case sync time gets worse with a
+ high sync multiplier (500 ms with default). '''
+
+ # All of these timing pace values are magic.
+ # I came up with these based on what seemed reliable for
+ # connecting to a heavily loaded machine I have.
+ self.sendline()
+ time.sleep(0.1)
+
+ try:
+ # Clear the buffer before getting the prompt.
+ self.try_read_prompt(sync_multiplier)
+ except TIMEOUT:
+ pass
+
+ self.sendline()
+ x = self.try_read_prompt(sync_multiplier)
+
+ self.sendline()
+ a = self.try_read_prompt(sync_multiplier)
+
+ self.sendline()
+ b = self.try_read_prompt(sync_multiplier)
+
+ ld = self.levenshtein_distance(a,b)
+ len_a = len(a)
+ if len_a == 0:
+ return False
+ if float(ld)/len_a < 0.4:
+ return True
+ return False
+
+ ### TODO: This is getting messy and I'm pretty sure this isn't perfect.
+ ### TODO: I need to draw a flow chart for this.
### TODO: Unit tests for SSH tunnels, remote SSH command exec, disabling original prompt sync
def login (self, server, username=None, password='', terminal_type='ansi',
- original_prompt=r"[#$]", login_timeout=10, port=None,
- auto_prompt_reset=True, ssh_key=None, quiet=True,
+ original_prompt=r"[#$]", login_timeout=10, port=None,
+ auto_prompt_reset=True, ssh_key=None, quiet=True,
sync_multiplier=1, check_local_ip=True,
password_regex=r'(?i)(?:password:)|(?:passphrase for key)',
ssh_tunnels={}, spawn_local_ssh=True,
sync_original_prompt=True, ssh_config=None, cmd='ssh'):
- '''This logs the user into the given server.
-
+ '''This logs the user into the given server.
+
It uses 'original_prompt' to try to find the prompt right after login.
When it finds the prompt it immediately tries to reset the prompt to
something more easily matched. The default 'original_prompt' is very
optimistic and is easily fooled. It's more reliable to try to match the original
- prompt as exactly as possible to prevent false matches by server
- strings such as the "Message Of The Day". On many systems you can
- disable the MOTD on the remote server by creating a zero-length file
- called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
- then this will not necessarily cause the login to fail. In the case of
- a timeout when looking for the prompt we assume that the original
- prompt was so weird that we could not match it, so we use a few tricks
- to guess when we have reached the prompt. Then we hope for the best and
- blindly try to reset the prompt to something more unique. If that fails
- then login() raises an :class:`ExceptionPxssh` exception.
-
- In some situations it is not possible or desirable to reset the
- original prompt. In this case, pass ``auto_prompt_reset=False`` to
- inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
- uses a unique prompt in the :meth:`prompt` method. If the original prompt is
- not reset then this will disable the :meth:`prompt` method unless you
- manually set the :attr:`PROMPT` attribute.
+ prompt as exactly as possible to prevent false matches by server
+ strings such as the "Message Of The Day". On many systems you can
+ disable the MOTD on the remote server by creating a zero-length file
+ called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
+ then this will not necessarily cause the login to fail. In the case of
+ a timeout when looking for the prompt we assume that the original
+ prompt was so weird that we could not match it, so we use a few tricks
+ to guess when we have reached the prompt. Then we hope for the best and
+ blindly try to reset the prompt to something more unique. If that fails
+ then login() raises an :class:`ExceptionPxssh` exception.
+
+ In some situations it is not possible or desirable to reset the
+ original prompt. In this case, pass ``auto_prompt_reset=False`` to
+ inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
+ uses a unique prompt in the :meth:`prompt` method. If the original prompt is
+ not reset then this will disable the :meth:`prompt` method unless you
+ manually set the :attr:`PROMPT` attribute.
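+
+ A minimal sketch (``hostname``, ``username``, ``password`` and the
+ prompt pattern are placeholders)::
+
+ s = pxssh.pxssh()
+ s.login(hostname, username, password, auto_prompt_reset=False)
+ s.PROMPT = r'myhost[#$] ' # hypothetical pattern for this host
+ s.prompt()
+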
Set ``password_regex`` if there is a MOTD message with `password` in it.
Changing this is like playing in traffic, don't (p)expect it to match straight
@@ -306,27 +306,27 @@ class pxssh (spawn):
Alter the ``cmd`` to change the ssh client used, or to prepend it with network
        namespaces. For example, ``cmd="ip netns exec vlan2 ssh"`` executes ssh in the
        network namespace named ``vlan2``.
- '''
+ '''
session_regex_array = ["(?i)are you sure you want to continue connecting", original_prompt, password_regex, "(?i)permission denied", "(?i)terminal type", TIMEOUT]
session_init_regex_array = []
session_init_regex_array.extend(session_regex_array)
session_init_regex_array.extend(["(?i)connection closed by remote host", EOF])
-
- ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
- if quiet:
- ssh_options = ssh_options + ' -q'
- if not check_local_ip:
- ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
- if self.force_password:
- ssh_options = ssh_options + ' ' + self.SSH_OPTS
+
+ ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
+ if quiet:
+ ssh_options = ssh_options + ' -q'
+ if not check_local_ip:
+ ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
+ if self.force_password:
+ ssh_options = ssh_options + ' ' + self.SSH_OPTS
if ssh_config is not None:
if spawn_local_ssh and not os.path.isfile(ssh_config):
raise ExceptionPxssh('SSH config does not exist or is not a file.')
ssh_options = ssh_options + ' -F ' + ssh_config
- if port is not None:
- ssh_options = ssh_options + ' -p %s'%(str(port))
- if ssh_key is not None:
+ if port is not None:
+ ssh_options = ssh_options + ' -p %s'%(str(port))
+ if ssh_key is not None:
# Allow forwarding our SSH key to the current session
if ssh_key==True:
ssh_options = ssh_options + ' -A'
@@ -395,143 +395,143 @@ class pxssh (spawn):
cmd += " %s %s" % (ssh_options, server)
if self.debug_command_string:
return(cmd)
-
+
# Are we asking for a local ssh command or to spawn one in another session?
if spawn_local_ssh:
spawn._spawn(self, cmd)
else:
self.sendline(cmd)
- # This does not distinguish between a remote server 'password' prompt
- # and a local ssh 'passphrase' prompt (for unlocking a private key).
+ # This does not distinguish between a remote server 'password' prompt
+ # and a local ssh 'passphrase' prompt (for unlocking a private key).
i = self.expect(session_init_regex_array, timeout=login_timeout)
-
- # First phase
- if i==0:
- # New certificate -- always accept it.
- # This is what you get if SSH does not have the remote host's
- # public key stored in the 'known_hosts' cache.
- self.sendline("yes")
+
+ # First phase
+ if i==0:
+ # New certificate -- always accept it.
+ # This is what you get if SSH does not have the remote host's
+ # public key stored in the 'known_hosts' cache.
+ self.sendline("yes")
i = self.expect(session_regex_array)
- if i==2: # password or passphrase
- self.sendline(password)
+ if i==2: # password or passphrase
+ self.sendline(password)
i = self.expect(session_regex_array)
- if i==4:
- self.sendline(terminal_type)
+ if i==4:
+ self.sendline(terminal_type)
i = self.expect(session_regex_array)
if i==7:
self.close()
raise ExceptionPxssh('Could not establish connection to host')
-
- # Second phase
- if i==0:
- # This is weird. This should not happen twice in a row.
- self.close()
- raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
- elif i==1: # can occur if you have a public key pair set to authenticate.
- ### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
- pass
- elif i==2: # password prompt again
- # For incorrect passwords, some ssh servers will
- # ask for the password again, others return 'denied' right away.
- # If we get the password prompt again then this means
- # we didn't get the password right the first time.
- self.close()
- raise ExceptionPxssh('password refused')
- elif i==3: # permission denied -- password was bad.
- self.close()
- raise ExceptionPxssh('permission denied')
- elif i==4: # terminal type again? WTF?
- self.close()
- raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
- elif i==5: # Timeout
- #This is tricky... I presume that we are at the command-line prompt.
- #It may be that the shell prompt was so weird that we couldn't match
- #it. Or it may be that we couldn't log in for some other reason. I
- #can't be sure, but it's safe to guess that we did login because if
- #I presume wrong and we are not logged in then this should be caught
- #later when I try to set the shell prompt.
- pass
- elif i==6: # Connection closed by remote host
- self.close()
- raise ExceptionPxssh('connection closed')
- else: # Unexpected
- self.close()
- raise ExceptionPxssh('unexpected login response')
+
+ # Second phase
+ if i==0:
+ # This is weird. This should not happen twice in a row.
+ self.close()
+ raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
+ elif i==1: # can occur if you have a public key pair set to authenticate.
+ ### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
+ pass
+ elif i==2: # password prompt again
+ # For incorrect passwords, some ssh servers will
+ # ask for the password again, others return 'denied' right away.
+ # If we get the password prompt again then this means
+ # we didn't get the password right the first time.
+ self.close()
+ raise ExceptionPxssh('password refused')
+ elif i==3: # permission denied -- password was bad.
+ self.close()
+ raise ExceptionPxssh('permission denied')
+ elif i==4: # terminal type again? WTF?
+ self.close()
+ raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
+ elif i==5: # Timeout
+ #This is tricky... I presume that we are at the command-line prompt.
+ #It may be that the shell prompt was so weird that we couldn't match
+ #it. Or it may be that we couldn't log in for some other reason. I
+ #can't be sure, but it's safe to guess that we did login because if
+ #I presume wrong and we are not logged in then this should be caught
+ #later when I try to set the shell prompt.
+ pass
+ elif i==6: # Connection closed by remote host
+ self.close()
+ raise ExceptionPxssh('connection closed')
+ else: # Unexpected
+ self.close()
+ raise ExceptionPxssh('unexpected login response')
if sync_original_prompt:
if not self.sync_original_prompt(sync_multiplier):
self.close()
raise ExceptionPxssh('could not synchronize with original prompt')
- # We appear to be in.
- # set shell prompt to something unique.
- if auto_prompt_reset:
- if not self.set_unique_prompt():
- self.close()
- raise ExceptionPxssh('could not set shell prompt '
- '(received: %r, expected: %r).' % (
- self.before, self.PROMPT,))
- return True
-
- def logout (self):
- '''Sends exit to the remote shell.
-
- If there are stopped jobs then this automatically sends exit twice.
- '''
- self.sendline("exit")
- index = self.expect([EOF, "(?i)there are stopped jobs"])
- if index==1:
- self.sendline("exit")
- self.expect(EOF)
- self.close()
-
- def prompt(self, timeout=-1):
- '''Match the next shell prompt.
-
- This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
- method. Note that if you called :meth:`login` with
- ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
- set the :attr:`PROMPT` attribute to a regex that it will use for
- matching the prompt.
-
- Calling :meth:`prompt` will erase the contents of the :attr:`before`
- attribute even if no prompt is ever matched. If timeout is not given or
- it is set to -1 then self.timeout is used.
-
- :return: True if the shell prompt was matched, False if the timeout was
- reached.
- '''
-
- if timeout == -1:
- timeout = self.timeout
- i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
- if i==1:
- return False
- return True
-
- def set_unique_prompt(self):
- '''This sets the remote prompt to something more unique than ``#`` or ``$``.
- This makes it easier for the :meth:`prompt` method to match the shell prompt
- unambiguously. This method is called automatically by the :meth:`login`
- method, but you may want to call it manually if you somehow reset the
- shell prompt. For example, if you 'su' to a different user then you
- will need to manually reset the prompt. This sends shell commands to
- the remote host to set the prompt, so this assumes the remote host is
- ready to receive commands.
-
- Alternatively, you may use your own prompt pattern. In this case you
- should call :meth:`login` with ``auto_prompt_reset=False``; then set the
- :attr:`PROMPT` attribute to a regular expression. After that, the
- :meth:`prompt` method will try to match your prompt pattern.
- '''
-
- self.sendline("unset PROMPT_COMMAND")
- self.sendline(self.PROMPT_SET_SH) # sh-style
- i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
- if i == 0: # csh-style
- self.sendline(self.PROMPT_SET_CSH)
- i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
- if i == 0:
- return False
- return True
-
-# vi:ts=4:sw=4:expandtab:ft=python:
+ # We appear to be in.
+ # set shell prompt to something unique.
+ if auto_prompt_reset:
+ if not self.set_unique_prompt():
+ self.close()
+ raise ExceptionPxssh('could not set shell prompt '
+ '(received: %r, expected: %r).' % (
+ self.before, self.PROMPT,))
+ return True
+
+ def logout (self):
+ '''Sends exit to the remote shell.
+
+ If there are stopped jobs then this automatically sends exit twice.
+ '''
+ self.sendline("exit")
+ index = self.expect([EOF, "(?i)there are stopped jobs"])
+ if index==1:
+ self.sendline("exit")
+ self.expect(EOF)
+ self.close()
+
+ def prompt(self, timeout=-1):
+ '''Match the next shell prompt.
+
+ This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
+ method. Note that if you called :meth:`login` with
+ ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
+ set the :attr:`PROMPT` attribute to a regex that it will use for
+ matching the prompt.
+
+ Calling :meth:`prompt` will erase the contents of the :attr:`before`
+ attribute even if no prompt is ever matched. If timeout is not given or
+ it is set to -1 then self.timeout is used.
+
+ :return: True if the shell prompt was matched, False if the timeout was
+ reached.
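+
+ A minimal sketch (``s`` is a logged-in pxssh session)::
+
+ s.sendline('uptime')
+ s.prompt(timeout=30) # False if no prompt is seen within 30s
+ print(s.before) # output produced before the prompt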
+ '''
+
+ if timeout == -1:
+ timeout = self.timeout
+ i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
+ if i==1:
+ return False
+ return True
+
+ def set_unique_prompt(self):
+ '''This sets the remote prompt to something more unique than ``#`` or ``$``.
+ This makes it easier for the :meth:`prompt` method to match the shell prompt
+ unambiguously. This method is called automatically by the :meth:`login`
+ method, but you may want to call it manually if you somehow reset the
+ shell prompt. For example, if you 'su' to a different user then you
+ will need to manually reset the prompt. This sends shell commands to
+ the remote host to set the prompt, so this assumes the remote host is
+ ready to receive commands.
+
+ Alternatively, you may use your own prompt pattern. In this case you
+ should call :meth:`login` with ``auto_prompt_reset=False``; then set the
+ :attr:`PROMPT` attribute to a regular expression. After that, the
+ :meth:`prompt` method will try to match your prompt pattern.
+ '''
+
+ self.sendline("unset PROMPT_COMMAND")
+ self.sendline(self.PROMPT_SET_SH) # sh-style
+ i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
+ if i == 0: # csh-style
+ self.sendline(self.PROMPT_SET_CSH)
+ i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
+ if i == 0:
+ return False
+ return True
+
+# vi:ts=4:sw=4:expandtab:ft=python:
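
A minimal end-to-end sketch of the pxssh login/prompt/logout flow above
(the host, user and password are placeholder values)::

    from pexpect import pxssh

    s = pxssh.pxssh()
    # login() calls set_unique_prompt() unless auto_prompt_reset=False,
    # so prompt() can later match the shell prompt unambiguously.
    s.login('example.com', 'user', 'secret')
    s.sendline('uptime')
    if s.prompt():           # returns False on timeout instead of raising
        print(s.before)      # output emitted before the matched prompt
    s.logout()
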
diff --git a/contrib/python/pexpect/pexpect/replwrap.py b/contrib/python/pexpect/pexpect/replwrap.py
index 79562de4c1..c930f1e4fe 100644
--- a/contrib/python/pexpect/pexpect/replwrap.py
+++ b/contrib/python/pexpect/pexpect/replwrap.py
@@ -1,122 +1,122 @@
-"""Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
-"""
-import os.path
-import signal
-import sys
-
-import pexpect
-
-PY3 = (sys.version_info[0] >= 3)
-
-if PY3:
- basestring = str
-
-PEXPECT_PROMPT = u'[PEXPECT_PROMPT>'
-PEXPECT_CONTINUATION_PROMPT = u'[PEXPECT_PROMPT+'
-
-class REPLWrapper(object):
- """Wrapper for a REPL.
-
- :param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
- in which a REPL has already been started, or a str command to start a new
- REPL process.
- :param str orig_prompt: The prompt to expect at first.
- :param str prompt_change: A command to change the prompt to something more
- unique. If this is ``None``, the prompt will not be changed. This will
- be formatted with the new and continuation prompts as positional
- parameters, so you can use ``{}`` style formatting to insert them into
- the command.
- :param str new_prompt: The more unique prompt to expect after the change.
- :param str extra_init_cmd: Commands to do extra initialisation, such as
- disabling pagers.
- """
- def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
- new_prompt=PEXPECT_PROMPT,
- continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
- extra_init_cmd=None):
- if isinstance(cmd_or_spawn, basestring):
- self.child = pexpect.spawn(cmd_or_spawn, echo=False, encoding='utf-8')
- else:
- self.child = cmd_or_spawn
- if self.child.echo:
- # Existing spawn instance has echo enabled, disable it
- # to prevent our input from being repeated to output.
- self.child.setecho(False)
- self.child.waitnoecho()
-
- if prompt_change is None:
- self.prompt = orig_prompt
- else:
- self.set_prompt(orig_prompt,
- prompt_change.format(new_prompt, continuation_prompt))
- self.prompt = new_prompt
- self.continuation_prompt = continuation_prompt
-
- self._expect_prompt()
-
- if extra_init_cmd is not None:
- self.run_command(extra_init_cmd)
-
- def set_prompt(self, orig_prompt, prompt_change):
- self.child.expect(orig_prompt)
- self.child.sendline(prompt_change)
-
+"""Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
+"""
+import os.path
+import signal
+import sys
+
+import pexpect
+
+PY3 = (sys.version_info[0] >= 3)
+
+if PY3:
+ basestring = str
+
+PEXPECT_PROMPT = u'[PEXPECT_PROMPT>'
+PEXPECT_CONTINUATION_PROMPT = u'[PEXPECT_PROMPT+'
+
+class REPLWrapper(object):
+ """Wrapper for a REPL.
+
+ :param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
+ in which a REPL has already been started, or a str command to start a new
+ REPL process.
+ :param str orig_prompt: The prompt to expect at first.
+ :param str prompt_change: A command to change the prompt to something more
+ unique. If this is ``None``, the prompt will not be changed. This will
+ be formatted with the new and continuation prompts as positional
+ parameters, so you can use ``{}`` style formatting to insert them into
+ the command.
+ :param str new_prompt: The more unique prompt to expect after the change.
+ :param str extra_init_cmd: Commands to do extra initialisation, such as
+ disabling pagers.
+ """
+ def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
+ new_prompt=PEXPECT_PROMPT,
+ continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
+ extra_init_cmd=None):
+ if isinstance(cmd_or_spawn, basestring):
+ self.child = pexpect.spawn(cmd_or_spawn, echo=False, encoding='utf-8')
+ else:
+ self.child = cmd_or_spawn
+ if self.child.echo:
+ # Existing spawn instance has echo enabled, disable it
+ # to prevent our input from being repeated to output.
+ self.child.setecho(False)
+ self.child.waitnoecho()
+
+ if prompt_change is None:
+ self.prompt = orig_prompt
+ else:
+ self.set_prompt(orig_prompt,
+ prompt_change.format(new_prompt, continuation_prompt))
+ self.prompt = new_prompt
+ self.continuation_prompt = continuation_prompt
+
+ self._expect_prompt()
+
+ if extra_init_cmd is not None:
+ self.run_command(extra_init_cmd)
+
+ def set_prompt(self, orig_prompt, prompt_change):
+ self.child.expect(orig_prompt)
+ self.child.sendline(prompt_change)
+
def _expect_prompt(self, timeout=-1, async_=False):
- return self.child.expect_exact([self.prompt, self.continuation_prompt],
+ return self.child.expect_exact([self.prompt, self.continuation_prompt],
timeout=timeout, async_=async_)
-
+
def run_command(self, command, timeout=-1, async_=False):
- """Send a command to the REPL, wait for and return output.
-
- :param str command: The command to send. Trailing newlines are not needed.
- This should be a complete block of input that will trigger execution;
- if a continuation prompt is found after sending input, :exc:`ValueError`
- will be raised.
- :param int timeout: How long to wait for the next prompt. -1 means the
- default from the :class:`pexpect.spawn` object (default 30 seconds).
- None means to wait indefinitely.
+ """Send a command to the REPL, wait for and return output.
+
+ :param str command: The command to send. Trailing newlines are not needed.
+ This should be a complete block of input that will trigger execution;
+ if a continuation prompt is found after sending input, :exc:`ValueError`
+ will be raised.
+ :param int timeout: How long to wait for the next prompt. -1 means the
+ default from the :class:`pexpect.spawn` object (default 30 seconds).
+ None means to wait indefinitely.
:param bool async_: On Python 3.4, or Python 3.3 with asyncio
installed, passing ``async_=True`` will make this return an
:mod:`asyncio` Future, which you can yield from to get the same
result that this method would normally give directly.
- """
- # Split up multiline commands and feed them in bit-by-bit
- cmdlines = command.splitlines()
- # splitlines ignores trailing newlines - add it back in manually
- if command.endswith('\n'):
- cmdlines.append('')
- if not cmdlines:
- raise ValueError("No command was given")
-
+ """
+ # Split up multiline commands and feed them in bit-by-bit
+ cmdlines = command.splitlines()
+ # splitlines ignores trailing newlines - add it back in manually
+ if command.endswith('\n'):
+ cmdlines.append('')
+ if not cmdlines:
+ raise ValueError("No command was given")
+
if async_:
from ._async import repl_run_command_async
return repl_run_command_async(self, cmdlines, timeout)
- res = []
- self.child.sendline(cmdlines[0])
- for line in cmdlines[1:]:
- self._expect_prompt(timeout=timeout)
- res.append(self.child.before)
- self.child.sendline(line)
-
- # Command was fully submitted, now wait for the next prompt
- if self._expect_prompt(timeout=timeout) == 1:
- # We got the continuation prompt - command was incomplete
- self.child.kill(signal.SIGINT)
- self._expect_prompt(timeout=1)
- raise ValueError("Continuation prompt found - input was incomplete:\n"
- + command)
- return u''.join(res + [self.child.before])
-
-def python(command="python"):
- """Start a Python shell and return a :class:`REPLWrapper` object."""
- return REPLWrapper(command, u">>> ", u"import sys; sys.ps1={0!r}; sys.ps2={1!r}")
-
-def bash(command="bash"):
- """Start a bash shell and return a :class:`REPLWrapper` object."""
- bashrc = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
- child = pexpect.spawn(command, ['--rcfile', bashrc], echo=False,
- encoding='utf-8')
+ res = []
+ self.child.sendline(cmdlines[0])
+ for line in cmdlines[1:]:
+ self._expect_prompt(timeout=timeout)
+ res.append(self.child.before)
+ self.child.sendline(line)
+
+ # Command was fully submitted, now wait for the next prompt
+ if self._expect_prompt(timeout=timeout) == 1:
+ # We got the continuation prompt - command was incomplete
+ self.child.kill(signal.SIGINT)
+ self._expect_prompt(timeout=1)
+ raise ValueError("Continuation prompt found - input was incomplete:\n"
+ + command)
+ return u''.join(res + [self.child.before])
+
+def python(command="python"):
+ """Start a Python shell and return a :class:`REPLWrapper` object."""
+ return REPLWrapper(command, u">>> ", u"import sys; sys.ps1={0!r}; sys.ps2={1!r}")
+
+def bash(command="bash"):
+ """Start a bash shell and return a :class:`REPLWrapper` object."""
+ bashrc = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
+ child = pexpect.spawn(command, ['--rcfile', bashrc], echo=False,
+ encoding='utf-8')
# If the user runs 'env', the value of PS1 will be in the output. To avoid
# replwrap seeing that as the next prompt, we'll embed the marker characters
@@ -127,4 +127,4 @@ def bash(command="bash"):
prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(ps1, ps2)
return REPLWrapper(child, u'\\$', prompt_change,
- extra_init_cmd="export PAGER=cat")
+ extra_init_cmd="export PAGER=cat")
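
A short usage sketch for the REPLWrapper convenience functions above,
assuming a python interpreter is available on PATH::

    from pexpect import replwrap

    py = replwrap.python()                 # wraps ">>> " with a unique prompt
    print(py.run_command("1 + 1"))         # prints "2"
    # The trailing newline terminates the block; without it the continuation
    # prompt would remain and run_command() would raise ValueError.
    block = "for i in range(3):\n    print(i)\n"
    print(py.run_command(block))
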
diff --git a/contrib/python/pexpect/pexpect/run.py b/contrib/python/pexpect/pexpect/run.py
index ccfd6050d6..ff288a1246 100644
--- a/contrib/python/pexpect/pexpect/run.py
+++ b/contrib/python/pexpect/pexpect/run.py
@@ -1,157 +1,157 @@
-import sys
-import types
-
-from .exceptions import EOF, TIMEOUT
-from .pty_spawn import spawn
-
-def run(command, timeout=30, withexitstatus=False, events=None,
- extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
-
- '''
- This function runs the given command; waits for it to finish; then
- returns all output as a string. STDERR is included in output. If the full
- path to the command is not given then the path is searched.
-
- Note that lines are terminated by the CR/LF (\\r\\n) combination even on
- UNIX-like systems because this is the standard for pseudottys. If you set
- 'withexitstatus' to true, then run will return a tuple of (command_output,
- exitstatus). If 'withexitstatus' is false then this returns just
- command_output.
-
- The run() function can often be used instead of creating a spawn instance.
- For example, the following code uses spawn::
-
- from pexpect import *
- child = spawn('scp foo user@example.com:.')
- child.expect('(?i)password')
- child.sendline(mypassword)
-
- The previous code can be replaced with the following::
-
- from pexpect import *
- run('scp foo user@example.com:.', events={'(?i)password': mypassword})
-
- **Examples**
-
- Start the apache daemon on the local machine::
-
- from pexpect import *
- run("/usr/local/apache/bin/apachectl start")
-
- Check in a file using SVN::
-
- from pexpect import *
- run("svn ci -m 'automatic commit' my_file.py")
-
- Run a command and capture exit status::
-
- from pexpect import *
- (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
-
- The following will run SSH and execute 'ls -l' on the remote machine. The
- password 'secret' will be sent if the '(?i)password' pattern is ever seen::
-
- run("ssh username@machine.example.com 'ls -l'",
- events={'(?i)password':'secret\\n'})
-
- This will start mencoder to rip a video from DVD. This will also display
- progress ticks every 5 seconds as it runs. For example::
-
- from pexpect import *
- def print_ticks(d):
- print d['event_count'],
- run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
- events={TIMEOUT:print_ticks}, timeout=5)
-
- The 'events' argument should be either a dictionary or a tuple list that
- contains patterns and responses. Whenever one of the patterns is seen
- in the command output, run() will send the associated response string.
- So, run() in the above example can also be written as::
-
- run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
- events=[(TIMEOUT,print_ticks)], timeout=5)
-
- Use a tuple list for events if the command output requires delicate
- control over which pattern should be matched, since the tuple list is
- passed to expect() as its pattern list, with the order of patterns preserved.
-
- Note that you should put newlines in your string if Enter is necessary.
-
- As in the example above, the responses may also contain a callback, either
- a function or a method. It should accept a dictionary value as an argument.
- The dictionary contains all the locals from the run() function, so you can
- access the child spawn object or any other variable defined in run()
- (event_count, child, and extra_args are the most useful). A callback may
- return True to stop the current run process. Otherwise run() continues
- until the next event. A callback may also return a string which will be
- sent to the child. 'extra_args' is not used directly by run(). It provides
- a way to pass data to a callback function through run(), via the locals
- dictionary passed to the callback.
-
- Like :class:`spawn`, passing *encoding* will make it work with unicode
- instead of bytes. You can pass *codec_errors* to control how errors in
- encoding and decoding are handled.
- '''
- if timeout == -1:
- child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
- **kwargs)
- else:
- child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
- cwd=cwd, env=env, **kwargs)
- if isinstance(events, list):
- patterns = [x for x, y in events]
- responses = [y for x, y in events]
- elif isinstance(events, dict):
- patterns = list(events.keys())
- responses = list(events.values())
- else:
- # This assumes EOF or TIMEOUT will eventually cause run to terminate.
- patterns = None
- responses = None
- child_result_list = []
- event_count = 0
- while True:
- try:
- index = child.expect(patterns)
- if isinstance(child.after, child.allowed_string_types):
- child_result_list.append(child.before + child.after)
- else:
- # child.after may have been a TIMEOUT or EOF,
- # which we don't want appended to the list.
- child_result_list.append(child.before)
- if isinstance(responses[index], child.allowed_string_types):
- child.send(responses[index])
- elif (isinstance(responses[index], types.FunctionType) or
- isinstance(responses[index], types.MethodType)):
- callback_result = responses[index](locals())
- sys.stdout.flush()
- if isinstance(callback_result, child.allowed_string_types):
- child.send(callback_result)
- elif callback_result:
- break
- else:
- raise TypeError("parameter `event' at index {index} must be "
- "a string, method, or function: {value!r}"
- .format(index=index, value=responses[index]))
- event_count = event_count + 1
- except TIMEOUT:
- child_result_list.append(child.before)
- break
- except EOF:
- child_result_list.append(child.before)
- break
- child_result = child.string_type().join(child_result_list)
- if withexitstatus:
- child.close()
- return (child_result, child.exitstatus)
- else:
- return child_result
-
-def runu(command, timeout=30, withexitstatus=False, events=None,
- extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
- """Deprecated: pass encoding to run() instead.
- """
- kwargs.setdefault('encoding', 'utf-8')
- return run(command, timeout=timeout, withexitstatus=withexitstatus,
- events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
- env=env, **kwargs)
+import sys
+import types
+
+from .exceptions import EOF, TIMEOUT
+from .pty_spawn import spawn
+
+def run(command, timeout=30, withexitstatus=False, events=None,
+ extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
+
+ '''
+ This function runs the given command; waits for it to finish; then
+ returns all output as a string. STDERR is included in output. If the full
+ path to the command is not given then the path is searched.
+
+ Note that lines are terminated by the CR/LF (\\r\\n) combination even on
+ UNIX-like systems because this is the standard for pseudottys. If you set
+ 'withexitstatus' to true, then run will return a tuple of (command_output,
+ exitstatus). If 'withexitstatus' is false then this returns just
+ command_output.
+
+ The run() function can often be used instead of creating a spawn instance.
+ For example, the following code uses spawn::
+
+ from pexpect import *
+ child = spawn('scp foo user@example.com:.')
+ child.expect('(?i)password')
+ child.sendline(mypassword)
+
+ The previous code can be replaced with the following::
+
+ from pexpect import *
+ run('scp foo user@example.com:.', events={'(?i)password': mypassword})
+
+ **Examples**
+
+ Start the apache daemon on the local machine::
+
+ from pexpect import *
+ run("/usr/local/apache/bin/apachectl start")
+
+ Check in a file using SVN::
+
+ from pexpect import *
+ run("svn ci -m 'automatic commit' my_file.py")
+
+ Run a command and capture exit status::
+
+ from pexpect import *
+ (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
+
+ The following will run SSH and execute 'ls -l' on the remote machine. The
+ password 'secret' will be sent if the '(?i)password' pattern is ever seen::
+
+ run("ssh username@machine.example.com 'ls -l'",
+ events={'(?i)password':'secret\\n'})
+
+ This will start mencoder to rip a video from DVD. This will also display
+ progress ticks every 5 seconds as it runs. For example::
+
+ from pexpect import *
+ def print_ticks(d):
+ print d['event_count'],
+ run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
+ events={TIMEOUT:print_ticks}, timeout=5)
+
+ The 'events' argument should be either a dictionary or a tuple list that
+ contains patterns and responses. Whenever one of the patterns is seen
+ in the command output, run() will send the associated response string.
+ So, run() in the above example can also be written as::
+
+ run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
+ events=[(TIMEOUT,print_ticks)], timeout=5)
+
+ Use a tuple list for events if the command output requires delicate
+ control over which pattern should be matched, since the tuple list is
+ passed to expect() as its pattern list, with the order of patterns preserved.
+
+ Note that you should put newlines in your string if Enter is necessary.
+
+ As in the example above, the responses may also contain a callback, either
+ a function or a method. It should accept a dictionary value as an argument.
+ The dictionary contains all the locals from the run() function, so you can
+ access the child spawn object or any other variable defined in run()
+ (event_count, child, and extra_args are the most useful). A callback may
+ return True to stop the current run process. Otherwise run() continues
+ until the next event. A callback may also return a string which will be
+ sent to the child. 'extra_args' is not used directly by run(). It provides
+ a way to pass data to a callback function through run(), via the locals
+ dictionary passed to the callback.
+
+ Like :class:`spawn`, passing *encoding* will make it work with unicode
+ instead of bytes. You can pass *codec_errors* to control how errors in
+ encoding and decoding are handled.
+ '''
+ if timeout == -1:
+ child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
+ **kwargs)
+ else:
+ child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
+ cwd=cwd, env=env, **kwargs)
+ if isinstance(events, list):
+ patterns = [x for x, y in events]
+ responses = [y for x, y in events]
+ elif isinstance(events, dict):
+ patterns = list(events.keys())
+ responses = list(events.values())
+ else:
+ # This assumes EOF or TIMEOUT will eventually cause run to terminate.
+ patterns = None
+ responses = None
+ child_result_list = []
+ event_count = 0
+ while True:
+ try:
+ index = child.expect(patterns)
+ if isinstance(child.after, child.allowed_string_types):
+ child_result_list.append(child.before + child.after)
+ else:
+ # child.after may have been a TIMEOUT or EOF,
+ # which we don't want appended to the list.
+ child_result_list.append(child.before)
+ if isinstance(responses[index], child.allowed_string_types):
+ child.send(responses[index])
+ elif (isinstance(responses[index], types.FunctionType) or
+ isinstance(responses[index], types.MethodType)):
+ callback_result = responses[index](locals())
+ sys.stdout.flush()
+ if isinstance(callback_result, child.allowed_string_types):
+ child.send(callback_result)
+ elif callback_result:
+ break
+ else:
+ raise TypeError("parameter `event' at index {index} must be "
+ "a string, method, or function: {value!r}"
+ .format(index=index, value=responses[index]))
+ event_count = event_count + 1
+ except TIMEOUT:
+ child_result_list.append(child.before)
+ break
+ except EOF:
+ child_result_list.append(child.before)
+ break
+ child_result = child.string_type().join(child_result_list)
+ if withexitstatus:
+ child.close()
+ return (child_result, child.exitstatus)
+ else:
+ return child_result
+
+def runu(command, timeout=30, withexitstatus=False, events=None,
+ extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
+ """Deprecated: pass encoding to run() instead.
+ """
+ kwargs.setdefault('encoding', 'utf-8')
+ return run(command, timeout=timeout, withexitstatus=withexitstatus,
+ events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
+ env=env, **kwargs)
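
A Python 3 version of the progress-tick example from the docstring above; a
sketch, assuming a local ping binary (any long-running command works)::

    import pexpect

    def print_ticks(d):
        # d is run()'s locals(); event_count counts matched events so far.
        # Returning nothing (None) lets run() keep waiting.
        print(d['event_count'], end=' ', flush=True)

    # A tick is printed every 2 seconds while the command runs.
    output = pexpect.run('ping -c 5 127.0.0.1', timeout=2,
                         events={pexpect.TIMEOUT: print_ticks})

    # Exit status capture, as documented above:
    out, status = pexpect.run('ls -l /bin', withexitstatus=True)
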
diff --git a/contrib/python/pexpect/pexpect/screen.py b/contrib/python/pexpect/pexpect/screen.py
index 21055841af..79f95c4e54 100644
--- a/contrib/python/pexpect/pexpect/screen.py
+++ b/contrib/python/pexpect/pexpect/screen.py
@@ -1,431 +1,431 @@
-'''This implements a virtual screen. This is used to support ANSI terminal
-emulation. The screen representation and state is implemented in this class.
-Most of the methods are inspired by ANSI screen control codes. The
-:class:`~pexpect.ANSI.ANSI` class extends this class to add parsing of ANSI
-escape codes.
-
-PEXPECT LICENSE
-
- This license is approved by the OSI and FSF as GPL-compatible.
- http://opensource.org/licenses/isc-license.txt
-
- Copyright (c) 2012, Noah Spurrier <noah@noah.org>
- PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
- PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
- COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-'''
-
-import codecs
-import copy
-import sys
-
-import warnings
-
-warnings.warn(("pexpect.screen and pexpect.ANSI are deprecated. "
- "We recommend using pyte to emulate a terminal screen: "
- "https://pypi.python.org/pypi/pyte"),
- stacklevel=2)
-
-NUL = 0 # Fill character; ignored on input.
-ENQ = 5 # Transmit answerback message.
-BEL = 7 # Ring the bell.
-BS = 8 # Move cursor left.
-HT = 9 # Move cursor to next tab stop.
-LF = 10 # Line feed.
-VT = 11 # Same as LF.
-FF = 12 # Same as LF.
-CR = 13 # Move cursor to left margin or newline.
-SO = 14 # Invoke G1 character set.
-SI = 15 # Invoke G0 character set.
-XON = 17 # Resume transmission.
-XOFF = 19 # Halt transmission.
-CAN = 24 # Cancel escape sequence.
-SUB = 26 # Same as CAN.
-ESC = 27 # Introduce a control sequence.
-DEL = 127 # Fill character; ignored on input.
-SPACE = u' ' # Space or blank character.
-
-PY3 = (sys.version_info[0] >= 3)
-if PY3:
- unicode = str
-
-def constrain (n, min, max):
-
- '''This returns the number n constrained to the min and max bounds.'''
-
- if n < min:
- return min
- if n > max:
- return max
- return n
-
-class screen:
- '''This object maintains the state of a virtual text screen as a
+'''This implements a virtual screen. This is used to support ANSI terminal
+emulation. The screen representation and state is implemented in this class.
+Most of the methods are inspired by ANSI screen control codes. The
+:class:`~pexpect.ANSI.ANSI` class extends this class to add parsing of ANSI
+escape codes.
+
+PEXPECT LICENSE
+
+ This license is approved by the OSI and FSF as GPL-compatible.
+ http://opensource.org/licenses/isc-license.txt
+
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+
+import codecs
+import copy
+import sys
+
+import warnings
+
+warnings.warn(("pexpect.screen and pexpect.ANSI are deprecated. "
+ "We recommend using pyte to emulate a terminal screen: "
+ "https://pypi.python.org/pypi/pyte"),
+ stacklevel=2)
+
+NUL = 0 # Fill character; ignored on input.
+ENQ = 5 # Transmit answerback message.
+BEL = 7 # Ring the bell.
+BS = 8 # Move cursor left.
+HT = 9 # Move cursor to next tab stop.
+LF = 10 # Line feed.
+VT = 11 # Same as LF.
+FF = 12 # Same as LF.
+CR = 13 # Move cursor to left margin or newline.
+SO = 14 # Invoke G1 character set.
+SI = 15 # Invoke G0 character set.
+XON = 17 # Resume transmission.
+XOFF = 19 # Halt transmission.
+CAN = 24 # Cancel escape sequence.
+SUB = 26 # Same as CAN.
+ESC = 27 # Introduce a control sequence.
+DEL = 127 # Fill character; ignored on input.
+SPACE = u' ' # Space or blank character.
+
+PY3 = (sys.version_info[0] >= 3)
+if PY3:
+ unicode = str
+
+def constrain (n, min, max):
+
+ '''This returns the number n constrained to the min and max bounds.'''
+
+ if n < min:
+ return min
+ if n > max:
+ return max
+ return n
+
+class screen:
+ '''This object maintains the state of a virtual text screen as a
rectangular array. This maintains a virtual cursor position and handles
- scrolling as characters are added. This supports most of the methods needed
- by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
- like arrays).
-
- Characters are represented internally using unicode. Methods that accept
- input characters, when passed 'bytes' (which in Python 2 is equivalent to
- 'str'), convert them from the encoding specified in the 'encoding'
- parameter to the constructor. Methods that return screen contents return
- unicode strings, with the exception of __str__() under Python 2. Passing
- ``encoding=None`` limits the API to only accept unicode input, so passing
- bytes in will raise :exc:`TypeError`.
- '''
- def __init__(self, r=24, c=80, encoding='latin-1', encoding_errors='replace'):
- '''This initializes a blank screen of the given dimensions.'''
-
- self.rows = r
- self.cols = c
- self.encoding = encoding
- self.encoding_errors = encoding_errors
- if encoding is not None:
+ scrolling as characters are added. This supports most of the methods needed
+ by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
+ like arrays).
+
+ Characters are represented internally using unicode. Methods that accept
+ input characters, when passed 'bytes' (which in Python 2 is equivalent to
+ 'str'), convert them from the encoding specified in the 'encoding'
+ parameter to the constructor. Methods that return screen contents return
+ unicode strings, with the exception of __str__() under Python 2. Passing
+ ``encoding=None`` limits the API to only accept unicode input, so passing
+ bytes in will raise :exc:`TypeError`.
+ '''
+ def __init__(self, r=24, c=80, encoding='latin-1', encoding_errors='replace'):
+ '''This initializes a blank screen of the given dimensions.'''
+
+ self.rows = r
+ self.cols = c
+ self.encoding = encoding
+ self.encoding_errors = encoding_errors
+ if encoding is not None:
self.decoder = codecs.getincrementaldecoder(encoding)(encoding_errors)
- else:
- self.decoder = None
- self.cur_r = 1
- self.cur_c = 1
- self.cur_saved_r = 1
- self.cur_saved_c = 1
- self.scroll_row_start = 1
- self.scroll_row_end = self.rows
- self.w = [ [SPACE] * self.cols for _ in range(self.rows)]
-
- def _decode(self, s):
- '''This converts from the external coding system (as passed to
- the constructor) to the internal one (unicode). '''
- if self.decoder is not None:
- return self.decoder.decode(s)
- else:
- raise TypeError("This screen was constructed with encoding=None, "
- "so it does not handle bytes.")
-
- def _unicode(self):
- '''This returns a printable representation of the screen as a unicode
- string (which, under Python 3.x, is the same as 'str'). The end of each
- screen line is terminated by a newline.'''
-
- return u'\n'.join ([ u''.join(c) for c in self.w ])
-
- if PY3:
- __str__ = _unicode
- else:
- __unicode__ = _unicode
-
- def __str__(self):
- '''This returns a printable representation of the screen. The end of
- each screen line is terminated by a newline. '''
- encoding = self.encoding or 'ascii'
- return self._unicode().encode(encoding, 'replace')
-
- def dump (self):
- '''This returns a copy of the screen as a unicode string. This is similar to
- __str__/__unicode__ except that lines are not terminated with line
- feeds.'''
-
- return u''.join ([ u''.join(c) for c in self.w ])
-
- def pretty (self):
- '''This returns a copy of the screen as a unicode string with an ASCII
- text box around the screen border. This is similar to
- __str__/__unicode__ except that it adds a box.'''
-
- top_bot = u'+' + u'-'*self.cols + u'+\n'
- return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
-
- def fill (self, ch=SPACE):
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- self.fill_region (1,1,self.rows,self.cols, ch)
-
- def fill_region (self, rs,cs, re,ce, ch=SPACE):
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- rs = constrain (rs, 1, self.rows)
- re = constrain (re, 1, self.rows)
- cs = constrain (cs, 1, self.cols)
- ce = constrain (ce, 1, self.cols)
- if rs > re:
- rs, re = re, rs
- if cs > ce:
- cs, ce = ce, cs
- for r in range (rs, re+1):
- for c in range (cs, ce + 1):
- self.put_abs (r,c,ch)
-
- def cr (self):
- '''This moves the cursor to the beginning (col 1) of the current row.
- '''
-
- self.cursor_home (self.cur_r, 1)
-
- def lf (self):
- '''This moves the cursor down with scrolling.
- '''
-
- old_r = self.cur_r
- self.cursor_down()
- if old_r == self.cur_r:
- self.scroll_up ()
- self.erase_line()
-
- def crlf (self):
- '''This advances the cursor with CRLF properties.
- The cursor will line wrap and the screen may scroll.
- '''
-
- self.cr ()
- self.lf ()
-
- def newline (self):
- '''This is an alias for crlf().
- '''
-
- self.crlf()
-
- def put_abs (self, r, c, ch):
- '''Screen array starts at index 1.'''
-
- r = constrain (r, 1, self.rows)
- c = constrain (c, 1, self.cols)
- if isinstance(ch, bytes):
- ch = self._decode(ch)[0]
- else:
- ch = ch[0]
- self.w[r-1][c-1] = ch
-
- def put (self, ch):
- '''This puts a character at the current cursor position.
- '''
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- self.put_abs (self.cur_r, self.cur_c, ch)
-
- def insert_abs (self, r, c, ch):
- '''This inserts a character at (r,c). Everything under
- and to the right is shifted right one character.
- The last character of the line is lost.
- '''
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- r = constrain (r, 1, self.rows)
- c = constrain (c, 1, self.cols)
- for ci in range (self.cols, c, -1):
- self.put_abs (r,ci, self.get_abs(r,ci-1))
- self.put_abs (r,c,ch)
-
- def insert (self, ch):
-
- if isinstance(ch, bytes):
- ch = self._decode(ch)
-
- self.insert_abs (self.cur_r, self.cur_c, ch)
-
- def get_abs (self, r, c):
-
- r = constrain (r, 1, self.rows)
- c = constrain (c, 1, self.cols)
- return self.w[r-1][c-1]
-
- def get (self):
-
- return self.get_abs (self.cur_r, self.cur_c)
-
- def get_region (self, rs,cs, re,ce):
- '''This returns a list of lines representing the region.
- '''
-
- rs = constrain (rs, 1, self.rows)
- re = constrain (re, 1, self.rows)
- cs = constrain (cs, 1, self.cols)
- ce = constrain (ce, 1, self.cols)
- if rs > re:
- rs, re = re, rs
- if cs > ce:
- cs, ce = ce, cs
- sc = []
- for r in range (rs, re+1):
- line = u''
- for c in range (cs, ce + 1):
- ch = self.get_abs (r,c)
- line = line + ch
- sc.append (line)
- return sc
-
- def cursor_constrain (self):
- '''This keeps the cursor within the screen area.
- '''
-
- self.cur_r = constrain (self.cur_r, 1, self.rows)
- self.cur_c = constrain (self.cur_c, 1, self.cols)
-
- def cursor_home (self, r=1, c=1): # <ESC>[{ROW};{COLUMN}H
-
- self.cur_r = r
- self.cur_c = c
- self.cursor_constrain ()
-
- def cursor_back (self,count=1): # <ESC>[{COUNT}D (not confused with down)
-
- self.cur_c = self.cur_c - count
- self.cursor_constrain ()
-
- def cursor_down (self,count=1): # <ESC>[{COUNT}B (not confused with back)
-
- self.cur_r = self.cur_r + count
- self.cursor_constrain ()
-
- def cursor_forward (self,count=1): # <ESC>[{COUNT}C
-
- self.cur_c = self.cur_c + count
- self.cursor_constrain ()
-
- def cursor_up (self,count=1): # <ESC>[{COUNT}A
-
- self.cur_r = self.cur_r - count
- self.cursor_constrain ()
-
- def cursor_up_reverse (self): # <ESC> M (called RI -- Reverse Index)
-
- old_r = self.cur_r
- self.cursor_up()
- if old_r == self.cur_r:
- self.scroll_up()
-
- def cursor_force_position (self, r, c): # <ESC>[{ROW};{COLUMN}f
- '''Identical to Cursor Home.'''
-
- self.cursor_home (r, c)
-
- def cursor_save (self): # <ESC>[s
- '''Save current cursor position.'''
-
- self.cursor_save_attrs()
-
- def cursor_unsave (self): # <ESC>[u
- '''Restores cursor position after a Save Cursor.'''
-
- self.cursor_restore_attrs()
-
- def cursor_save_attrs (self): # <ESC>7
- '''Save current cursor position.'''
-
- self.cur_saved_r = self.cur_r
- self.cur_saved_c = self.cur_c
-
- def cursor_restore_attrs (self): # <ESC>8
- '''Restores cursor position after a Save Cursor.'''
-
- self.cursor_home (self.cur_saved_r, self.cur_saved_c)
-
- def scroll_constrain (self):
- '''This keeps the scroll region within the screen region.'''
-
- if self.scroll_row_start <= 0:
- self.scroll_row_start = 1
- if self.scroll_row_end > self.rows:
- self.scroll_row_end = self.rows
-
- def scroll_screen (self): # <ESC>[r
- '''Enable scrolling for entire display.'''
-
- self.scroll_row_start = 1
- self.scroll_row_end = self.rows
-
- def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
- '''Enable scrolling from row {start} to row {end}.'''
-
- self.scroll_row_start = rs
- self.scroll_row_end = re
- self.scroll_constrain()
-
- def scroll_down (self): # <ESC>D
- '''Scroll display down one line.'''
-
- # Screen is indexed from 1, but arrays are indexed from 0.
- s = self.scroll_row_start - 1
- e = self.scroll_row_end - 1
- self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
-
- def scroll_up (self): # <ESC>M
- '''Scroll display up one line.'''
-
- # Screen is indexed from 1, but arrays are indexed from 0.
- s = self.scroll_row_start - 1
- e = self.scroll_row_end - 1
- self.w[s:e] = copy.deepcopy(self.w[s+1:e+1])
-
- def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
- '''Erases from the current cursor position to the end of the current
- line.'''
-
- self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
-
- def erase_start_of_line (self): # <ESC>[1K
- '''Erases from the current cursor position to the start of the current
- line.'''
-
- self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
-
- def erase_line (self): # <ESC>[2K
- '''Erases the entire current line.'''
-
- self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
-
- def erase_down (self): # <ESC>[0J -or- <ESC>[J
- '''Erases the screen from the current line down to the bottom of the
- screen.'''
-
- self.erase_end_of_line ()
- self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
-
- def erase_up (self): # <ESC>[1J
- '''Erases the screen from the current line up to the top of the
- screen.'''
-
- self.erase_start_of_line ()
- self.fill_region (self.cur_r-1, 1, 1, self.cols)
-
- def erase_screen (self): # <ESC>[2J
- '''Erases the screen with the background color.'''
-
- self.fill ()
-
- def set_tab (self): # <ESC>H
- '''Sets a tab at the current position.'''
-
- pass
-
- def clear_tab (self): # <ESC>[g
- '''Clears tab at the current position.'''
-
- pass
-
- def clear_all_tabs (self): # <ESC>[3g
- '''Clears all tabs.'''
-
- pass
-
-# Insert line Esc [ Pn L
-# Delete line Esc [ Pn M
-# Delete character Esc [ Pn P
-# Scrolling region Esc [ Pn(top);Pn(bot) r
-
+ else:
+ self.decoder = None
+ self.cur_r = 1
+ self.cur_c = 1
+ self.cur_saved_r = 1
+ self.cur_saved_c = 1
+ self.scroll_row_start = 1
+ self.scroll_row_end = self.rows
+ self.w = [ [SPACE] * self.cols for _ in range(self.rows)]
+
+ def _decode(self, s):
+ '''This converts from the external coding system (as passed to
+ the constructor) to the internal one (unicode). '''
+ if self.decoder is not None:
+ return self.decoder.decode(s)
+ else:
+ raise TypeError("This screen was constructed with encoding=None, "
+ "so it does not handle bytes.")
+
+ def _unicode(self):
+ '''This returns a printable representation of the screen as a unicode
+ string (which, under Python 3.x, is the same as 'str'). The end of each
+ screen line is terminated by a newline.'''
+
+ return u'\n'.join ([ u''.join(c) for c in self.w ])
+
+ if PY3:
+ __str__ = _unicode
+ else:
+ __unicode__ = _unicode
+
+ def __str__(self):
+ '''This returns a printable representation of the screen. The end of
+ each screen line is terminated by a newline. '''
+ encoding = self.encoding or 'ascii'
+ return self._unicode().encode(encoding, 'replace')
+
+ def dump (self):
+ '''This returns a copy of the screen as a unicode string. This is similar to
+ __str__/__unicode__ except that lines are not terminated with line
+ feeds.'''
+
+ return u''.join ([ u''.join(c) for c in self.w ])
+
+ def pretty (self):
+ '''This returns a copy of the screen as a unicode string with an ASCII
+ text box around the screen border. This is similar to
+ __str__/__unicode__ except that it adds a box.'''
+
+ top_bot = u'+' + u'-'*self.cols + u'+\n'
+ return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
+
+ def fill (self, ch=SPACE):
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+ self.fill_region (1,1,self.rows,self.cols, ch)
+
+ def fill_region (self, rs,cs, re,ce, ch=SPACE):
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+ rs = constrain (rs, 1, self.rows)
+ re = constrain (re, 1, self.rows)
+ cs = constrain (cs, 1, self.cols)
+ ce = constrain (ce, 1, self.cols)
+ if rs > re:
+ rs, re = re, rs
+ if cs > ce:
+ cs, ce = ce, cs
+ for r in range (rs, re+1):
+ for c in range (cs, ce + 1):
+ self.put_abs (r,c,ch)
+
+ def cr (self):
+ '''This moves the cursor to the beginning (col 1) of the current row.
+ '''
+
+ self.cursor_home (self.cur_r, 1)
+
+ def lf (self):
+ '''This moves the cursor down with scrolling.
+ '''
+
+ old_r = self.cur_r
+ self.cursor_down()
+ if old_r == self.cur_r:
+ self.scroll_up ()
+ self.erase_line()
+
+ def crlf (self):
+ '''This advances the cursor with CRLF properties.
+ The cursor will line wrap and the screen may scroll.
+ '''
+
+ self.cr ()
+ self.lf ()
+
+ def newline (self):
+ '''This is an alias for crlf().
+ '''
+
+ self.crlf()
+
+ def put_abs (self, r, c, ch):
+ '''Screen array starts at index 1.'''
+
+ r = constrain (r, 1, self.rows)
+ c = constrain (c, 1, self.cols)
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)[0]
+ else:
+ ch = ch[0]
+ self.w[r-1][c-1] = ch
+
+ def put (self, ch):
+ '''This puts a character at the current cursor position.
+ '''
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+ self.put_abs (self.cur_r, self.cur_c, ch)
+
+ def insert_abs (self, r, c, ch):
+ '''This inserts a character at (r,c). Everything under
+ and to the right is shifted right one character.
+ The last character of the line is lost.
+ '''
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+ r = constrain (r, 1, self.rows)
+ c = constrain (c, 1, self.cols)
+ for ci in range (self.cols, c, -1):
+ self.put_abs (r,ci, self.get_abs(r,ci-1))
+ self.put_abs (r,c,ch)
+
+ def insert (self, ch):
+
+ if isinstance(ch, bytes):
+ ch = self._decode(ch)
+
+ self.insert_abs (self.cur_r, self.cur_c, ch)
+
+ def get_abs (self, r, c):
+
+ r = constrain (r, 1, self.rows)
+ c = constrain (c, 1, self.cols)
+ return self.w[r-1][c-1]
+
+ def get (self):
+
+ return self.get_abs (self.cur_r, self.cur_c)
+
+ def get_region (self, rs,cs, re,ce):
+ '''This returns a list of lines representing the region.
+ '''
+
+ rs = constrain (rs, 1, self.rows)
+ re = constrain (re, 1, self.rows)
+ cs = constrain (cs, 1, self.cols)
+ ce = constrain (ce, 1, self.cols)
+ if rs > re:
+ rs, re = re, rs
+ if cs > ce:
+ cs, ce = ce, cs
+ sc = []
+ for r in range (rs, re+1):
+ line = u''
+ for c in range (cs, ce + 1):
+ ch = self.get_abs (r,c)
+ line = line + ch
+ sc.append (line)
+ return sc
+
+ def cursor_constrain (self):
+ '''This keeps the cursor within the screen area.
+ '''
+
+ self.cur_r = constrain (self.cur_r, 1, self.rows)
+ self.cur_c = constrain (self.cur_c, 1, self.cols)
+
+ def cursor_home (self, r=1, c=1): # <ESC>[{ROW};{COLUMN}H
+
+ self.cur_r = r
+ self.cur_c = c
+ self.cursor_constrain ()
+
+ def cursor_back (self,count=1): # <ESC>[{COUNT}D (not confused with down)
+
+ self.cur_c = self.cur_c - count
+ self.cursor_constrain ()
+
+ def cursor_down (self,count=1): # <ESC>[{COUNT}B (not confused with back)
+
+ self.cur_r = self.cur_r + count
+ self.cursor_constrain ()
+
+ def cursor_forward (self,count=1): # <ESC>[{COUNT}C
+
+ self.cur_c = self.cur_c + count
+ self.cursor_constrain ()
+
+ def cursor_up (self,count=1): # <ESC>[{COUNT}A
+
+ self.cur_r = self.cur_r - count
+ self.cursor_constrain ()
+
+ def cursor_up_reverse (self): # <ESC> M (called RI -- Reverse Index)
+
+ old_r = self.cur_r
+ self.cursor_up()
+ if old_r == self.cur_r:
+ self.scroll_up()
+
+ def cursor_force_position (self, r, c): # <ESC>[{ROW};{COLUMN}f
+ '''Identical to Cursor Home.'''
+
+ self.cursor_home (r, c)
+
+ def cursor_save (self): # <ESC>[s
+ '''Save current cursor position.'''
+
+ self.cursor_save_attrs()
+
+ def cursor_unsave (self): # <ESC>[u
+ '''Restores cursor position after a Save Cursor.'''
+
+ self.cursor_restore_attrs()
+
+ def cursor_save_attrs (self): # <ESC>7
+ '''Save current cursor position.'''
+
+ self.cur_saved_r = self.cur_r
+ self.cur_saved_c = self.cur_c
+
+ def cursor_restore_attrs (self): # <ESC>8
+ '''Restores cursor position after a Save Cursor.'''
+
+ self.cursor_home (self.cur_saved_r, self.cur_saved_c)
+
+ def scroll_constrain (self):
+ '''This keeps the scroll region within the screen region.'''
+
+ if self.scroll_row_start <= 0:
+ self.scroll_row_start = 1
+ if self.scroll_row_end > self.rows:
+ self.scroll_row_end = self.rows
+
+ def scroll_screen (self): # <ESC>[r
+ '''Enable scrolling for entire display.'''
+
+ self.scroll_row_start = 1
+ self.scroll_row_end = self.rows
+
+ def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
+ '''Enable scrolling from row {start} to row {end}.'''
+
+ self.scroll_row_start = rs
+ self.scroll_row_end = re
+ self.scroll_constrain()
+
+ def scroll_down (self): # <ESC>D
+ '''Scroll display down one line.'''
+
+ # Screen is indexed from 1, but arrays are indexed from 0.
+ s = self.scroll_row_start - 1
+ e = self.scroll_row_end - 1
+ self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
+
+ def scroll_up (self): # <ESC>M
+ '''Scroll display up one line.'''
+
+ # Screen is indexed from 1, but arrays are indexed from 0.
+ s = self.scroll_row_start - 1
+ e = self.scroll_row_end - 1
+ self.w[s:e] = copy.deepcopy(self.w[s+1:e+1])
+
+ def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
+ '''Erases from the current cursor position to the end of the current
+ line.'''
+
+ self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
+
+ def erase_start_of_line (self): # <ESC>[1K
+ '''Erases from the current cursor position to the start of the current
+ line.'''
+
+ self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
+
+ def erase_line (self): # <ESC>[2K
+ '''Erases the entire current line.'''
+
+ self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
+
+ def erase_down (self): # <ESC>[0J -or- <ESC>[J
+ '''Erases the screen from the current line down to the bottom of the
+ screen.'''
+
+ self.erase_end_of_line ()
+ self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
+
+ def erase_up (self): # <ESC>[1J
+ '''Erases the screen from the current line up to the top of the
+ screen.'''
+
+ self.erase_start_of_line ()
+ self.fill_region (self.cur_r-1, 1, 1, self.cols)
+
+ def erase_screen (self): # <ESC>[2J
+ '''Erases the screen with the background color.'''
+
+ self.fill ()
+
+ def set_tab (self): # <ESC>H
+ '''Sets a tab at the current position.'''
+
+ pass
+
+ def clear_tab (self): # <ESC>[g
+ '''Clears tab at the current position.'''
+
+ pass
+
+ def clear_all_tabs (self): # <ESC>[3g
+ '''Clears all tabs.'''
+
+ pass
+
+# Insert line Esc [ Pn L
+# Delete line Esc [ Pn M
+# Delete character Esc [ Pn P
+# Scrolling region Esc [ Pn(top);Pn(bot) r
+
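
A brief sketch of driving the (deprecated) virtual screen above; note that
importing pexpect.screen emits the DeprecationWarning shown in the module::

    from pexpect import screen

    s = screen.screen(r=4, c=12)     # 4 rows x 12 cols, filled with SPACE
    for ch in 'hello':
        s.put(ch)                    # put() writes at the cursor...
        s.cursor_forward()           # ...but does not advance it
    s.crlf()                         # wrap to column 1 of the next row
    print(s.pretty())                # screen contents inside an ASCII box
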
diff --git a/contrib/python/pexpect/pexpect/spawnbase.py b/contrib/python/pexpect/pexpect/spawnbase.py
index b33eadd069..59e905764c 100644
--- a/contrib/python/pexpect/pexpect/spawnbase.py
+++ b/contrib/python/pexpect/pexpect/spawnbase.py
@@ -1,121 +1,121 @@
from io import StringIO, BytesIO
-import codecs
-import os
-import sys
-import re
-import errno
-from .exceptions import ExceptionPexpect, EOF, TIMEOUT
-from .expect import Expecter, searcher_string, searcher_re
-
-PY3 = (sys.version_info[0] >= 3)
-text_type = str if PY3 else unicode
-
-class _NullCoder(object):
- """Pass bytes through unchanged."""
- @staticmethod
- def encode(b, final=False):
- return b
-
- @staticmethod
- def decode(b, final=False):
- return b
-
-class SpawnBase(object):
- """A base class providing the backwards-compatible spawn API for Pexpect.
-
- This should not be instantiated directly: use :class:`pexpect.spawn` or
- :class:`pexpect.fdpexpect.fdspawn`.
- """
- encoding = None
- pid = None
- flag_eof = False
-
- def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
- logfile=None, encoding=None, codec_errors='strict'):
- self.stdin = sys.stdin
- self.stdout = sys.stdout
- self.stderr = sys.stderr
-
- self.searcher = None
- self.ignorecase = False
- self.before = None
- self.after = None
- self.match = None
- self.match_index = None
- self.terminated = True
- self.exitstatus = None
- self.signalstatus = None
- # status returned by os.waitpid
- self.status = None
- # the child file descriptor is initially closed
- self.child_fd = -1
- self.timeout = timeout
- self.delimiter = EOF
- self.logfile = logfile
- # input from child (read_nonblocking)
- self.logfile_read = None
- # output to send (send, sendline)
- self.logfile_send = None
- # max bytes to read at one time into buffer
- self.maxread = maxread
- # Data before searchwindowsize point is preserved, but not searched.
- self.searchwindowsize = searchwindowsize
- # Delay used before sending data to child. Time in seconds.
+import codecs
+import os
+import sys
+import re
+import errno
+from .exceptions import ExceptionPexpect, EOF, TIMEOUT
+from .expect import Expecter, searcher_string, searcher_re
+
+PY3 = (sys.version_info[0] >= 3)
+text_type = str if PY3 else unicode
+
+class _NullCoder(object):
+ """Pass bytes through unchanged."""
+ @staticmethod
+ def encode(b, final=False):
+ return b
+
+ @staticmethod
+ def decode(b, final=False):
+ return b
+
+class SpawnBase(object):
+ """A base class providing the backwards-compatible spawn API for Pexpect.
+
+ This should not be instantiated directly: use :class:`pexpect.spawn` or
+ :class:`pexpect.fdpexpect.fdspawn`.
+ """
+ encoding = None
+ pid = None
+ flag_eof = False
+
+ def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
+ logfile=None, encoding=None, codec_errors='strict'):
+ self.stdin = sys.stdin
+ self.stdout = sys.stdout
+ self.stderr = sys.stderr
+
+ self.searcher = None
+ self.ignorecase = False
+ self.before = None
+ self.after = None
+ self.match = None
+ self.match_index = None
+ self.terminated = True
+ self.exitstatus = None
+ self.signalstatus = None
+ # status returned by os.waitpid
+ self.status = None
+ # the child file descriptor is initially closed
+ self.child_fd = -1
+ self.timeout = timeout
+ self.delimiter = EOF
+ self.logfile = logfile
+ # input from child (read_nonblocking)
+ self.logfile_read = None
+ # output to send (send, sendline)
+ self.logfile_send = None
+ # max bytes to read at one time into buffer
+ self.maxread = maxread
+ # Data before searchwindowsize point is preserved, but not searched.
+ self.searchwindowsize = searchwindowsize
+ # Delay used before sending data to child. Time in seconds.
# Set this to None to skip the time.sleep() call completely.
- self.delaybeforesend = 0.05
- # Used by close() to give kernel time to update process status.
- # Time in seconds.
- self.delayafterclose = 0.1
- # Used by terminate() to give kernel time to update process status.
- # Time in seconds.
- self.delayafterterminate = 0.1
+ self.delaybeforesend = 0.05
+ # Used by close() to give kernel time to update process status.
+ # Time in seconds.
+ self.delayafterclose = 0.1
+ # Used by terminate() to give kernel time to update process status.
+ # Time in seconds.
+ self.delayafterterminate = 0.1
# Delay in seconds to sleep after each call to read_nonblocking().
# Set this to None to skip the time.sleep() call completely: that
# would restore the behavior from pexpect-2.0 (for performance
# reasons or because you don't want to release Python's global
# interpreter lock).
self.delayafterread = 0.0001
- self.softspace = False
- self.name = '<' + repr(self) + '>'
- self.closed = True
-
- # Unicode interface
- self.encoding = encoding
- self.codec_errors = codec_errors
- if encoding is None:
- # bytes mode (accepts some unicode for backwards compatibility)
- self._encoder = self._decoder = _NullCoder()
- self.string_type = bytes
+ self.softspace = False
+ self.name = '<' + repr(self) + '>'
+ self.closed = True
+
+ # Unicode interface
+ self.encoding = encoding
+ self.codec_errors = codec_errors
+ if encoding is None:
+ # bytes mode (accepts some unicode for backwards compatibility)
+ self._encoder = self._decoder = _NullCoder()
+ self.string_type = bytes
self.buffer_type = BytesIO
- self.crlf = b'\r\n'
- if PY3:
- self.allowed_string_types = (bytes, str)
- self.linesep = os.linesep.encode('ascii')
- def write_to_stdout(b):
- try:
- return sys.stdout.buffer.write(b)
- except AttributeError:
- # If stdout has been replaced, it may not have .buffer
- return sys.stdout.write(b.decode('ascii', 'replace'))
- self.write_to_stdout = write_to_stdout
- else:
- self.allowed_string_types = (basestring,) # analysis:ignore
- self.linesep = os.linesep
- self.write_to_stdout = sys.stdout.write
- else:
- # unicode mode
- self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
- self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
- self.string_type = text_type
+ self.crlf = b'\r\n'
+ if PY3:
+ self.allowed_string_types = (bytes, str)
+ self.linesep = os.linesep.encode('ascii')
+ def write_to_stdout(b):
+ try:
+ return sys.stdout.buffer.write(b)
+ except AttributeError:
+ # If stdout has been replaced, it may not have .buffer
+ return sys.stdout.write(b.decode('ascii', 'replace'))
+ self.write_to_stdout = write_to_stdout
+ else:
+ self.allowed_string_types = (basestring,) # analysis:ignore
+ self.linesep = os.linesep
+ self.write_to_stdout = sys.stdout.write
+ else:
+ # unicode mode
+ self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
+ self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
+ self.string_type = text_type
self.buffer_type = StringIO
- self.crlf = u'\r\n'
- self.allowed_string_types = (text_type, )
- if PY3:
- self.linesep = os.linesep
- else:
- self.linesep = os.linesep.decode('ascii')
- # This can handle unicode in both Python 2 and 3
- self.write_to_stdout = sys.stdout.write
+ self.crlf = u'\r\n'
+ self.allowed_string_types = (text_type, )
+ if PY3:
+ self.linesep = os.linesep
+ else:
+ self.linesep = os.linesep.decode('ascii')
+ # This can handle unicode in both Python 2 and 3
+ self.write_to_stdout = sys.stdout.write
# storage for async transport
self.async_pw_transport = None
# This is the read buffer. See maxread.
@@ -123,29 +123,29 @@ class SpawnBase(object):
# The buffer may be trimmed for efficiency reasons. This is the
# untrimmed buffer, used to create the before attribute.
self._before = self.buffer_type()
-
- def _log(self, s, direction):
- if self.logfile is not None:
- self.logfile.write(s)
- self.logfile.flush()
- second_log = self.logfile_send if (direction=='send') else self.logfile_read
- if second_log is not None:
- second_log.write(s)
- second_log.flush()
-
- # For backwards compatibility, in bytes mode (when encoding is None)
- # unicode is accepted for send and expect. Unicode mode is strictly unicode
- # only.
- def _coerce_expect_string(self, s):
- if self.encoding is None and not isinstance(s, bytes):
- return s.encode('ascii')
- return s
-
- def _coerce_send_string(self, s):
- if self.encoding is None and not isinstance(s, bytes):
- return s.encode('utf-8')
- return s
-
+
+ def _log(self, s, direction):
+ if self.logfile is not None:
+ self.logfile.write(s)
+ self.logfile.flush()
+ second_log = self.logfile_send if (direction=='send') else self.logfile_read
+ if second_log is not None:
+ second_log.write(s)
+ second_log.flush()
+
+ # For backwards compatibility, in bytes mode (when encoding is None)
+ # unicode is accepted for send and expect. Unicode mode is strictly unicode
+ # only.
+ def _coerce_expect_string(self, s):
+ if self.encoding is None and not isinstance(s, bytes):
+ return s.encode('ascii')
+ return s
+
+ def _coerce_send_string(self, s):
+ if self.encoding is None and not isinstance(s, bytes):
+ return s.encode('utf-8')
+ return s
+
def _get_buffer(self):
return self._buffer.getvalue()
@@ -157,369 +157,369 @@ class SpawnBase(object):
# to be a string/bytes object)
buffer = property(_get_buffer, _set_buffer)
- def read_nonblocking(self, size=1, timeout=None):
- """This reads data from the file descriptor.
-
- This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
-
- The timeout parameter is ignored.
- """
-
- try:
- s = os.read(self.child_fd, size)
- except OSError as err:
- if err.args[0] == errno.EIO:
- # Linux-style EOF
- self.flag_eof = True
- raise EOF('End Of File (EOF). Exception style platform.')
- raise
- if s == b'':
- # BSD-style EOF
- self.flag_eof = True
- raise EOF('End Of File (EOF). Empty string style platform.')
-
- s = self._decoder.decode(s, final=False)
- self._log(s, 'read')
- return s
-
- def _pattern_type_err(self, pattern):
- raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
- ' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
- .format(badtype=type(pattern),
- badobj=pattern,
- goodtypes=', '.join([str(ast)\
- for ast in self.allowed_string_types])
- )
- )
-
- def compile_pattern_list(self, patterns):
- '''This compiles a pattern-string or a list of pattern-strings.
- Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
- those. Patterns may also be None which results in an empty list (you
- might do this if waiting for an EOF or TIMEOUT condition without
- expecting any pattern).
-
- This is used by expect() when calling expect_list(). Thus expect() is
- nothing more than::
-
- cpl = self.compile_pattern_list(pl)
- return self.expect_list(cpl, timeout)
-
- If you are using expect() within a loop it may be more
- efficient to compile the patterns first and then call expect_list().
- This avoid calls in a loop to compile_pattern_list()::
-
- cpl = self.compile_pattern_list(my_pattern)
- while some_condition:
- ...
- i = self.expect_list(cpl, timeout)
- ...
- '''
-
- if patterns is None:
- return []
- if not isinstance(patterns, list):
- patterns = [patterns]
-
- # Allow dot to match \n
- compile_flags = re.DOTALL
- if self.ignorecase:
- compile_flags = compile_flags | re.IGNORECASE
- compiled_pattern_list = []
- for idx, p in enumerate(patterns):
- if isinstance(p, self.allowed_string_types):
- p = self._coerce_expect_string(p)
- compiled_pattern_list.append(re.compile(p, compile_flags))
- elif p is EOF:
- compiled_pattern_list.append(EOF)
- elif p is TIMEOUT:
- compiled_pattern_list.append(TIMEOUT)
- elif isinstance(p, type(re.compile(''))):
- compiled_pattern_list.append(p)
- else:
- self._pattern_type_err(p)
- return compiled_pattern_list
-
+ def read_nonblocking(self, size=1, timeout=None):
+ """This reads data from the file descriptor.
+
+ This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
+
+ The timeout parameter is ignored.
+ """
+
+ try:
+ s = os.read(self.child_fd, size)
+ except OSError as err:
+ if err.args[0] == errno.EIO:
+ # Linux-style EOF
+ self.flag_eof = True
+ raise EOF('End Of File (EOF). Exception style platform.')
+ raise
+ if s == b'':
+ # BSD-style EOF
+ self.flag_eof = True
+ raise EOF('End Of File (EOF). Empty string style platform.')
+
+ s = self._decoder.decode(s, final=False)
+ self._log(s, 'read')
+ return s
+
+ def _pattern_type_err(self, pattern):
+ raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
+ ' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
+ .format(badtype=type(pattern),
+ badobj=pattern,
+ goodtypes=', '.join([str(ast)\
+ for ast in self.allowed_string_types])
+ )
+ )
+
+ def compile_pattern_list(self, patterns):
+ '''This compiles a pattern-string or a list of pattern-strings.
+ Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
+ those. Patterns may also be None which results in an empty list (you
+ might do this if waiting for an EOF or TIMEOUT condition without
+ expecting any pattern).
+
+ This is used by expect() when calling expect_list(). Thus expect() is
+ nothing more than::
+
+ cpl = self.compile_pattern_list(pl)
+ return self.expect_list(cpl, timeout)
+
+ If you are using expect() within a loop it may be more
+ efficient to compile the patterns first and then call expect_list().
+        This avoids calls in a loop to compile_pattern_list()::
+
+ cpl = self.compile_pattern_list(my_pattern)
+ while some_condition:
+ ...
+ i = self.expect_list(cpl, timeout)
+ ...
+ '''
+
+ if patterns is None:
+ return []
+ if not isinstance(patterns, list):
+ patterns = [patterns]
+
+ # Allow dot to match \n
+ compile_flags = re.DOTALL
+ if self.ignorecase:
+ compile_flags = compile_flags | re.IGNORECASE
+ compiled_pattern_list = []
+ for idx, p in enumerate(patterns):
+ if isinstance(p, self.allowed_string_types):
+ p = self._coerce_expect_string(p)
+ compiled_pattern_list.append(re.compile(p, compile_flags))
+ elif p is EOF:
+ compiled_pattern_list.append(EOF)
+ elif p is TIMEOUT:
+ compiled_pattern_list.append(TIMEOUT)
+ elif isinstance(p, type(re.compile(''))):
+ compiled_pattern_list.append(p)
+ else:
+ self._pattern_type_err(p)
+ return compiled_pattern_list
+
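As the docstring suggests, pre-compiling pays off in loops; a short sketch of the accepted pattern types, assuming a spawned ``child`` in the default bytes mode and an illustrative command::

    import re
    import pexpect

    child = pexpect.spawn('ping -c 5 localhost')   # assumed command
    cpl = child.compile_pattern_list([
        'bytes from',             # str: coerced to bytes, compiled with re.DOTALL
        re.compile(b'icmp_seq'),  # precompiled patterns pass through unchanged
        pexpect.EOF,              # EOF/TIMEOUT sentinels are stored as-is
    ])
    while child.expect_list(cpl) != 2:  # loop until EOF (index 2)
        pass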
def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw):
- '''This seeks through the stream until a pattern is matched. The
- pattern is overloaded and may take several types. The pattern can be a
- StringType, EOF, a compiled re, or a list of any of those types.
- Strings will be compiled to re types. This returns the index into the
- pattern list. If the pattern was not a list this returns index 0 on a
- successful match. This may raise exceptions for EOF or TIMEOUT. To
- avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
- list. That will cause expect to match an EOF or TIMEOUT condition
- instead of raising an exception.
-
- If you pass a list of patterns and more than one matches, the first
- match in the stream is chosen. If more than one pattern matches at that
- point, the leftmost in the pattern list is chosen. For example::
-
- # the input is 'foobar'
- index = p.expect(['bar', 'foo', 'foobar'])
- # returns 1('foo') even though 'foobar' is a "better" match
-
- Please note, however, that buffering can affect this behavior, since
- input arrives in unpredictable chunks. For example::
-
- # the input is 'foobar'
- index = p.expect(['foobar', 'foo'])
- # returns 0('foobar') if all input is available at once,
+ '''This seeks through the stream until a pattern is matched. The
+ pattern is overloaded and may take several types. The pattern can be a
+ StringType, EOF, a compiled re, or a list of any of those types.
+ Strings will be compiled to re types. This returns the index into the
+ pattern list. If the pattern was not a list this returns index 0 on a
+ successful match. This may raise exceptions for EOF or TIMEOUT. To
+ avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
+ list. That will cause expect to match an EOF or TIMEOUT condition
+ instead of raising an exception.
+
+ If you pass a list of patterns and more than one matches, the first
+ match in the stream is chosen. If more than one pattern matches at that
+ point, the leftmost in the pattern list is chosen. For example::
+
+ # the input is 'foobar'
+ index = p.expect(['bar', 'foo', 'foobar'])
+ # returns 1('foo') even though 'foobar' is a "better" match
+
+ Please note, however, that buffering can affect this behavior, since
+ input arrives in unpredictable chunks. For example::
+
+ # the input is 'foobar'
+ index = p.expect(['foobar', 'foo'])
+ # returns 0('foobar') if all input is available at once,
# but returns 1('foo') if parts of the final 'bar' arrive late
-
- When a match is found for the given pattern, the class instance
- attribute *match* becomes an re.MatchObject result. Should an EOF
- or TIMEOUT pattern match, then the match attribute will be an instance
- of that exception class. The pairing before and after class
- instance attributes are views of the data preceding and following
- the matching pattern. On general exception, class attribute
- *before* is all data received up to the exception, while *match* and
- *after* attributes are value None.
-
- When the keyword argument timeout is -1 (default), then TIMEOUT will
- raise after the default value specified by the class timeout
- attribute. When None, TIMEOUT will not be raised and may block
- indefinitely until match.
-
- When the keyword argument searchwindowsize is -1 (default), then the
- value specified by the class maxread attribute is used.
-
- A list entry may be EOF or TIMEOUT instead of a string. This will
- catch these exceptions and return the index of the list entry instead
- of raising the exception. The attribute 'after' will be set to the
- exception type. The attribute 'match' will be None. This allows you to
- write code like this::
-
- index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
- if index == 0:
- do_something()
- elif index == 1:
- do_something_else()
- elif index == 2:
- do_some_other_thing()
- elif index == 3:
- do_something_completely_different()
-
- instead of code like this::
-
- try:
- index = p.expect(['good', 'bad'])
- if index == 0:
- do_something()
- elif index == 1:
- do_something_else()
- except EOF:
- do_some_other_thing()
- except TIMEOUT:
- do_something_completely_different()
-
- These two forms are equivalent. It all depends on what you want. You
- can also just expect the EOF if you are waiting for all output of a
- child to finish. For example::
-
- p = pexpect.spawn('/bin/ls')
- p.expect(pexpect.EOF)
- print p.before
-
- If you are trying to optimize for speed then see expect_list().
-
- On Python 3.4, or Python 3.3 with asyncio installed, passing
+
+ When a match is found for the given pattern, the class instance
+ attribute *match* becomes an re.MatchObject result. Should an EOF
+ or TIMEOUT pattern match, then the match attribute will be an instance
+ of that exception class. The pairing before and after class
+ instance attributes are views of the data preceding and following
+ the matching pattern. On general exception, class attribute
+ *before* is all data received up to the exception, while *match* and
+        *after* attributes are None.
+
+ When the keyword argument timeout is -1 (default), then TIMEOUT will
+ raise after the default value specified by the class timeout
+ attribute. When None, TIMEOUT will not be raised and may block
+        indefinitely until a match.
+
+ When the keyword argument searchwindowsize is -1 (default), then the
+ value specified by the class maxread attribute is used.
+
+ A list entry may be EOF or TIMEOUT instead of a string. This will
+ catch these exceptions and return the index of the list entry instead
+ of raising the exception. The attribute 'after' will be set to the
+ exception type. The attribute 'match' will be None. This allows you to
+ write code like this::
+
+ index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
+ if index == 0:
+ do_something()
+ elif index == 1:
+ do_something_else()
+ elif index == 2:
+ do_some_other_thing()
+ elif index == 3:
+ do_something_completely_different()
+
+ instead of code like this::
+
+ try:
+ index = p.expect(['good', 'bad'])
+ if index == 0:
+ do_something()
+ elif index == 1:
+ do_something_else()
+ except EOF:
+ do_some_other_thing()
+ except TIMEOUT:
+ do_something_completely_different()
+
+ These two forms are equivalent. It all depends on what you want. You
+ can also just expect the EOF if you are waiting for all output of a
+ child to finish. For example::
+
+ p = pexpect.spawn('/bin/ls')
+ p.expect(pexpect.EOF)
+            print(p.before)
+
+ If you are trying to optimize for speed then see expect_list().
+
+ On Python 3.4, or Python 3.3 with asyncio installed, passing
``async_=True`` will make this return an :mod:`asyncio` coroutine,
- which you can yield from to get the same result that this method would
- normally give directly. So, inside a coroutine, you can replace this code::
-
- index = p.expect(patterns)
-
- With this non-blocking form::
-
+ which you can yield from to get the same result that this method would
+ normally give directly. So, inside a coroutine, you can replace this code::
+
+ index = p.expect(patterns)
+
+ With this non-blocking form::
+
index = yield from p.expect(patterns, async_=True)
- '''
+ '''
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
-
- compiled_pattern_list = self.compile_pattern_list(pattern)
- return self.expect_list(compiled_pattern_list,
+
+ compiled_pattern_list = self.compile_pattern_list(pattern)
+ return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize, async_)
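A small sketch of the ``before``/``match``/``after`` bookkeeping described in the docstring, assuming a trivial child process::

    import pexpect

    child = pexpect.spawn('echo hello world')
    child.expect('hello')
    child.before   # b''       - everything read before the match
    child.after    # b'hello'  - the matched text
    child.match    # the re match object; child.match.group(0) == b'hello'
    child.expect(pexpect.EOF)
    child.before   # b' world\r\n' - the rest of the output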
-
- def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
+
+ def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
- '''This takes a list of compiled regular expressions and returns the
- index into the pattern_list that matched the child output. The list may
- also contain EOF or TIMEOUT(which are not compiled regular
- expressions). This method is similar to the expect() method except that
- expect_list() does not recompile the pattern list on every call. This
- may help if you are trying to optimize for speed, otherwise just use
- the expect() method. This is called by expect().
-
-
+ '''This takes a list of compiled regular expressions and returns the
+ index into the pattern_list that matched the child output. The list may
+        also contain EOF or TIMEOUT (which are not compiled regular
+        expressions). This method is similar to the expect() method except that
+        expect_list() does not recompile the pattern list on every call. This
+        may help if you are trying to optimize for speed; otherwise, just use
+ the expect() method. This is called by expect().
+
+
Like :meth:`expect`, passing ``async_=True`` will make this return an
- asyncio coroutine.
- '''
- if timeout == -1:
- timeout = self.timeout
+ asyncio coroutine.
+ '''
+ if timeout == -1:
+ timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
-
- exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
+
+ exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
- return expect_async(exp, timeout)
- else:
- return exp.expect_loop(timeout)
-
- def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
+ return expect_async(exp, timeout)
+ else:
+ return exp.expect_loop(timeout)
+
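For the asyncio path, ``async_=True`` makes both expect() and expect_list() return a coroutine instead of blocking; a hedged sketch (the command name is illustrative)::

    import asyncio
    import pexpect

    async def wait_ready(child):
        # `await` works on Python 3.5+; the docstring's `yield from`
        # form covers 3.3/3.4
        return await child.expect(['ready', pexpect.EOF], async_=True)

    child = pexpect.spawn('some_server --verbose')  # hypothetical command
    asyncio.get_event_loop().run_until_complete(wait_ready(child))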
+ def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
-
- '''This is similar to expect(), but uses plain string matching instead
- of compiled regular expressions in 'pattern_list'. The 'pattern_list'
- may be a string; a list or other sequence of strings; or TIMEOUT and
- EOF.
-
- This call might be faster than expect() for two reasons: string
- searching is faster than RE matching and it is possible to limit the
- search to just the end of the input buffer.
-
- This method is also useful when you don't want to have to worry about
- escaping regular expression characters that you want to match.
-
+
+ '''This is similar to expect(), but uses plain string matching instead
+ of compiled regular expressions in 'pattern_list'. The 'pattern_list'
+ may be a string; a list or other sequence of strings; or TIMEOUT and
+ EOF.
+
+ This call might be faster than expect() for two reasons: string
+ searching is faster than RE matching and it is possible to limit the
+ search to just the end of the input buffer.
+
+ This method is also useful when you don't want to have to worry about
+ escaping regular expression characters that you want to match.
+
Like :meth:`expect`, passing ``async_=True`` will make this return an
- asyncio coroutine.
- '''
- if timeout == -1:
- timeout = self.timeout
+ asyncio coroutine.
+ '''
+ if timeout == -1:
+ timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
-
- if (isinstance(pattern_list, self.allowed_string_types) or
- pattern_list in (TIMEOUT, EOF)):
- pattern_list = [pattern_list]
-
- def prepare_pattern(pattern):
- if pattern in (TIMEOUT, EOF):
- return pattern
- if isinstance(pattern, self.allowed_string_types):
- return self._coerce_expect_string(pattern)
- self._pattern_type_err(pattern)
-
- try:
- pattern_list = iter(pattern_list)
- except TypeError:
- self._pattern_type_err(pattern_list)
- pattern_list = [prepare_pattern(p) for p in pattern_list]
-
- exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
+
+ if (isinstance(pattern_list, self.allowed_string_types) or
+ pattern_list in (TIMEOUT, EOF)):
+ pattern_list = [pattern_list]
+
+ def prepare_pattern(pattern):
+ if pattern in (TIMEOUT, EOF):
+ return pattern
+ if isinstance(pattern, self.allowed_string_types):
+ return self._coerce_expect_string(pattern)
+ self._pattern_type_err(pattern)
+
+ try:
+ pattern_list = iter(pattern_list)
+ except TypeError:
+ self._pattern_type_err(pattern_list)
+ pattern_list = [prepare_pattern(p) for p in pattern_list]
+
+ exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
- return expect_async(exp, timeout)
- else:
- return exp.expect_loop(timeout)
-
- def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
- '''This is the common loop used inside expect. The 'searcher' should be
- an instance of searcher_re or searcher_string, which describes how and
- what to search for in the input.
-
- See expect() for other arguments, return value and exceptions. '''
-
- exp = Expecter(self, searcher, searchwindowsize)
- return exp.expect_loop(timeout)
-
- def read(self, size=-1):
- '''This reads at most "size" bytes from the file (less if the read hits
- EOF before obtaining size bytes). If the size argument is negative or
- omitted, read all data until EOF is reached. The bytes are returned as
- a string object. An empty string is returned when EOF is encountered
- immediately. '''
-
- if size == 0:
- return self.string_type()
- if size < 0:
- # delimiter default is EOF
- self.expect(self.delimiter)
- return self.before
-
- # I could have done this more directly by not using expect(), but
- # I deliberately decided to couple read() to expect() so that
+ return expect_async(exp, timeout)
+ else:
+ return exp.expect_loop(timeout)
+
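Because expect_exact() matches literal strings, regex metacharacters need no escaping; a minimal sketch, assuming a shell whose prompt ends in ``'$ '``::

    import pexpect

    child = pexpect.spawn('sh')
    child.expect_exact('$ ')     # '$' would need escaping with expect()
    child.sendline('echo done')
    child.expect_exact(['done', pexpect.TIMEOUT], timeout=5)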
+ def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
+ '''This is the common loop used inside expect. The 'searcher' should be
+ an instance of searcher_re or searcher_string, which describes how and
+ what to search for in the input.
+
+ See expect() for other arguments, return value and exceptions. '''
+
+ exp = Expecter(self, searcher, searchwindowsize)
+ return exp.expect_loop(timeout)
+
+ def read(self, size=-1):
+ '''This reads at most "size" bytes from the file (less if the read hits
+ EOF before obtaining size bytes). If the size argument is negative or
+ omitted, read all data until EOF is reached. The bytes are returned as
+ a string object. An empty string is returned when EOF is encountered
+ immediately. '''
+
+ if size == 0:
+ return self.string_type()
+ if size < 0:
+ # delimiter default is EOF
+ self.expect(self.delimiter)
+ return self.before
+
+ # I could have done this more directly by not using expect(), but
+ # I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
- # It's a little less efficient, but there is less for me to
- # worry about if I have to later modify read() or expect().
- # Note, it's OK if size==-1 in the regex. That just means it
- # will never match anything in which case we stop only on EOF.
- cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
- # delimiter default is EOF
- index = self.expect([cre, self.delimiter])
- if index == 0:
- ### FIXME self.before should be ''. Should I assert this?
- return self.after
- return self.before
-
- def readline(self, size=-1):
- '''This reads and returns one entire line. The newline at the end of
- line is returned as part of the string, unless the file ends without a
- newline. An empty string is returned if EOF is encountered immediately.
- This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
- this is what the pseudotty device returns. So contrary to what you may
- expect you will receive newlines as \\r\\n.
-
- If the size argument is 0 then an empty string is returned. In all
- other cases the size argument is ignored, which is not standard
- behavior for a file-like object. '''
-
- if size == 0:
- return self.string_type()
- # delimiter default is EOF
- index = self.expect([self.crlf, self.delimiter])
- if index == 0:
- return self.before + self.crlf
- else:
- return self.before
-
- def __iter__(self):
- '''This is to support iterators over a file-like object.
- '''
- return iter(self.readline, self.string_type())
-
- def readlines(self, sizehint=-1):
- '''This reads until EOF using readline() and returns a list containing
- the lines thus read. The optional 'sizehint' argument is ignored.
- Remember, because this reads until EOF that means the child
- process should have closed its stdout. If you run this method on
- a child that is still running with its stdout open then this
- method will block until it timesout.'''
-
- lines = []
- while True:
- line = self.readline()
- if not line:
- break
- lines.append(line)
- return lines
-
- def fileno(self):
- '''Expose file descriptor for a file-like interface
- '''
- return self.child_fd
-
- def flush(self):
- '''This does nothing. It is here to support the interface for a
- File-like object. '''
- pass
-
- def isatty(self):
- """Overridden in subclass using tty"""
- return False
-
- # For 'with spawn(...) as child:'
- def __enter__(self):
- return self
-
- def __exit__(self, etype, evalue, tb):
- # We rely on subclasses to implement close(). If they don't, it's not
- # clear what a context manager should do.
- self.close()
+ # It's a little less efficient, but there is less for me to
+ # worry about if I have to later modify read() or expect().
+ # Note, it's OK if size==-1 in the regex. That just means it
+ # will never match anything in which case we stop only on EOF.
+ cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
+ # delimiter default is EOF
+ index = self.expect([cre, self.delimiter])
+ if index == 0:
+ ### FIXME self.before should be ''. Should I assert this?
+ return self.after
+ return self.before
+
+ def readline(self, size=-1):
+ '''This reads and returns one entire line. The newline at the end of
+ line is returned as part of the string, unless the file ends without a
+ newline. An empty string is returned if EOF is encountered immediately.
+ This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
+        this is what the pseudotty device returns. So, contrary to what you may
+        expect, you will receive newlines as \\r\\n.
+
+ If the size argument is 0 then an empty string is returned. In all
+ other cases the size argument is ignored, which is not standard
+ behavior for a file-like object. '''
+
+ if size == 0:
+ return self.string_type()
+ # delimiter default is EOF
+ index = self.expect([self.crlf, self.delimiter])
+ if index == 0:
+ return self.before + self.crlf
+ else:
+ return self.before
+
+ def __iter__(self):
+ '''This is to support iterators over a file-like object.
+ '''
+ return iter(self.readline, self.string_type())
+
+ def readlines(self, sizehint=-1):
+ '''This reads until EOF using readline() and returns a list containing
+ the lines thus read. The optional 'sizehint' argument is ignored.
+ Remember, because this reads until EOF that means the child
+ process should have closed its stdout. If you run this method on
+ a child that is still running with its stdout open then this
+        method will block until it times out.'''
+
+ lines = []
+ while True:
+ line = self.readline()
+ if not line:
+ break
+ lines.append(line)
+ return lines
+
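Since __iter__() yields lines until the empty-string sentinel, a spawn can be consumed like a file; a short sketch, assuming bytes mode::

    import pexpect

    child = pexpect.spawn('/bin/ls -1')
    for line in child:   # iter(child.readline, b'') under the hood
        # lines end with \r\n because the data comes through a pty
        print(line.rstrip().decode('ascii', 'replace'))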
+ def fileno(self):
+ '''Expose file descriptor for a file-like interface
+ '''
+ return self.child_fd
+
+ def flush(self):
+ '''This does nothing. It is here to support the interface for a
+ File-like object. '''
+ pass
+
+ def isatty(self):
+ """Overridden in subclass using tty"""
+ return False
+
+ # For 'with spawn(...) as child:'
+ def __enter__(self):
+ return self
+
+ def __exit__(self, etype, evalue, tb):
+ # We rely on subclasses to implement close(). If they don't, it's not
+ # clear what a context manager should do.
+ self.close()
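The context-manager support above relies on the subclass's close(); a minimal usage sketch::

    import pexpect

    with pexpect.spawn('cat') as child:
        child.sendline('ping')
        child.expect('ping')
    # __exit__ has called child.close(), releasing the pty fd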
diff --git a/contrib/python/pexpect/pexpect/utils.py b/contrib/python/pexpect/pexpect/utils.py
index 13a7849a0e..f774519609 100644
--- a/contrib/python/pexpect/pexpect/utils.py
+++ b/contrib/python/pexpect/pexpect/utils.py
@@ -1,130 +1,130 @@
-import os
-import sys
-import stat
+import os
+import sys
+import stat
import select
import time
import errno
-
+
try:
InterruptedError
except NameError:
# Alias Python2 exception to Python3
InterruptedError = select.error
-
+
if sys.version_info[0] >= 3:
string_types = (str,)
else:
string_types = (unicode, str)
-def is_executable_file(path):
- """Checks that path is an executable regular file, or a symlink towards one.
-
- This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``.
- """
- # follow symlinks,
- fpath = os.path.realpath(path)
-
- if not os.path.isfile(fpath):
- # non-files (directories, fifo, etc.)
- return False
-
- mode = os.stat(fpath).st_mode
-
- if (sys.platform.startswith('sunos')
- and os.getuid() == 0):
- # When root on Solaris, os.X_OK is True for *all* files, irregardless
- # of their executability -- instead, any permission bit of any user,
- # group, or other is fine enough.
- #
- # (This may be true for other "Unix98" OS's such as HP-UX and AIX)
- return bool(mode & (stat.S_IXUSR |
- stat.S_IXGRP |
- stat.S_IXOTH))
-
- return os.access(fpath, os.X_OK)
-
-
+def is_executable_file(path):
+ """Checks that path is an executable regular file, or a symlink towards one.
+
+    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
+ """
+ # follow symlinks,
+ fpath = os.path.realpath(path)
+
+ if not os.path.isfile(fpath):
+ # non-files (directories, fifo, etc.)
+ return False
+
+ mode = os.stat(fpath).st_mode
+
+ if (sys.platform.startswith('sunos')
+ and os.getuid() == 0):
+        # When root on Solaris, os.X_OK is True for *all* files, regardless
+ # of their executability -- instead, any permission bit of any user,
+ # group, or other is fine enough.
+ #
+ # (This may be true for other "Unix98" OS's such as HP-UX and AIX)
+ return bool(mode & (stat.S_IXUSR |
+ stat.S_IXGRP |
+ stat.S_IXOTH))
+
+ return os.access(fpath, os.X_OK)
+
+
def which(filename, env=None):
- '''This takes a given filename; tries to find it in the environment path;
- then checks if it is executable. This returns the full path to the filename
- if found and executable. Otherwise this returns None.'''
-
- # Special case where filename contains an explicit path.
- if os.path.dirname(filename) != '' and is_executable_file(filename):
- return filename
+ '''This takes a given filename; tries to find it in the environment path;
+ then checks if it is executable. This returns the full path to the filename
+ if found and executable. Otherwise this returns None.'''
+
+ # Special case where filename contains an explicit path.
+ if os.path.dirname(filename) != '' and is_executable_file(filename):
+ return filename
if env is None:
env = os.environ
p = env.get('PATH')
if not p:
- p = os.defpath
- pathlist = p.split(os.pathsep)
- for path in pathlist:
- ff = os.path.join(path, filename)
- if is_executable_file(ff):
- return ff
- return None
-
-
-def split_command_line(command_line):
-
- '''This splits a command line into a list of arguments. It splits arguments
- on spaces, but handles embedded quotes, doublequotes, and escaped
- characters. It's impossible to do this with a regular expression, so I
- wrote a little state machine to parse the command line. '''
-
- arg_list = []
- arg = ''
-
- # Constants to name the states we can be in.
- state_basic = 0
- state_esc = 1
- state_singlequote = 2
- state_doublequote = 3
- # The state when consuming whitespace between commands.
- state_whitespace = 4
- state = state_basic
-
- for c in command_line:
- if state == state_basic or state == state_whitespace:
- if c == '\\':
- # Escape the next character
- state = state_esc
- elif c == r"'":
- # Handle single quote
- state = state_singlequote
- elif c == r'"':
- # Handle double quote
- state = state_doublequote
- elif c.isspace():
- # Add arg to arg_list if we aren't in the middle of whitespace.
- if state == state_whitespace:
- # Do nothing.
- None
- else:
- arg_list.append(arg)
- arg = ''
- state = state_whitespace
- else:
- arg = arg + c
- state = state_basic
- elif state == state_esc:
- arg = arg + c
- state = state_basic
- elif state == state_singlequote:
- if c == r"'":
- state = state_basic
- else:
- arg = arg + c
- elif state == state_doublequote:
- if c == r'"':
- state = state_basic
- else:
- arg = arg + c
-
- if arg != '':
- arg_list.append(arg)
- return arg_list
+ p = os.defpath
+ pathlist = p.split(os.pathsep)
+ for path in pathlist:
+ ff = os.path.join(path, filename)
+ if is_executable_file(ff):
+ return ff
+ return None
+
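A short sketch of which()'s lookup order, assuming a typical Unix environment (exact paths vary by system)::

    from pexpect.utils import which

    which('ls')                    # e.g. '/bin/ls'
    which('ls', env={'PATH': ''})  # empty PATH falls back to os.defpath
    which('no-such-binary')        # None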
+
+def split_command_line(command_line):
+
+ '''This splits a command line into a list of arguments. It splits arguments
+ on spaces, but handles embedded quotes, doublequotes, and escaped
+ characters. It's impossible to do this with a regular expression, so I
+ wrote a little state machine to parse the command line. '''
+
+ arg_list = []
+ arg = ''
+
+ # Constants to name the states we can be in.
+ state_basic = 0
+ state_esc = 1
+ state_singlequote = 2
+ state_doublequote = 3
+ # The state when consuming whitespace between commands.
+ state_whitespace = 4
+ state = state_basic
+
+ for c in command_line:
+ if state == state_basic or state == state_whitespace:
+ if c == '\\':
+ # Escape the next character
+ state = state_esc
+ elif c == r"'":
+ # Handle single quote
+ state = state_singlequote
+ elif c == r'"':
+ # Handle double quote
+ state = state_doublequote
+ elif c.isspace():
+ # Add arg to arg_list if we aren't in the middle of whitespace.
+ if state == state_whitespace:
+ # Do nothing.
+ None
+ else:
+ arg_list.append(arg)
+ arg = ''
+ state = state_whitespace
+ else:
+ arg = arg + c
+ state = state_basic
+ elif state == state_esc:
+ arg = arg + c
+ state = state_basic
+ elif state == state_singlequote:
+ if c == r"'":
+ state = state_basic
+ else:
+ arg = arg + c
+ elif state == state_doublequote:
+ if c == r'"':
+ state = state_basic
+ else:
+ arg = arg + c
+
+ if arg != '':
+ arg_list.append(arg)
+ return arg_list
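A quick illustration of the state machine above handling quotes and escapes::

    from pexpect.utils import split_command_line

    split_command_line(r'grep -e "two words" file\ name')
    # -> ['grep', '-e', 'two words', 'file name']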
def select_ignore_interrupts(iwtd, owtd, ewtd, timeout=None):
diff --git a/contrib/python/pexpect/ya.make b/contrib/python/pexpect/ya.make
index 2fdba71fba..a5bb92fcac 100644
--- a/contrib/python/pexpect/ya.make
+++ b/contrib/python/pexpect/ya.make
@@ -1,35 +1,35 @@
PY23_LIBRARY()
-
+
LICENSE(ISC)
OWNER(g:python-contrib borman)
VERSION(4.8.0)
-PEERDIR(
+PEERDIR(
contrib/python/ptyprocess
-)
-
+)
+
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- pexpect/ANSI.py
- pexpect/FSM.py
- pexpect/__init__.py
- pexpect/exceptions.py
- pexpect/expect.py
- pexpect/fdpexpect.py
- pexpect/popen_spawn.py
- pexpect/pty_spawn.py
- pexpect/pxssh.py
- pexpect/replwrap.py
- pexpect/run.py
- pexpect/screen.py
- pexpect/spawnbase.py
- pexpect/utils.py
-)
-
+PY_SRCS(
+ TOP_LEVEL
+ pexpect/ANSI.py
+ pexpect/FSM.py
+ pexpect/__init__.py
+ pexpect/exceptions.py
+ pexpect/expect.py
+ pexpect/fdpexpect.py
+ pexpect/popen_spawn.py
+ pexpect/pty_spawn.py
+ pexpect/pxssh.py
+ pexpect/replwrap.py
+ pexpect/run.py
+ pexpect/screen.py
+ pexpect/spawnbase.py
+ pexpect/utils.py
+)
+
IF (PYTHON3)
PY_SRCS(
TOP_LEVEL
@@ -43,4 +43,4 @@ RESOURCE_FILES(
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/pickleshare/pickleshare.py b/contrib/python/pickleshare/pickleshare.py
index c5145f3c95..086f84f6ea 100644
--- a/contrib/python/pickleshare/pickleshare.py
+++ b/contrib/python/pickleshare/pickleshare.py
@@ -1,75 +1,75 @@
-#!/usr/bin/env python
-
-""" PickleShare - a small 'shelve' like datastore with concurrency support
-
-Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
-shelve, many processes can access the database simultaneously. Changing a
-value in database is immediately visible to other processes accessing the
-same database.
-
-Concurrency is possible because the values are stored in separate files. Hence
-the "database" is a directory where *all* files are governed by PickleShare.
-
-Example usage::
-
- from pickleshare import *
- db = PickleShareDB('~/testpickleshare')
- db.clear()
- print "Should be empty:",db.items()
- db['hello'] = 15
- db['aku ankka'] = [1,2,313]
- db['paths/are/ok/key'] = [1,(5,46)]
- print db.keys()
- del db['aku ankka']
-
-This module is certainly not ZODB, but can be used for low-load
-(non-mission-critical) situations where tiny code size trumps the
-advanced features of a "real" object database.
-
+#!/usr/bin/env python
+
+""" PickleShare - a small 'shelve' like datastore with concurrency support
+
+Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
+shelve, many processes can access the database simultaneously. Changing a
+value in database is immediately visible to other processes accessing the
+same database.
+
+Concurrency is possible because the values are stored in separate files. Hence
+the "database" is a directory where *all* files are governed by PickleShare.
+
+Example usage::
+
+ from pickleshare import *
+ db = PickleShareDB('~/testpickleshare')
+ db.clear()
+ print "Should be empty:",db.items()
+ db['hello'] = 15
+ db['aku ankka'] = [1,2,313]
+ db['paths/are/ok/key'] = [1,(5,46)]
+    print(db.keys())
+ del db['aku ankka']
+
+This module is certainly not ZODB, but can be used for low-load
+(non-mission-critical) situations where tiny code size trumps the
+advanced features of a "real" object database.
+
Installation guide: pip install pickleshare
-
-Author: Ville Vainio <vivainio@gmail.com>
-License: MIT open source license.
-
-"""
-
-from __future__ import print_function
-
-
+
+Author: Ville Vainio <vivainio@gmail.com>
+License: MIT open source license.
+
+"""
+
+from __future__ import print_function
+
+
__version__ = "0.7.5"
-
+
try:
from pathlib import Path
except ImportError:
# Python 2 backport
from pathlib2 import Path
-import os,stat,time
-try:
+import os,stat,time
+try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
try:
- import cPickle as pickle
-except ImportError:
- import pickle
-import errno
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import errno
import sys
-
+
if sys.version_info[0] >= 3:
string_types = (str,)
else:
string_types = (str, unicode)
-def gethashfile(key):
- return ("%02x" % abs(hash(key) % 256))[-2:]
-
-_sentinel = object()
-
+def gethashfile(key):
+ return ("%02x" % abs(hash(key) % 256))[-2:]
+
+_sentinel = object()
+
class PickleShareDB(collections_abc.MutableMapping):
- """ The main 'connection' object for PickleShare database """
- def __init__(self,root):
- """ Return a db object that will manage the specied directory"""
+ """ The main 'connection' object for PickleShare database """
+ def __init__(self,root):
+ """ Return a db object that will manage the specied directory"""
if not isinstance(root, string_types):
root = str(root)
root = os.path.abspath(os.path.expanduser(root))
@@ -82,271 +82,271 @@ class PickleShareDB(collections_abc.MutableMapping):
except OSError as e:
if e.errno != errno.EEXIST:
raise
- # cache has { 'key' : (obj, orig_mod_time) }
- self.cache = {}
-
-
- def __getitem__(self,key):
- """ db['key'] reading """
- fil = self.root / key
- try:
- mtime = (fil.stat()[stat.ST_MTIME])
- except OSError:
- raise KeyError(key)
-
- if fil in self.cache and mtime == self.cache[fil][1]:
- return self.cache[fil][0]
- try:
- # The cached item has expired, need to read
- with fil.open("rb") as f:
- obj = pickle.loads(f.read())
- except:
- raise KeyError(key)
-
- self.cache[fil] = (obj,mtime)
- return obj
-
- def __setitem__(self,key,value):
- """ db['key'] = 5 """
- fil = self.root / key
- parent = fil.parent
+ # cache has { 'key' : (obj, orig_mod_time) }
+ self.cache = {}
+
+
+ def __getitem__(self,key):
+ """ db['key'] reading """
+ fil = self.root / key
+ try:
+ mtime = (fil.stat()[stat.ST_MTIME])
+ except OSError:
+ raise KeyError(key)
+
+ if fil in self.cache and mtime == self.cache[fil][1]:
+ return self.cache[fil][0]
+ try:
+ # The cached item has expired, need to read
+ with fil.open("rb") as f:
+ obj = pickle.loads(f.read())
+ except:
+ raise KeyError(key)
+
+ self.cache[fil] = (obj,mtime)
+ return obj
+
+ def __setitem__(self,key,value):
+ """ db['key'] = 5 """
+ fil = self.root / key
+ parent = fil.parent
if parent and not parent.is_dir():
parent.mkdir(parents=True)
- # We specify protocol 2, so that we can mostly go between Python 2
- # and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
- with fil.open('wb') as f:
- pickle.dump(value, f, protocol=2)
- try:
+ # We specify protocol 2, so that we can mostly go between Python 2
+ # and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
+ with fil.open('wb') as f:
+ pickle.dump(value, f, protocol=2)
+ try:
self.cache[fil] = (value, fil.stat().st_mtime)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- def hset(self, hashroot, key, value):
- """ hashed set """
- hroot = self.root / hashroot
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def hset(self, hashroot, key, value):
+ """ hashed set """
+ hroot = self.root / hashroot
if not hroot.is_dir():
hroot.mkdir()
- hfile = hroot / gethashfile(key)
- d = self.get(hfile, {})
- d.update( {key : value})
- self[hfile] = d
-
-
-
- def hget(self, hashroot, key, default = _sentinel, fast_only = True):
- """ hashed get """
- hroot = self.root / hashroot
- hfile = hroot / gethashfile(key)
-
- d = self.get(hfile, _sentinel )
- #print "got dict",d,"from",hfile
- if d is _sentinel:
- if fast_only:
- if default is _sentinel:
- raise KeyError(key)
-
- return default
-
- # slow mode ok, works even after hcompress()
- d = self.hdict(hashroot)
-
- return d.get(key, default)
-
- def hdict(self, hashroot):
- """ Get all data contained in hashed category 'hashroot' as dict """
- hfiles = self.keys(hashroot + "/*")
- hfiles.sort()
- last = len(hfiles) and hfiles[-1] or ''
- if last.endswith('xx'):
- # print "using xx"
- hfiles = [last] + hfiles[:-1]
-
- all = {}
-
- for f in hfiles:
- # print "using",f
- try:
- all.update(self[f])
- except KeyError:
- print("Corrupt",f,"deleted - hset is not threadsafe!")
- del self[f]
-
- self.uncache(f)
-
- return all
-
- def hcompress(self, hashroot):
- """ Compress category 'hashroot', so hset is fast again
-
- hget will fail if fast_only is True for compressed items (that were
- hset before hcompress).
-
- """
- hfiles = self.keys(hashroot + "/*")
- all = {}
- for f in hfiles:
- # print "using",f
- all.update(self[f])
- self.uncache(f)
-
- self[hashroot + '/xx'] = all
- for f in hfiles:
- p = self.root / f
+ hfile = hroot / gethashfile(key)
+ d = self.get(hfile, {})
+ d.update( {key : value})
+ self[hfile] = d
+
+
+
+ def hget(self, hashroot, key, default = _sentinel, fast_only = True):
+ """ hashed get """
+ hroot = self.root / hashroot
+ hfile = hroot / gethashfile(key)
+
+ d = self.get(hfile, _sentinel )
+ #print "got dict",d,"from",hfile
+ if d is _sentinel:
+ if fast_only:
+ if default is _sentinel:
+ raise KeyError(key)
+
+ return default
+
+ # slow mode ok, works even after hcompress()
+ d = self.hdict(hashroot)
+
+ return d.get(key, default)
+
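hset()/hget() bucket many small keys into per-hash files under ``hashroot``; a minimal sketch, assuming a fresh database directory::

    db = PickleShareDB('~/testpickleshare')
    db.hset('counts', 'alpha', 1)     # stored in counts/<hash bucket>
    db.hget('counts', 'alpha')        # -> 1
    db.hget('counts', 'missing', 0)   # -> 0 (default instead of KeyError)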
+ def hdict(self, hashroot):
+ """ Get all data contained in hashed category 'hashroot' as dict """
+ hfiles = self.keys(hashroot + "/*")
+ hfiles.sort()
+ last = len(hfiles) and hfiles[-1] or ''
+ if last.endswith('xx'):
+ # print "using xx"
+ hfiles = [last] + hfiles[:-1]
+
+ all = {}
+
+ for f in hfiles:
+ # print "using",f
+ try:
+ all.update(self[f])
+ except KeyError:
+ print("Corrupt",f,"deleted - hset is not threadsafe!")
+ del self[f]
+
+ self.uncache(f)
+
+ return all
+
+ def hcompress(self, hashroot):
+ """ Compress category 'hashroot', so hset is fast again
+
+ hget will fail if fast_only is True for compressed items (that were
+ hset before hcompress).
+
+ """
+ hfiles = self.keys(hashroot + "/*")
+ all = {}
+ for f in hfiles:
+ # print "using",f
+ all.update(self[f])
+ self.uncache(f)
+
+ self[hashroot + '/xx'] = all
+ for f in hfiles:
+ p = self.root / f
if p.name == 'xx':
- continue
+ continue
p.unlink()
-
-
-
- def __delitem__(self,key):
- """ del db["key"] """
- fil = self.root / key
- self.cache.pop(fil,None)
- try:
+
+
+
+ def __delitem__(self,key):
+ """ del db["key"] """
+ fil = self.root / key
+ self.cache.pop(fil,None)
+ try:
fil.unlink()
- except OSError:
- # notfound and permission denied are ok - we
- # lost, the other process wins the conflict
- pass
-
- def _normalized(self, p):
- """ Make a key suitable for user's eyes """
+ except OSError:
+ # notfound and permission denied are ok - we
+ # lost, the other process wins the conflict
+ pass
+
+ def _normalized(self, p):
+ """ Make a key suitable for user's eyes """
return str(p.relative_to(self.root)).replace('\\','/')
-
- def keys(self, globpat = None):
- """ All keys in DB, or all keys matching a glob"""
-
- if globpat is None:
+
+ def keys(self, globpat = None):
+ """ All keys in DB, or all keys matching a glob"""
+
+ if globpat is None:
files = self.root.rglob('*')
- else:
+ else:
files = self.root.glob(globpat)
return [self._normalized(p) for p in files if p.is_file()]
-
- def __iter__(self):
- return iter(self.keys())
-
- def __len__(self):
- return len(self.keys())
-
- def uncache(self,*items):
- """ Removes all, or specified items from cache
-
- Use this after reading a large amount of large objects
- to free up memory, when you won't be needing the objects
- for a while.
-
- """
- if not items:
- self.cache = {}
- for it in items:
- self.cache.pop(it,None)
-
- def waitget(self,key, maxwaittime = 60 ):
- """ Wait (poll) for a key to get a value
-
- Will wait for `maxwaittime` seconds before raising a KeyError.
- The call exits normally if the `key` field in db gets a value
- within the timeout period.
-
- Use this for synchronizing different processes or for ensuring
- that an unfortunately timed "db['key'] = newvalue" operation
- in another process (which causes all 'get' operation to cause a
- KeyError for the duration of pickling) won't screw up your program
- logic.
- """
-
- wtimes = [0.2] * 3 + [0.5] * 2 + [1]
- tries = 0
- waited = 0
- while 1:
- try:
- val = self[key]
- return val
- except KeyError:
- pass
-
- if waited > maxwaittime:
- raise KeyError(key)
-
- time.sleep(wtimes[tries])
- waited+=wtimes[tries]
- if tries < len(wtimes) -1:
- tries+=1
-
- def getlink(self,folder):
- """ Get a convenient link for accessing items """
- return PickleShareLink(self, folder)
-
- def __repr__(self):
- return "PickleShareDB('%s')" % self.root
-
-
-
-class PickleShareLink:
- """ A shortdand for accessing nested PickleShare data conveniently.
-
- Created through PickleShareDB.getlink(), example::
-
- lnk = db.getlink('myobjects/test')
- lnk.foo = 2
- lnk.bar = lnk.foo + 5
-
- """
- def __init__(self, db, keydir ):
- self.__dict__.update(locals())
-
- def __getattr__(self,key):
- return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
- def __setattr__(self,key,val):
- self.db[self.keydir+'/' + key] = val
- def __repr__(self):
- db = self.__dict__['db']
- keys = db.keys( self.__dict__['keydir'] +"/*")
- return "<PickleShareLink '%s': %s>" % (
- self.__dict__['keydir'],
- ";".join([Path(k).basename() for k in keys]))
-
-def main():
- import textwrap
- usage = textwrap.dedent("""\
- pickleshare - manage PickleShare databases
-
- Usage:
-
- pickleshare dump /path/to/db > dump.txt
- pickleshare load /path/to/db < dump.txt
- pickleshare test /path/to/db
- """)
- DB = PickleShareDB
- import sys
- if len(sys.argv) < 2:
- print(usage)
- return
-
- cmd = sys.argv[1]
- args = sys.argv[2:]
- if cmd == 'dump':
- if not args: args= ['.']
- db = DB(args[0])
- import pprint
- pprint.pprint(db.items())
- elif cmd == 'load':
- cont = sys.stdin.read()
- db = DB(args[0])
- data = eval(cont)
- db.clear()
- for k,v in db.items():
- db[k] = v
- elif cmd == 'testwait':
- db = DB(args[0])
- db.clear()
- print(db.waitget('250'))
- elif cmd == 'test':
- test()
- stress()
-
-if __name__== "__main__":
- main()
-
-
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __len__(self):
+ return len(self.keys())
+
+ def uncache(self,*items):
+ """ Removes all, or specified items from cache
+
+ Use this after reading a large amount of large objects
+ to free up memory, when you won't be needing the objects
+ for a while.
+
+ """
+ if not items:
+ self.cache = {}
+ for it in items:
+ self.cache.pop(it,None)
+
+ def waitget(self,key, maxwaittime = 60 ):
+ """ Wait (poll) for a key to get a value
+
+ Will wait for `maxwaittime` seconds before raising a KeyError.
+ The call exits normally if the `key` field in db gets a value
+ within the timeout period.
+
+ Use this for synchronizing different processes or for ensuring
+ that an unfortunately timed "db['key'] = newvalue" operation
+        in another process (which causes all 'get' operations to raise a
+ KeyError for the duration of pickling) won't screw up your program
+ logic.
+ """
+
+ wtimes = [0.2] * 3 + [0.5] * 2 + [1]
+ tries = 0
+ waited = 0
+ while 1:
+ try:
+ val = self[key]
+ return val
+ except KeyError:
+ pass
+
+ if waited > maxwaittime:
+ raise KeyError(key)
+
+ time.sleep(wtimes[tries])
+ waited+=wtimes[tries]
+ if tries < len(wtimes) -1:
+ tries+=1
+
+ def getlink(self,folder):
+ """ Get a convenient link for accessing items """
+ return PickleShareLink(self, folder)
+
+ def __repr__(self):
+ return "PickleShareDB('%s')" % self.root
+
+
+
+class PickleShareLink:
+ """ A shortdand for accessing nested PickleShare data conveniently.
+
+ Created through PickleShareDB.getlink(), example::
+
+ lnk = db.getlink('myobjects/test')
+ lnk.foo = 2
+ lnk.bar = lnk.foo + 5
+
+ """
+ def __init__(self, db, keydir ):
+ self.__dict__.update(locals())
+
+ def __getattr__(self,key):
+ return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
+ def __setattr__(self,key,val):
+ self.db[self.keydir+'/' + key] = val
+ def __repr__(self):
+ db = self.__dict__['db']
+ keys = db.keys( self.__dict__['keydir'] +"/*")
+ return "<PickleShareLink '%s': %s>" % (
+ self.__dict__['keydir'],
+ ";".join([Path(k).basename() for k in keys]))
+
+def main():
+ import textwrap
+ usage = textwrap.dedent("""\
+ pickleshare - manage PickleShare databases
+
+ Usage:
+
+ pickleshare dump /path/to/db > dump.txt
+ pickleshare load /path/to/db < dump.txt
+ pickleshare test /path/to/db
+ """)
+ DB = PickleShareDB
+ import sys
+ if len(sys.argv) < 2:
+ print(usage)
+ return
+
+ cmd = sys.argv[1]
+ args = sys.argv[2:]
+ if cmd == 'dump':
+ if not args: args= ['.']
+ db = DB(args[0])
+ import pprint
+ pprint.pprint(db.items())
+ elif cmd == 'load':
+ cont = sys.stdin.read()
+ db = DB(args[0])
+ data = eval(cont)
+ db.clear()
+        for k,v in data.items():
+ db[k] = v
+ elif cmd == 'testwait':
+ db = DB(args[0])
+ db.clear()
+ print(db.waitget('250'))
+ elif cmd == 'test':
+ test()
+ stress()
+
+if __name__== "__main__":
+ main()
+
+
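A self-contained Python 3 usage sketch of the dictionary API restored above::

    from pickleshare import PickleShareDB

    db = PickleShareDB('~/testpickleshare')
    db.clear()
    db['hello'] = 15
    db['paths/are/ok/key'] = [1, (5, 46)]
    print(db.keys())   # keys are paths relative to the database root
    del db['hello']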
diff --git a/contrib/python/pickleshare/ya.make b/contrib/python/pickleshare/ya.make
index 73437edb09..e24c2cdad7 100644
--- a/contrib/python/pickleshare/ya.make
+++ b/contrib/python/pickleshare/ya.make
@@ -1,5 +1,5 @@
# Generated by devtools/yamaker (pypi).
-
+
PY23_LIBRARY()
OWNER(borman g:python-contrib)
@@ -7,7 +7,7 @@ OWNER(borman g:python-contrib)
VERSION(0.7.5)
LICENSE(MIT)
-
+
IF (PYTHON2)
PEERDIR(
contrib/python/pathlib2
@@ -16,15 +16,15 @@ ENDIF()
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- pickleshare.py
-)
-
+PY_SRCS(
+ TOP_LEVEL
+ pickleshare.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/pickleshare/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/prompt-toolkit/py2/ya.make b/contrib/python/prompt-toolkit/py2/ya.make
index 10a9424492..db22aa0a70 100644
--- a/contrib/python/prompt-toolkit/py2/ya.make
+++ b/contrib/python/prompt-toolkit/py2/ya.make
@@ -6,11 +6,11 @@ VERSION(1.0.18)
LICENSE(BSD-3-Clause)
-PEERDIR(
+PEERDIR(
contrib/python/six
contrib/python/wcwidth
-)
-
+)
+
NO_LINT()
NO_CHECK_IMPORTS(
diff --git a/contrib/python/prompt-toolkit/ya.make b/contrib/python/prompt-toolkit/ya.make
index bf12940159..f1f936eb3f 100644
--- a/contrib/python/prompt-toolkit/ya.make
+++ b/contrib/python/prompt-toolkit/ya.make
@@ -9,7 +9,7 @@ IF (PYTHON2)
ELSE()
PEERDIR(contrib/python/prompt-toolkit/py3)
ENDIF()
-
+
NO_LINT()
END()
diff --git a/contrib/python/ptyprocess/ptyprocess/__init__.py b/contrib/python/ptyprocess/ptyprocess/__init__.py
index 4dd4f630c5..3a6268e8a6 100644
--- a/contrib/python/ptyprocess/ptyprocess/__init__.py
+++ b/contrib/python/ptyprocess/ptyprocess/__init__.py
@@ -1,4 +1,4 @@
"""Run a subprocess in a pseudo terminal"""
-from .ptyprocess import PtyProcess, PtyProcessUnicode, PtyProcessError
-
+from .ptyprocess import PtyProcess, PtyProcessUnicode, PtyProcessError
+
__version__ = '0.7.0'
diff --git a/contrib/python/ptyprocess/ptyprocess/_fork_pty.py b/contrib/python/ptyprocess/ptyprocess/_fork_pty.py
index 151e097c40..a8d05fe5a3 100644
--- a/contrib/python/ptyprocess/ptyprocess/_fork_pty.py
+++ b/contrib/python/ptyprocess/ptyprocess/_fork_pty.py
@@ -1,78 +1,78 @@
-"""Substitute for the forkpty system call, to support Solaris.
-"""
-import os
-import errno
-
-from pty import (STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, CHILD)
+"""Substitute for the forkpty system call, to support Solaris.
+"""
+import os
+import errno
+
+from pty import (STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, CHILD)
from .util import PtyProcessError
-
-def fork_pty():
- '''This implements a substitute for the forkpty system call. This
- should be more portable than the pty.fork() function. Specifically,
- this should work on Solaris.
-
- Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
- resolve the issue with Python's pty.fork() not supporting Solaris,
- particularly ssh. Based on patch to posixmodule.c authored by Noah
- Spurrier::
-
- http://mail.python.org/pipermail/python-dev/2003-May/035281.html
-
- '''
-
- parent_fd, child_fd = os.openpty()
- if parent_fd < 0 or child_fd < 0:
- raise OSError("os.openpty() failed")
-
- pid = os.fork()
- if pid == CHILD:
- # Child.
- os.close(parent_fd)
- pty_make_controlling_tty(child_fd)
-
- os.dup2(child_fd, STDIN_FILENO)
- os.dup2(child_fd, STDOUT_FILENO)
- os.dup2(child_fd, STDERR_FILENO)
-
- else:
- # Parent.
- os.close(child_fd)
-
- return pid, parent_fd
-
-def pty_make_controlling_tty(tty_fd):
- '''This makes the pseudo-terminal the controlling tty. This should be
- more portable than the pty.fork() function. Specifically, this should
- work on Solaris. '''
-
- child_name = os.ttyname(tty_fd)
-
- # Disconnect from controlling tty, if any. Raises OSError of ENXIO
- # if there was no controlling tty to begin with, such as when
- # executed by a cron(1) job.
- try:
- fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
- os.close(fd)
- except OSError as err:
- if err.errno != errno.ENXIO:
- raise
-
- os.setsid()
-
- # Verify we are disconnected from controlling tty by attempting to open
- # it again. We expect that OSError of ENXIO should always be raised.
- try:
- fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
- os.close(fd)
+
+def fork_pty():
+ '''This implements a substitute for the forkpty system call. This
+ should be more portable than the pty.fork() function. Specifically,
+ this should work on Solaris.
+
+ Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
+ resolve the issue with Python's pty.fork() not supporting Solaris,
+ particularly ssh. Based on patch to posixmodule.c authored by Noah
+ Spurrier::
+
+ http://mail.python.org/pipermail/python-dev/2003-May/035281.html
+
+ '''
+
+ parent_fd, child_fd = os.openpty()
+ if parent_fd < 0 or child_fd < 0:
+ raise OSError("os.openpty() failed")
+
+ pid = os.fork()
+ if pid == CHILD:
+ # Child.
+ os.close(parent_fd)
+ pty_make_controlling_tty(child_fd)
+
+ os.dup2(child_fd, STDIN_FILENO)
+ os.dup2(child_fd, STDOUT_FILENO)
+ os.dup2(child_fd, STDERR_FILENO)
+
+ else:
+ # Parent.
+ os.close(child_fd)
+
+ return pid, parent_fd
+
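A minimal parent/child sketch using the substitute above (assuming ``os`` and ``fork_pty`` are in scope, as in this module)::

    pid, fd = fork_pty()
    if pid == 0:
        # child: exec under the new controlling tty
        os.execvp('sh', ['sh'])
    # parent: talk to the child over the pty master
    os.write(fd, b'echo hi\n')
    data = os.read(fd, 1024)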
+def pty_make_controlling_tty(tty_fd):
+ '''This makes the pseudo-terminal the controlling tty. This should be
+ more portable than the pty.fork() function. Specifically, this should
+ work on Solaris. '''
+
+ child_name = os.ttyname(tty_fd)
+
+ # Disconnect from controlling tty, if any. Raises OSError of ENXIO
+ # if there was no controlling tty to begin with, such as when
+ # executed by a cron(1) job.
+ try:
+ fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
+ os.close(fd)
+ except OSError as err:
+ if err.errno != errno.ENXIO:
+ raise
+
+ os.setsid()
+
+ # Verify we are disconnected from controlling tty by attempting to open
+ # it again. We expect that OSError of ENXIO should always be raised.
+ try:
+ fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
+ os.close(fd)
raise PtyProcessError("OSError of errno.ENXIO should be raised.")
- except OSError as err:
- if err.errno != errno.ENXIO:
- raise
-
- # Verify we can open child pty.
- fd = os.open(child_name, os.O_RDWR)
- os.close(fd)
-
- # Verify we now have a controlling tty.
- fd = os.open("/dev/tty", os.O_WRONLY)
+ except OSError as err:
+ if err.errno != errno.ENXIO:
+ raise
+
+ # Verify we can open child pty.
+ fd = os.open(child_name, os.O_RDWR)
+ os.close(fd)
+
+ # Verify we now have a controlling tty.
+ fd = os.open("/dev/tty", os.O_WRONLY)
os.close(fd)
diff --git a/contrib/python/ptyprocess/ptyprocess/ptyprocess.py b/contrib/python/ptyprocess/ptyprocess/ptyprocess.py
index ede9ec8e3b..78d19fdf8f 100644
--- a/contrib/python/ptyprocess/ptyprocess/ptyprocess.py
+++ b/contrib/python/ptyprocess/ptyprocess/ptyprocess.py
@@ -1,65 +1,65 @@
-import codecs
-import errno
-import fcntl
-import io
-import os
-import pty
-import resource
-import signal
-import struct
-import sys
-import termios
-import time
-
-try:
- import builtins # Python 3
-except ImportError:
- import __builtin__ as builtins # Python 2
-
-# Constants
-from pty import (STDIN_FILENO, CHILD)
-
+import codecs
+import errno
+import fcntl
+import io
+import os
+import pty
+import resource
+import signal
+import struct
+import sys
+import termios
+import time
+
+try:
+ import builtins # Python 3
+except ImportError:
+ import __builtin__ as builtins # Python 2
+
+# Constants
+from pty import (STDIN_FILENO, CHILD)
+
from .util import which, PtyProcessError
-
-_platform = sys.platform.lower()
-
-# Solaris uses internal __fork_pty(). All others use pty.fork().
-_is_solaris = (
- _platform.startswith('solaris') or
- _platform.startswith('sunos'))
-
-if _is_solaris:
- use_native_pty_fork = False
- from . import _fork_pty
-else:
- use_native_pty_fork = True
-
-PY3 = sys.version_info[0] >= 3
-
-if PY3:
- def _byte(i):
- return bytes([i])
-else:
- def _byte(i):
- return chr(i)
-
- class FileNotFoundError(OSError): pass
- class TimeoutError(OSError): pass
-
-_EOF, _INTR = None, None
-
-def _make_eof_intr():
- """Set constants _EOF and _INTR.
-
- This avoids doing potentially costly operations on module load.
- """
- global _EOF, _INTR
- if (_EOF is not None) and (_INTR is not None):
+
+_platform = sys.platform.lower()
+
+# Solaris uses internal __fork_pty(). All others use pty.fork().
+_is_solaris = (
+ _platform.startswith('solaris') or
+ _platform.startswith('sunos'))
+
+if _is_solaris:
+ use_native_pty_fork = False
+ from . import _fork_pty
+else:
+ use_native_pty_fork = True
+
+PY3 = sys.version_info[0] >= 3
+
+if PY3:
+ def _byte(i):
+ return bytes([i])
+else:
+ def _byte(i):
+ return chr(i)
+
+ class FileNotFoundError(OSError): pass
+ class TimeoutError(OSError): pass
+
+_EOF, _INTR = None, None
+
+def _make_eof_intr():
+ """Set constants _EOF and _INTR.
+
+ This avoids doing potentially costly operations on module load.
+ """
+ global _EOF, _INTR
+ if (_EOF is not None) and (_INTR is not None):
return
-
- # inherit EOF and INTR definitions from controlling process.
- try:
- from termios import VEOF, VINTR
+
+ # inherit EOF and INTR definitions from controlling process.
+ try:
+ from termios import VEOF, VINTR
fd = None
for name in 'stdin', 'stdout':
stream = getattr(sys, '__%s__' % name, None)
@@ -72,193 +72,193 @@ def _make_eof_intr():
if fd is None:
# no fd, raise ValueError to fallback on CEOF, CINTR
raise ValueError("No stream has a fileno")
- intr = ord(termios.tcgetattr(fd)[6][VINTR])
- eof = ord(termios.tcgetattr(fd)[6][VEOF])
- except (ImportError, OSError, IOError, ValueError, termios.error):
- # unless the controlling process is also not a terminal,
- # such as cron(1), or when stdin and stdout are both closed.
- # Fall-back to using CEOF and CINTR. There
- try:
- from termios import CEOF, CINTR
- (intr, eof) = (CINTR, CEOF)
- except ImportError:
- # ^C, ^D
- (intr, eof) = (3, 4)
-
- _INTR = _byte(intr)
- _EOF = _byte(eof)
-
-# setecho and setwinsize are pulled out here because on some platforms, we need
-# to do this from the child before we exec()
-
-def _setecho(fd, state):
+ intr = ord(termios.tcgetattr(fd)[6][VINTR])
+ eof = ord(termios.tcgetattr(fd)[6][VEOF])
+ except (ImportError, OSError, IOError, ValueError, termios.error):
+ # unless the controlling process is also not a terminal,
+ # such as cron(1), or when stdin and stdout are both closed.
+            # Fall back to using CEOF and CINTR instead.
+ try:
+ from termios import CEOF, CINTR
+ (intr, eof) = (CINTR, CEOF)
+ except ImportError:
+ # ^C, ^D
+ (intr, eof) = (3, 4)
+
+ _INTR = _byte(intr)
+ _EOF = _byte(eof)
+
+# setecho and setwinsize are pulled out here because on some platforms, we need
+# to do this from the child before we exec()
+
+def _setecho(fd, state):
errmsg = 'setecho() may not be called on this platform (it may still be possible to enable/disable echo when spawning the child process)'
-
- try:
- attr = termios.tcgetattr(fd)
- except termios.error as err:
- if err.args[0] == errno.EINVAL:
- raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
- raise
-
- if state:
- attr[3] = attr[3] | termios.ECHO
- else:
- attr[3] = attr[3] & ~termios.ECHO
-
- try:
- # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
- # blocked on some platforms. TCSADRAIN would probably be ideal.
- termios.tcsetattr(fd, termios.TCSANOW, attr)
- except IOError as err:
- if err.args[0] == errno.EINVAL:
- raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
- raise
-
-def _setwinsize(fd, rows, cols):
- # Some very old platforms have a bug that causes the value for
- # termios.TIOCSWINSZ to be truncated. There was a hack here to work
- # around this, but it caused problems with newer platforms so has been
- # removed. For details see https://github.com/pexpect/pexpect/issues/39
- TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
- # Note, assume ws_xpixel and ws_ypixel are zero.
- s = struct.pack('HHHH', rows, cols, 0, 0)
- fcntl.ioctl(fd, TIOCSWINSZ, s)
-
-class PtyProcess(object):
- '''This class represents a process running in a pseudoterminal.
-
- The main constructor is the :meth:`spawn` classmethod.
- '''
- string_type = bytes
- if PY3:
- linesep = os.linesep.encode('ascii')
- crlf = '\r\n'.encode('ascii')
-
- @staticmethod
- def write_to_stdout(b):
- try:
- return sys.stdout.buffer.write(b)
- except AttributeError:
- # If stdout has been replaced, it may not have .buffer
- return sys.stdout.write(b.decode('ascii', 'replace'))
- else:
- linesep = os.linesep
- crlf = '\r\n'
- write_to_stdout = sys.stdout.write
-
- encoding = None
-
- argv = None
- env = None
- launch_dir = None
-
- def __init__(self, pid, fd):
- _make_eof_intr() # Ensure _EOF and _INTR are calculated
- self.pid = pid
- self.fd = fd
+
+ try:
+ attr = termios.tcgetattr(fd)
+ except termios.error as err:
+ if err.args[0] == errno.EINVAL:
+ raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
+ raise
+
+ if state:
+ attr[3] = attr[3] | termios.ECHO
+ else:
+ attr[3] = attr[3] & ~termios.ECHO
+
+ try:
+ # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
+ # blocked on some platforms. TCSADRAIN would probably be ideal.
+ termios.tcsetattr(fd, termios.TCSANOW, attr)
+ except IOError as err:
+ if err.args[0] == errno.EINVAL:
+ raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
+ raise
+
+def _setwinsize(fd, rows, cols):
+ # Some very old platforms have a bug that causes the value for
+ # termios.TIOCSWINSZ to be truncated. There was a hack here to work
+ # around this, but it caused problems with newer platforms so has been
+ # removed. For details see https://github.com/pexpect/pexpect/issues/39
+ TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
+ # Note, assume ws_xpixel and ws_ypixel are zero.
+ s = struct.pack('HHHH', rows, cols, 0, 0)
+ fcntl.ioctl(fd, TIOCSWINSZ, s)
+
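The reverse operation uses the same four-unsigned-short struct winsize layout
(rows, cols, xpixel, ypixel); a sketch, assuming the platform defines
termios.TIOCGWINSZ::

    import fcntl, struct, termios

    def get_winsize(fd):
        # TIOCGWINSZ fills the buffer with the kernel's struct winsize.
        buf = struct.pack('HHHH', 0, 0, 0, 0)
        raw = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
        rows, cols, _, _ = struct.unpack('HHHH', raw)
        return rows, cols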
+class PtyProcess(object):
+ '''This class represents a process running in a pseudoterminal.
+
+ The main constructor is the :meth:`spawn` classmethod.
+ '''
+ string_type = bytes
+ if PY3:
+ linesep = os.linesep.encode('ascii')
+ crlf = '\r\n'.encode('ascii')
+
+ @staticmethod
+ def write_to_stdout(b):
+ try:
+ return sys.stdout.buffer.write(b)
+ except AttributeError:
+ # If stdout has been replaced, it may not have .buffer
+ return sys.stdout.write(b.decode('ascii', 'replace'))
+ else:
+ linesep = os.linesep
+ crlf = '\r\n'
+ write_to_stdout = sys.stdout.write
+
+ encoding = None
+
+ argv = None
+ env = None
+ launch_dir = None
+
+ def __init__(self, pid, fd):
+ _make_eof_intr() # Ensure _EOF and _INTR are calculated
+ self.pid = pid
+ self.fd = fd
readf = io.open(fd, 'rb', buffering=0)
writef = io.open(fd, 'wb', buffering=0, closefd=False)
self.fileobj = io.BufferedRWPair(readf, writef)
-
- self.terminated = False
- self.closed = False
- self.exitstatus = None
- self.signalstatus = None
- # status returned by os.waitpid
- self.status = None
- self.flag_eof = False
- # Used by close() to give kernel time to update process status.
- # Time in seconds.
- self.delayafterclose = 0.1
- # Used by terminate() to give kernel time to update process status.
- # Time in seconds.
- self.delayafterterminate = 0.1
-
- @classmethod
- def spawn(
- cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
+
+ self.terminated = False
+ self.closed = False
+ self.exitstatus = None
+ self.signalstatus = None
+ # status returned by os.waitpid
+ self.status = None
+ self.flag_eof = False
+ # Used by close() to give kernel time to update process status.
+ # Time in seconds.
+ self.delayafterclose = 0.1
+ # Used by terminate() to give kernel time to update process status.
+ # Time in seconds.
+ self.delayafterterminate = 0.1
+
+ @classmethod
+ def spawn(
+ cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
dimensions=(24, 80), pass_fds=()):
- '''Start the given command in a child process in a pseudo terminal.
-
- This does all the fork/exec type of stuff for a pty, and returns an
- instance of PtyProcess.
-
- If preexec_fn is supplied, it will be called with no arguments in the
- child process before exec-ing the specified command.
- It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
-
- Dimensions of the pseudoterminal used for the subprocess can be
- specified as a tuple (rows, cols), or the default (24, 80) will be used.
+ '''Start the given command in a child process in a pseudo terminal.
+
+ This does all the fork/exec type of stuff for a pty, and returns an
+ instance of PtyProcess.
+
+ If preexec_fn is supplied, it will be called with no arguments in the
+ child process before exec-ing the specified command.
+ It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
+
+ Dimensions of the pseudoterminal used for the subprocess can be
+ specified as a tuple (rows, cols), or the default (24, 80) will be used.
By default, all file descriptors except 0, 1 and 2 are closed. This
behavior can be overridden with pass_fds, a list of file descriptors to
keep open between the parent and the child.
- '''
- # Note that it is difficult for this method to fail.
- # You cannot detect if the child process cannot start.
- # So the only way you can tell if the child process started
- # or not is to try to read from the file descriptor. If you get
- # EOF immediately then it means that the child is already dead.
- # That may not necessarily be bad because you may have spawned a child
- # that performs some task; creates no stdout output; and then dies.
-
- if not isinstance(argv, (list, tuple)):
- raise TypeError("Expected a list or tuple for argv, got %r" % argv)
-
- # Shallow copy of argv so we can modify it
- argv = argv[:]
- command = argv[0]
-
- command_with_path = which(command)
- if command_with_path is None:
- raise FileNotFoundError('The command was not found or was not ' +
- 'executable: %s.' % command)
- command = command_with_path
- argv[0] = command
-
- # [issue #119] To prevent the case where exec fails and the user is
- # stuck interacting with a python child process instead of whatever
- # was expected, we implement the solution from
- # http://stackoverflow.com/a/3703179 to pass the exception to the
- # parent process
-
- # [issue #119] 1. Before forking, open a pipe in the parent process.
- exec_err_pipe_read, exec_err_pipe_write = os.pipe()
-
- if use_native_pty_fork:
- pid, fd = pty.fork()
- else:
- # Use internal fork_pty, for Solaris
- pid, fd = _fork_pty.fork_pty()
-
- # Some platforms must call setwinsize() and setecho() from the
- # child process, and others from the master process. We do both,
- # allowing IOError for either.
-
- if pid == CHILD:
- # set window size
- try:
- _setwinsize(STDIN_FILENO, *dimensions)
- except IOError as err:
- if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
- raise
-
- # disable echo if spawn argument echo was unset
- if not echo:
- try:
- _setecho(STDIN_FILENO, False)
- except (IOError, termios.error) as err:
- if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
- raise
-
- # [issue #119] 3. The child closes the reading end and sets the
- # close-on-exec flag for the writing end.
- os.close(exec_err_pipe_read)
- fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
-
- # Do not allow child to inherit open file descriptors from parent,
- # with the exception of the exec_err_pipe_write of the pipe
+ '''
+ # Note that it is difficult for this method to fail.
+ # You cannot detect if the child process cannot start.
+ # So the only way you can tell if the child process started
+ # or not is to try to read from the file descriptor. If you get
+ # EOF immediately then it means that the child is already dead.
+ # That may not necessarily be bad because you may have spawned a child
+ # that performs some task; creates no stdout output; and then dies.
+
+ if not isinstance(argv, (list, tuple)):
+ raise TypeError("Expected a list or tuple for argv, got %r" % argv)
+
+ # Shallow copy of argv so we can modify it
+ argv = argv[:]
+ command = argv[0]
+
+ command_with_path = which(command)
+ if command_with_path is None:
+ raise FileNotFoundError('The command was not found or was not ' +
+ 'executable: %s.' % command)
+ command = command_with_path
+ argv[0] = command
+
+ # [issue #119] To prevent the case where exec fails and the user is
+ # stuck interacting with a python child process instead of whatever
+ # was expected, we implement the solution from
+ # http://stackoverflow.com/a/3703179 to pass the exception to the
+ # parent process
+
+ # [issue #119] 1. Before forking, open a pipe in the parent process.
+ exec_err_pipe_read, exec_err_pipe_write = os.pipe()
+
+ if use_native_pty_fork:
+ pid, fd = pty.fork()
+ else:
+ # Use internal fork_pty, for Solaris
+ pid, fd = _fork_pty.fork_pty()
+
+ # Some platforms must call setwinsize() and setecho() from the
+ # child process, and others from the master process. We do both,
+ # allowing IOError for either.
+
+ if pid == CHILD:
+ # set window size
+ try:
+ _setwinsize(STDIN_FILENO, *dimensions)
+ except IOError as err:
+ if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
+ raise
+
+ # disable echo if spawn argument echo was unset
+ if not echo:
+ try:
+ _setecho(STDIN_FILENO, False)
+ except (IOError, termios.error) as err:
+ if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
+ raise
+
+ # [issue #119] 3. The child closes the reading end and sets the
+ # close-on-exec flag for the writing end.
+ os.close(exec_err_pipe_read)
+ fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+
+ # Do not allow child to inherit open file descriptors from parent,
+ # with the exception of the exec_err_pipe_write of the pipe
# and pass_fds.
# Impose ceiling on max_fd: AIX bugfix for users with unlimited
# nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
@@ -267,293 +267,293 @@ class PtyProcess(object):
spass_fds = sorted(set(pass_fds) | {exec_err_pipe_write})
for pair in zip([2] + spass_fds, spass_fds + [max_fd]):
os.closerange(pair[0]+1, pair[1])
-
- if cwd is not None:
- os.chdir(cwd)
-
- if preexec_fn is not None:
- try:
- preexec_fn()
- except Exception as e:
- ename = type(e).__name__
- tosend = '{}:0:{}'.format(ename, str(e))
- if PY3:
- tosend = tosend.encode('utf-8')
-
- os.write(exec_err_pipe_write, tosend)
- os.close(exec_err_pipe_write)
- os._exit(1)
-
- try:
- if env is None:
- os.execv(command, argv)
- else:
- os.execvpe(command, argv, env)
- except OSError as err:
- # [issue #119] 5. If exec fails, the child writes the error
- # code back to the parent using the pipe, then exits.
- tosend = 'OSError:{}:{}'.format(err.errno, str(err))
- if PY3:
- tosend = tosend.encode('utf-8')
- os.write(exec_err_pipe_write, tosend)
- os.close(exec_err_pipe_write)
- os._exit(os.EX_OSERR)
-
- # Parent
- inst = cls(pid, fd)
-
- # Set some informational attributes
- inst.argv = argv
- if env is not None:
- inst.env = env
- if cwd is not None:
- inst.launch_dir = cwd
-
- # [issue #119] 2. After forking, the parent closes the writing end
- # of the pipe and reads from the reading end.
- os.close(exec_err_pipe_write)
- exec_err_data = os.read(exec_err_pipe_read, 4096)
- os.close(exec_err_pipe_read)
-
- # [issue #119] 6. The parent reads eof (a zero-length read) if the
- # child successfully performed exec, since close-on-exec made
- # successful exec close the writing end of the pipe. Or, if exec
- # failed, the parent reads the error code and can proceed
- # accordingly. Either way, the parent blocks until the child calls
- # exec.
- if len(exec_err_data) != 0:
- try:
- errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
- exctype = getattr(builtins, errclass.decode('ascii'), Exception)
-
- exception = exctype(errmsg.decode('utf-8', 'replace'))
- if exctype is OSError:
- exception.errno = int(errno_s)
- except:
- raise Exception('Subprocess failed, got bad error data: %r'
- % exec_err_data)
- else:
- raise exception
-
- try:
- inst.setwinsize(*dimensions)
- except IOError as err:
- if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
- raise
-
- return inst
-
- def __repr__(self):
- clsname = type(self).__name__
- if self.argv is not None:
- args = [repr(self.argv)]
- if self.env is not None:
- args.append("env=%r" % self.env)
- if self.launch_dir is not None:
- args.append("cwd=%r" % self.launch_dir)
-
- return "{}.spawn({})".format(clsname, ", ".join(args))
-
- else:
- return "{}(pid={}, fd={})".format(clsname, self.pid, self.fd)
-
- @staticmethod
- def _coerce_send_string(s):
- if not isinstance(s, bytes):
- return s.encode('utf-8')
- return s
-
- @staticmethod
- def _coerce_read_string(s):
- return s
-
- def __del__(self):
- '''This makes sure that no system resources are left open. Python only
- garbage collects Python objects. OS file descriptors are not Python
- objects, so they must be handled explicitly. If the child file
- descriptor was opened outside of this class (passed to the constructor)
- then this does not close it. '''
-
- if not self.closed:
- # It is possible for __del__ methods to execute during the
- # teardown of the Python VM itself. Thus self.close() may
- # trigger an exception because os.close may be None.
- try:
- self.close()
+
+ if cwd is not None:
+ os.chdir(cwd)
+
+ if preexec_fn is not None:
+ try:
+ preexec_fn()
+ except Exception as e:
+ ename = type(e).__name__
+ tosend = '{}:0:{}'.format(ename, str(e))
+ if PY3:
+ tosend = tosend.encode('utf-8')
+
+ os.write(exec_err_pipe_write, tosend)
+ os.close(exec_err_pipe_write)
+ os._exit(1)
+
+ try:
+ if env is None:
+ os.execv(command, argv)
+ else:
+ os.execvpe(command, argv, env)
+ except OSError as err:
+ # [issue #119] 5. If exec fails, the child writes the error
+ # code back to the parent using the pipe, then exits.
+ tosend = 'OSError:{}:{}'.format(err.errno, str(err))
+ if PY3:
+ tosend = tosend.encode('utf-8')
+ os.write(exec_err_pipe_write, tosend)
+ os.close(exec_err_pipe_write)
+ os._exit(os.EX_OSERR)
+
+ # Parent
+ inst = cls(pid, fd)
+
+ # Set some informational attributes
+ inst.argv = argv
+ if env is not None:
+ inst.env = env
+ if cwd is not None:
+ inst.launch_dir = cwd
+
+ # [issue #119] 2. After forking, the parent closes the writing end
+ # of the pipe and reads from the reading end.
+ os.close(exec_err_pipe_write)
+ exec_err_data = os.read(exec_err_pipe_read, 4096)
+ os.close(exec_err_pipe_read)
+
+ # [issue #119] 6. The parent reads eof (a zero-length read) if the
+ # child successfully performed exec, since close-on-exec made
+ # successful exec close the writing end of the pipe. Or, if exec
+ # failed, the parent reads the error code and can proceed
+ # accordingly. Either way, the parent blocks until the child calls
+ # exec.
+ if len(exec_err_data) != 0:
+ try:
+ errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
+ exctype = getattr(builtins, errclass.decode('ascii'), Exception)
+
+ exception = exctype(errmsg.decode('utf-8', 'replace'))
+ if exctype is OSError:
+ exception.errno = int(errno_s)
+ except:
+ raise Exception('Subprocess failed, got bad error data: %r'
+ % exec_err_data)
+ else:
+ raise exception
+
+ try:
+ inst.setwinsize(*dimensions)
+ except IOError as err:
+ if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
+ raise
+
+ return inst
+
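A hedged usage sketch (the command names are illustrative): a command that
which() cannot resolve raises FileNotFoundError in the parent before any fork,
while a failed exec is reported back through the error pipe as the
reconstructed exception::

    from ptyprocess import PtyProcess

    try:
        PtyProcess.spawn(['no-such-command'])
    except FileNotFoundError:
        pass                              # lookup failed before forking

    p = PtyProcess.spawn(['ls', '-l'], echo=False)
    try:
        while True:
            p.write_to_stdout(p.read(1024))   # raw bytes from the child pty
    except EOFError:
        pass                              # child closed its end
    p.wait()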
+ def __repr__(self):
+ clsname = type(self).__name__
+ if self.argv is not None:
+ args = [repr(self.argv)]
+ if self.env is not None:
+ args.append("env=%r" % self.env)
+ if self.launch_dir is not None:
+ args.append("cwd=%r" % self.launch_dir)
+
+ return "{}.spawn({})".format(clsname, ", ".join(args))
+
+ else:
+ return "{}(pid={}, fd={})".format(clsname, self.pid, self.fd)
+
+ @staticmethod
+ def _coerce_send_string(s):
+ if not isinstance(s, bytes):
+ return s.encode('utf-8')
+ return s
+
+ @staticmethod
+ def _coerce_read_string(s):
+ return s
+
+ def __del__(self):
+ '''This makes sure that no system resources are left open. Python only
+ garbage collects Python objects. OS file descriptors are not Python
+ objects, so they must be handled explicitly. If the child file
+ descriptor was opened outside of this class (passed to the constructor)
+ then this does not close it. '''
+
+ if not self.closed:
+ # It is possible for __del__ methods to execute during the
+ # teardown of the Python VM itself. Thus self.close() may
+ # trigger an exception because os.close may be None.
+ try:
+ self.close()
# TODO: which exception(s) should we catch here explicitly?
- except:
- pass
-
-
- def fileno(self):
- '''This returns the file descriptor of the pty for the child.
- '''
- return self.fd
-
- def close(self, force=True):
- '''This closes the connection with the child application. Note that
- calling close() more than once is valid. This emulates standard Python
- behavior with files. Set force to True if you want to make sure that
- the child is terminated (SIGKILL is sent if the child ignores SIGHUP
- and SIGINT). '''
- if not self.closed:
- self.flush()
- self.fileobj.close() # Closes the file descriptor
- # Give kernel time to update process status.
- time.sleep(self.delayafterclose)
- if self.isalive():
- if not self.terminate(force):
- raise PtyProcessError('Could not terminate the child.')
- self.fd = -1
- self.closed = True
- #self.pid = None
-
- def flush(self):
- '''This does nothing. It is here to support the interface for a
- File-like object. '''
-
- pass
-
- def isatty(self):
- '''This returns True if the file descriptor is open and connected to a
- tty(-like) device, else False.
-
- On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
- the child pty may not appear as a terminal device. This means
- methods such as setecho(), setwinsize(), getwinsize() may raise an
- IOError. '''
-
- return os.isatty(self.fd)
-
- def waitnoecho(self, timeout=None):
- '''This waits until the terminal ECHO flag is set False. This returns
- True if the echo mode is off. This returns False if the ECHO flag was
- not set False before the timeout. This can be used to detect when the
- child is waiting for a password. Usually a child application will turn
- off echo mode when it is waiting for the user to enter a password. For
- example, instead of expecting the "password:" prompt you can wait for
- the child to set ECHO off::
-
- p = pexpect.spawn('ssh user@example.com')
- p.waitnoecho()
- p.sendline(mypassword)
-
- If timeout==None then this method blocks until the ECHO flag is False.
- '''
-
- if timeout is not None:
- end_time = time.time() + timeout
- while True:
- if not self.getecho():
- return True
- if timeout is not None and timeout < 0:
- return False
- if timeout is not None:
- timeout = end_time - time.time()
- time.sleep(0.1)
-
- def getecho(self):
- '''This returns the terminal echo mode. This returns True if echo is
- on or False if echo is off. Child applications that are expecting you
- to enter a password often set ECHO False. See waitnoecho().
-
- Not supported on platforms where ``isatty()`` returns False. '''
-
- try:
- attr = termios.tcgetattr(self.fd)
- except termios.error as err:
- errmsg = 'getecho() may not be called on this platform'
- if err.args[0] == errno.EINVAL:
- raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
- raise
-
- self.echo = bool(attr[3] & termios.ECHO)
- return self.echo
-
- def setecho(self, state):
- '''This sets the terminal echo mode on or off. Note that anything the
- child sent before the echo will be lost, so you should be sure that
- your input buffer is empty before you call setecho(). For example, the
- following will work as expected::
-
- p = pexpect.spawn('cat') # Echo is on by default.
- p.sendline('1234') # We expect to see this twice from the child...
- p.expect(['1234']) # ... once from the tty echo...
- p.expect(['1234']) # ... and again from cat itself.
- p.setecho(False) # Turn off tty echo
- p.sendline('abcd') # We will see this only once (echoed by cat).
- p.sendline('wxyz') # We will see this only once (echoed by cat).
- p.expect(['abcd'])
- p.expect(['wxyz'])
-
- The following WILL NOT WORK because the lines sent before the setecho
- will be lost::
-
- p = pexpect.spawn('cat')
- p.sendline('1234')
- p.setecho(False) # Turn off tty echo
- p.sendline('abcd') # We will see this only once (echoed by cat).
- p.sendline('wxyz') # We will see this only once (echoed by cat).
- p.expect(['1234'])
- p.expect(['1234'])
- p.expect(['abcd'])
- p.expect(['wxyz'])
-
-
- Not supported on platforms where ``isatty()`` returns False.
- '''
- _setecho(self.fd, state)
-
- self.echo = state
-
- def read(self, size=1024):
- """Read and return at most ``size`` bytes from the pty.
-
- Can block if there is nothing to read. Raises :exc:`EOFError` if the
- terminal was closed.
-
- Unlike Pexpect's ``read_nonblocking`` method, this doesn't try to deal
- with the vagaries of EOF on platforms that do strange things, like IRIX
- or older Solaris systems. It handles the errno=EIO pattern used on
- Linux, and the empty-string return used on BSD platforms and (seemingly)
- on recent Solaris.
- """
- try:
+ except:
+ pass
+
+
+ def fileno(self):
+ '''This returns the file descriptor of the pty for the child.
+ '''
+ return self.fd
+
+ def close(self, force=True):
+ '''This closes the connection with the child application. Note that
+ calling close() more than once is valid. This emulates standard Python
+ behavior with files. Set force to True if you want to make sure that
+ the child is terminated (SIGKILL is sent if the child ignores SIGHUP
+ and SIGINT). '''
+ if not self.closed:
+ self.flush()
+ self.fileobj.close() # Closes the file descriptor
+ # Give kernel time to update process status.
+ time.sleep(self.delayafterclose)
+ if self.isalive():
+ if not self.terminate(force):
+ raise PtyProcessError('Could not terminate the child.')
+ self.fd = -1
+ self.closed = True
+ #self.pid = None
+
+ def flush(self):
+ '''This does nothing. It is here to support the interface for a
+ File-like object. '''
+
+ pass
+
+ def isatty(self):
+ '''This returns True if the file descriptor is open and connected to a
+ tty(-like) device, else False.
+
+ On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
+ the child pty may not appear as a terminal device. This means
+ methods such as setecho(), setwinsize(), getwinsize() may raise an
+ IOError. '''
+
+ return os.isatty(self.fd)
+
+ def waitnoecho(self, timeout=None):
+ '''This waits until the terminal ECHO flag is set False. This returns
+ True if the echo mode is off. This returns False if the ECHO flag was
+ not set False before the timeout. This can be used to detect when the
+ child is waiting for a password. Usually a child application will turn
+ off echo mode when it is waiting for the user to enter a password. For
+ example, instead of expecting the "password:" prompt you can wait for
+ the child to set ECHO off::
+
+ p = pexpect.spawn('ssh user@example.com')
+ p.waitnoecho()
+ p.sendline(mypassword)
+
+ If timeout==None then this method blocks until the ECHO flag is False.
+ '''
+
+ if timeout is not None:
+ end_time = time.time() + timeout
+ while True:
+ if not self.getecho():
+ return True
+ if timeout is not None and timeout < 0:
+ return False
+ if timeout is not None:
+ timeout = end_time - time.time()
+ time.sleep(0.1)
+
+ def getecho(self):
+ '''This returns the terminal echo mode. This returns True if echo is
+ on or False if echo is off. Child applications that are expecting you
+ to enter a password often set ECHO False. See waitnoecho().
+
+ Not supported on platforms where ``isatty()`` returns False. '''
+
+ try:
+ attr = termios.tcgetattr(self.fd)
+ except termios.error as err:
+ errmsg = 'getecho() may not be called on this platform'
+ if err.args[0] == errno.EINVAL:
+ raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
+ raise
+
+ self.echo = bool(attr[3] & termios.ECHO)
+ return self.echo
+
+ def setecho(self, state):
+ '''This sets the terminal echo mode on or off. Note that anything the
+ child sent before the echo will be lost, so you should be sure that
+ your input buffer is empty before you call setecho(). For example, the
+ following will work as expected::
+
+ p = pexpect.spawn('cat') # Echo is on by default.
+ p.sendline('1234') # We expect to see this twice from the child...
+ p.expect(['1234']) # ... once from the tty echo...
+ p.expect(['1234']) # ... and again from cat itself.
+ p.setecho(False) # Turn off tty echo
+ p.sendline('abcd') # We will see this only once (echoed by cat).
+ p.sendline('wxyz') # We will see this only once (echoed by cat).
+ p.expect(['abcd'])
+ p.expect(['wxyz'])
+
+ The following WILL NOT WORK because the lines sent before the setecho
+ will be lost::
+
+ p = pexpect.spawn('cat')
+ p.sendline('1234')
+ p.setecho(False) # Turn off tty echo
+ p.sendline('abcd') # We will see this only once (echoed by cat).
+ p.sendline('wxyz') # We will see this only once (echoed by cat).
+ p.expect(['1234'])
+ p.expect(['1234'])
+ p.expect(['abcd'])
+ p.expect(['wxyz'])
+
+
+ Not supported on platforms where ``isatty()`` returns False.
+ '''
+ _setecho(self.fd, state)
+
+ self.echo = state
+
+ def read(self, size=1024):
+ """Read and return at most ``size`` bytes from the pty.
+
+ Can block if there is nothing to read. Raises :exc:`EOFError` if the
+ terminal was closed.
+
+ Unlike Pexpect's ``read_nonblocking`` method, this doesn't try to deal
+ with the vagaries of EOF on platforms that do strange things, like IRIX
+ or older Solaris systems. It handles the errno=EIO pattern used on
+ Linux, and the empty-string return used on BSD platforms and (seemingly)
+ on recent Solaris.
+ """
+ try:
s = self.fileobj.read1(size)
- except (OSError, IOError) as err:
- if err.args[0] == errno.EIO:
- # Linux-style EOF
- self.flag_eof = True
- raise EOFError('End Of File (EOF). Exception style platform.')
- raise
- if s == b'':
- # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
- self.flag_eof = True
- raise EOFError('End Of File (EOF). Empty string style platform.')
-
- return s
-
- def readline(self):
- """Read one line from the pseudoterminal, and return it as unicode.
-
- Can block if there is nothing to read. Raises :exc:`EOFError` if the
- terminal was closed.
- """
- try:
- s = self.fileobj.readline()
- except (OSError, IOError) as err:
- if err.args[0] == errno.EIO:
- # Linux-style EOF
- self.flag_eof = True
- raise EOFError('End Of File (EOF). Exception style platform.')
- raise
- if s == b'':
- # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
- self.flag_eof = True
- raise EOFError('End Of File (EOF). Empty string style platform.')
-
- return s
-
+ except (OSError, IOError) as err:
+ if err.args[0] == errno.EIO:
+ # Linux-style EOF
+ self.flag_eof = True
+ raise EOFError('End Of File (EOF). Exception style platform.')
+ raise
+ if s == b'':
+ # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
+ self.flag_eof = True
+ raise EOFError('End Of File (EOF). Empty string style platform.')
+
+ return s
+
+ def readline(self):
+ """Read one line from the pseudoterminal, and return it as unicode.
+
+ Can block if there is nothing to read. Raises :exc:`EOFError` if the
+ terminal was closed.
+ """
+ try:
+ s = self.fileobj.readline()
+ except (OSError, IOError) as err:
+ if err.args[0] == errno.EIO:
+ # Linux-style EOF
+ self.flag_eof = True
+ raise EOFError('End Of File (EOF). Exception style platform.')
+ raise
+ if s == b'':
+ # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
+ self.flag_eof = True
+ raise EOFError('End Of File (EOF). Empty string style platform.')
+
+ return s
+
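Because both EOF styles surface to callers as the same EOFError, a caller
draining the child needs only one handler; a sketch::

    def drain_lines(proc):
        # Collect the child's remaining output one line at a time.
        lines = []
        try:
            while True:
                lines.append(proc.readline())
        except EOFError:
            return lines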
def _writeb(self, b, flush=True):
n = self.fileobj.write(b)
if flush:
@@ -561,282 +561,282 @@ class PtyProcess(object):
return n
def write(self, s, flush=True):
- """Write bytes to the pseudoterminal.
-
- Returns the number of bytes written.
- """
+ """Write bytes to the pseudoterminal.
+
+ Returns the number of bytes written.
+ """
return self._writeb(s, flush=flush)
-
- def sendcontrol(self, char):
- '''Helper method that wraps send() with mnemonic access for sending a control
- character to the child (such as Ctrl-C or Ctrl-D). For example, to send
- Ctrl-G (ASCII 7, bell, '\a')::
-
- child.sendcontrol('g')
-
- See also, sendintr() and sendeof().
- '''
- char = char.lower()
- a = ord(char)
- if 97 <= a <= 122:
- a = a - ord('a') + 1
- byte = _byte(a)
+
+ def sendcontrol(self, char):
+ '''Helper method that wraps send() with mnemonic access for sending a control
+ character to the child (such as Ctrl-C or Ctrl-D). For example, to send
+ Ctrl-G (ASCII 7, bell, '\a')::
+
+ child.sendcontrol('g')
+
+ See also, sendintr() and sendeof().
+ '''
+ char = char.lower()
+ a = ord(char)
+ if 97 <= a <= 122:
+ a = a - ord('a') + 1
+ byte = _byte(a)
return self._writeb(byte), byte
- d = {'@': 0, '`': 0,
- '[': 27, '{': 27,
- '\\': 28, '|': 28,
- ']': 29, '}': 29,
- '^': 30, '~': 30,
- '_': 31,
- '?': 127}
- if char not in d:
- return 0, b''
-
- byte = _byte(d[char])
+ d = {'@': 0, '`': 0,
+ '[': 27, '{': 27,
+ '\\': 28, '|': 28,
+ ']': 29, '}': 29,
+ '^': 30, '~': 30,
+ '_': 31,
+ '?': 127}
+ if char not in d:
+ return 0, b''
+
+ byte = _byte(d[char])
return self._writeb(byte), byte
-
- def sendeof(self):
- '''This sends an EOF to the child. This sends a character which causes
- the pending parent output buffer to be sent to the waiting child
- program without waiting for end-of-line. If it is the first character
- of the line, the read() in the user program returns 0, which signifies
- end-of-file. This means to work as expected a sendeof() has to be
- called at the beginning of a line. This method does not send a newline.
- It is the responsibility of the caller to ensure the eof is sent at the
- beginning of a line. '''
-
+
+ def sendeof(self):
+ '''This sends an EOF to the child. This sends a character which causes
+ the pending parent output buffer to be sent to the waiting child
+ program without waiting for end-of-line. If it is the first character
+ of the line, the read() in the user program returns 0, which signifies
+ end-of-file. This means to work as expected a sendeof() has to be
+ called at the beginning of a line. This method does not send a newline.
+ It is the responsibility of the caller to ensure the eof is sent at the
+ beginning of a line. '''
+
return self._writeb(_EOF), _EOF
-
- def sendintr(self):
- '''This sends a SIGINT to the child. It does not require
- the SIGINT to be the first character on a line. '''
-
+
+ def sendintr(self):
+ '''This sends a SIGINT to the child. It does not require
+ the SIGINT to be the first character on a line. '''
+
return self._writeb(_INTR), _INTR
-
- def eof(self):
- '''This returns True if the EOF exception was ever raised.
- '''
-
- return self.flag_eof
-
- def terminate(self, force=False):
- '''This forces a child process to terminate. It starts nicely with
- SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
- returns True if the child was terminated. This returns False if the
- child could not be terminated. '''
-
- if not self.isalive():
- return True
- try:
- self.kill(signal.SIGHUP)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGCONT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGINT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- if force:
- self.kill(signal.SIGKILL)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
- return False
- except OSError:
- # I think there are kernel timing issues that sometimes cause
- # this to happen. I think isalive() reports True, but the
- # process is dead to the kernel.
- # Make one last attempt to see if the kernel is up to date.
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
-
- def wait(self):
- '''This waits until the child exits. This is a blocking call. This will
- not read any data from the child, so this will block forever if the
- child has unread output and has terminated. In other words, the child
- may have printed output then called exit(), but, the child is
- technically still alive until its output is read by the parent. '''
-
- if self.isalive():
- pid, status = os.waitpid(self.pid, 0)
- else:
- return self.exitstatus
- if os.WIFEXITED(status):
- self.status = status
- self.exitstatus = os.WEXITSTATUS(status)
- self.signalstatus = None
- self.terminated = True
- elif os.WIFSIGNALED(status):
- self.status = status
- self.exitstatus = None
- self.signalstatus = os.WTERMSIG(status)
- self.terminated = True
- elif os.WIFSTOPPED(status): # pragma: no cover
- # You can't call wait() on a child process in the stopped state.
- raise PtyProcessError('Called wait() on a stopped child ' +
- 'process. This is not supported. Is some other ' +
- 'process attempting job control with our child pid?')
- return self.exitstatus
-
- def isalive(self):
- '''This tests if the child process is running or not. This is
- non-blocking. If the child was terminated then this will read the
- exitstatus or signalstatus of the child. This returns True if the child
- process appears to be running or False if not. It can take literally
- SECONDS for Solaris to return the right status. '''
-
- if self.terminated:
- return False
-
- if self.flag_eof:
- # This is for Linux, which requires the blocking form
- # of waitpid to get the status of a defunct process.
- # This is super-lame. The flag_eof would have been set
- # in read_nonblocking(), so this should be safe.
- waitpid_options = 0
- else:
- waitpid_options = os.WNOHANG
-
- try:
- pid, status = os.waitpid(self.pid, waitpid_options)
- except OSError as e:
- # No child processes
- if e.errno == errno.ECHILD:
- raise PtyProcessError('isalive() encountered condition ' +
- 'where "terminated" is 0, but there was no child ' +
- 'process. Did someone else call waitpid() ' +
- 'on our process?')
- else:
- raise
-
- # I have to do this twice for Solaris.
- # I can't even believe that I figured this out...
- # If waitpid() returns 0 it means that no child process
- # wishes to report, and the value of status is undefined.
- if pid == 0:
- try:
- ### os.WNOHANG) # Solaris!
- pid, status = os.waitpid(self.pid, waitpid_options)
- except OSError as e: # pragma: no cover
- # This should never happen...
- if e.errno == errno.ECHILD:
- raise PtyProcessError('isalive() encountered condition ' +
- 'that should never happen. There was no child ' +
- 'process. Did someone else call waitpid() ' +
- 'on our process?')
- else:
- raise
-
- # If pid is still 0 after two calls to waitpid() then the process
- # really is alive. This seems to work on all platforms, except for
- # Irix which seems to require a blocking call on waitpid or select,
- # so I let read_nonblocking take care of this situation
- # (unfortunately, this requires waiting through the timeout).
- if pid == 0:
- return True
-
- if os.WIFEXITED(status):
- self.status = status
- self.exitstatus = os.WEXITSTATUS(status)
- self.signalstatus = None
- self.terminated = True
- elif os.WIFSIGNALED(status):
- self.status = status
- self.exitstatus = None
- self.signalstatus = os.WTERMSIG(status)
- self.terminated = True
- elif os.WIFSTOPPED(status):
- raise PtyProcessError('isalive() encountered condition ' +
- 'where child process is stopped. This is not ' +
- 'supported. Is some other process attempting ' +
- 'job control with our child pid?')
- return False
-
- def kill(self, sig):
- """Send the given signal to the child application.
-
- In keeping with UNIX tradition it has a misleading name. It does not
- necessarily kill the child unless you send the right signal. See the
- :mod:`signal` module for constants representing signal numbers.
- """
-
- # Same as os.kill, but the pid is given for you.
- if self.isalive():
- os.kill(self.pid, sig)
-
- def getwinsize(self):
- """Return the window size of the pseudoterminal as a tuple (rows, cols).
- """
- TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
- s = struct.pack('HHHH', 0, 0, 0, 0)
- x = fcntl.ioctl(self.fd, TIOCGWINSZ, s)
- return struct.unpack('HHHH', x)[0:2]
-
- def setwinsize(self, rows, cols):
- """Set the terminal window size of the child tty.
-
- This will cause a SIGWINCH signal to be sent to the child. This does not
- change the physical window size. It changes the size reported to
- TTY-aware applications like vi or curses -- applications that respond to
- the SIGWINCH signal.
- """
- return _setwinsize(self.fd, rows, cols)
-
-
-class PtyProcessUnicode(PtyProcess):
- """Unicode wrapper around a process running in a pseudoterminal.
-
- This class exposes a similar interface to :class:`PtyProcess`, but its read
- methods return unicode, and its :meth:`write` accepts unicode.
- """
- if PY3:
- string_type = str
- else:
- string_type = unicode # analysis:ignore
-
- def __init__(self, pid, fd, encoding='utf-8', codec_errors='strict'):
- super(PtyProcessUnicode, self).__init__(pid, fd)
- self.encoding = encoding
- self.codec_errors = codec_errors
- self.decoder = codecs.getincrementaldecoder(encoding)(errors=codec_errors)
-
- def read(self, size=1024):
- """Read at most ``size`` bytes from the pty, return them as unicode.
-
- Can block if there is nothing to read. Raises :exc:`EOFError` if the
- terminal was closed.
-
- The size argument still refers to bytes, not unicode code points.
- """
- b = super(PtyProcessUnicode, self).read(size)
- return self.decoder.decode(b, final=False)
-
- def readline(self):
- """Read one line from the pseudoterminal, and return it as unicode.
-
- Can block if there is nothing to read. Raises :exc:`EOFError` if the
- terminal was closed.
- """
- b = super(PtyProcessUnicode, self).readline()
- return self.decoder.decode(b, final=False)
-
- def write(self, s):
- """Write the unicode string ``s`` to the pseudoterminal.
-
- Returns the number of bytes written.
- """
- b = s.encode(self.encoding)
- return super(PtyProcessUnicode, self).write(b)
+
+ def eof(self):
+ '''This returns True if the EOF exception was ever raised.
+ '''
+
+ return self.flag_eof
+
+ def terminate(self, force=False):
+ '''This forces a child process to terminate. It starts nicely with
+ SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
+ returns True if the child was terminated. This returns False if the
+ child could not be terminated. '''
+
+ if not self.isalive():
+ return True
+ try:
+ self.kill(signal.SIGHUP)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ self.kill(signal.SIGCONT)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ self.kill(signal.SIGINT)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ if force:
+ self.kill(signal.SIGKILL)
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ else:
+ return False
+ return False
+ except OSError:
+ # I think there are kernel timing issues that sometimes cause
+ # this to happen. I think isalive() reports True, but the
+ # process is dead to the kernel.
+ # Make one last attempt to see if the kernel is up to date.
+ time.sleep(self.delayafterterminate)
+ if not self.isalive():
+ return True
+ else:
+ return False
+
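Since SIGKILL is only tried with force=True, a robust shutdown typically
escalates in two steps; a sketch::

    if not p.terminate():                 # SIGHUP, SIGCONT, SIGINT
        if not p.terminate(force=True):   # adds SIGKILL
            raise PtyProcessError('Could not terminate the child.')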
+ def wait(self):
+ '''This waits until the child exits. This is a blocking call. This will
+ not read any data from the child, so this will block forever if the
+ child has unread output and has terminated. In other words, the child
+ may have printed output then called exit(), but, the child is
+ technically still alive until its output is read by the parent. '''
+
+ if self.isalive():
+ pid, status = os.waitpid(self.pid, 0)
+ else:
+ return self.exitstatus
+ if os.WIFEXITED(status):
+ self.status = status
+ self.exitstatus = os.WEXITSTATUS(status)
+ self.signalstatus = None
+ self.terminated = True
+ elif os.WIFSIGNALED(status):
+ self.status = status
+ self.exitstatus = None
+ self.signalstatus = os.WTERMSIG(status)
+ self.terminated = True
+ elif os.WIFSTOPPED(status): # pragma: no cover
+ # You can't call wait() on a child process in the stopped state.
+ raise PtyProcessError('Called wait() on a stopped child ' +
+ 'process. This is not supported. Is some other ' +
+ 'process attempting job control with our child pid?')
+ return self.exitstatus
+
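After wait() returns, exactly one of exitstatus and signalstatus is set; for
example::

    status = p.wait()
    if status is not None:
        print('exited with code %d' % status)
    else:
        print('killed by signal %d' % p.signalstatus)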
+ def isalive(self):
+ '''This tests if the child process is running or not. This is
+ non-blocking. If the child was terminated then this will read the
+ exitstatus or signalstatus of the child. This returns True if the child
+ process appears to be running or False if not. It can take literally
+ SECONDS for Solaris to return the right status. '''
+
+ if self.terminated:
+ return False
+
+ if self.flag_eof:
+ # This is for Linux, which requires the blocking form
+ # of waitpid to get the status of a defunct process.
+ # This is super-lame. The flag_eof would have been set
+ # in read_nonblocking(), so this should be safe.
+ waitpid_options = 0
+ else:
+ waitpid_options = os.WNOHANG
+
+ try:
+ pid, status = os.waitpid(self.pid, waitpid_options)
+ except OSError as e:
+ # No child processes
+ if e.errno == errno.ECHILD:
+ raise PtyProcessError('isalive() encountered condition ' +
+ 'where "terminated" is 0, but there was no child ' +
+ 'process. Did someone else call waitpid() ' +
+ 'on our process?')
+ else:
+ raise
+
+ # I have to do this twice for Solaris.
+ # I can't even believe that I figured this out...
+ # If waitpid() returns 0 it means that no child process
+ # wishes to report, and the value of status is undefined.
+ if pid == 0:
+ try:
+ ### os.WNOHANG) # Solaris!
+ pid, status = os.waitpid(self.pid, waitpid_options)
+ except OSError as e: # pragma: no cover
+ # This should never happen...
+ if e.errno == errno.ECHILD:
+ raise PtyProcessError('isalive() encountered condition ' +
+ 'that should never happen. There was no child ' +
+ 'process. Did someone else call waitpid() ' +
+ 'on our process?')
+ else:
+ raise
+
+ # If pid is still 0 after two calls to waitpid() then the process
+ # really is alive. This seems to work on all platforms, except for
+ # Irix which seems to require a blocking call on waitpid or select,
+ # so I let read_nonblocking take care of this situation
+ # (unfortunately, this requires waiting through the timeout).
+ if pid == 0:
+ return True
+
+ if os.WIFEXITED(status):
+ self.status = status
+ self.exitstatus = os.WEXITSTATUS(status)
+ self.signalstatus = None
+ self.terminated = True
+ elif os.WIFSIGNALED(status):
+ self.status = status
+ self.exitstatus = None
+ self.signalstatus = os.WTERMSIG(status)
+ self.terminated = True
+ elif os.WIFSTOPPED(status):
+ raise PtyProcessError('isalive() encountered condition ' +
+ 'where child process is stopped. This is not ' +
+ 'supported. Is some other process attempting ' +
+ 'job control with our child pid?')
+ return False
+
+ def kill(self, sig):
+ """Send the given signal to the child application.
+
+ In keeping with UNIX tradition it has a misleading name. It does not
+ necessarily kill the child unless you send the right signal. See the
+ :mod:`signal` module for constants representing signal numbers.
+ """
+
+ # Same as os.kill, but the pid is given for you.
+ if self.isalive():
+ os.kill(self.pid, sig)
+
+ def getwinsize(self):
+ """Return the window size of the pseudoterminal as a tuple (rows, cols).
+ """
+ TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
+ s = struct.pack('HHHH', 0, 0, 0, 0)
+ x = fcntl.ioctl(self.fd, TIOCGWINSZ, s)
+ return struct.unpack('HHHH', x)[0:2]
+
+ def setwinsize(self, rows, cols):
+ """Set the terminal window size of the child tty.
+
+ This will cause a SIGWINCH signal to be sent to the child. This does not
+ change the physical window size. It changes the size reported to
+ TTY-aware applications like vi or curses -- applications that respond to
+ the SIGWINCH signal.
+ """
+ return _setwinsize(self.fd, rows, cols)
+
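A resize is observable from both sides; a sketch (the dimensions are
arbitrary)::

    p.setwinsize(50, 120)    # the child's foreground job receives SIGWINCH
    assert p.getwinsize() == (50, 120)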
+
+class PtyProcessUnicode(PtyProcess):
+ """Unicode wrapper around a process running in a pseudoterminal.
+
+ This class exposes a similar interface to :class:`PtyProcess`, but its read
+ methods return unicode, and its :meth:`write` accepts unicode.
+ """
+ if PY3:
+ string_type = str
+ else:
+ string_type = unicode # analysis:ignore
+
+ def __init__(self, pid, fd, encoding='utf-8', codec_errors='strict'):
+ super(PtyProcessUnicode, self).__init__(pid, fd)
+ self.encoding = encoding
+ self.codec_errors = codec_errors
+ self.decoder = codecs.getincrementaldecoder(encoding)(errors=codec_errors)
+
+ def read(self, size=1024):
+ """Read at most ``size`` bytes from the pty, return them as unicode.
+
+ Can block if there is nothing to read. Raises :exc:`EOFError` if the
+ terminal was closed.
+
+ The size argument still refers to bytes, not unicode code points.
+ """
+ b = super(PtyProcessUnicode, self).read(size)
+ return self.decoder.decode(b, final=False)
+
+ def readline(self):
+ """Read one line from the pseudoterminal, and return it as unicode.
+
+ Can block if there is nothing to read. Raises :exc:`EOFError` if the
+ terminal was closed.
+ """
+ b = super(PtyProcessUnicode, self).readline()
+ return self.decoder.decode(b, final=False)
+
+ def write(self, s):
+ """Write the unicode string ``s`` to the pseudoterminal.
+
+ Returns the number of bytes written.
+ """
+ b = s.encode(self.encoding)
+ return super(PtyProcessUnicode, self).write(b)
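Usage mirrors PtyProcess except that reads return str and write() takes str; a
sketch, assuming the child emits UTF-8::

    from ptyprocess import PtyProcessUnicode

    p = PtyProcessUnicode.spawn(['echo', 'héllo'])
    line = p.readline()    # str, decoded incrementally via the codec above
    p.wait()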
diff --git a/contrib/python/ptyprocess/ptyprocess/util.py b/contrib/python/ptyprocess/ptyprocess/util.py
index 9bd6c4c34c..aadbd62c80 100644
--- a/contrib/python/ptyprocess/ptyprocess/util.py
+++ b/contrib/python/ptyprocess/ptyprocess/util.py
@@ -1,69 +1,69 @@
-try:
- from shutil import which # Python >= 3.3
-except ImportError:
- import os, sys
-
- # This is copied from Python 3.4.1
- def which(cmd, mode=os.F_OK | os.X_OK, path=None):
- """Given a command, mode, and a PATH string, return the path which
- conforms to the given mode on the PATH, or None if there is no such
- file.
-
- `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
- of os.environ.get("PATH"), or can be overridden with a custom search
- path.
-
- """
- # Check that a given file can be accessed with the correct mode.
- # Additionally check that `file` is not a directory, as on Windows
- # directories pass the os.access check.
- def _access_check(fn, mode):
- return (os.path.exists(fn) and os.access(fn, mode)
- and not os.path.isdir(fn))
-
- # If we're given a path with a directory part, look it up directly rather
- # than referring to PATH directories. This includes checking relative to the
- # current directory, e.g. ./script
- if os.path.dirname(cmd):
- if _access_check(cmd, mode):
- return cmd
- return None
-
- if path is None:
- path = os.environ.get("PATH", os.defpath)
- if not path:
- return None
- path = path.split(os.pathsep)
-
- if sys.platform == "win32":
- # The current directory takes precedence on Windows.
- if os.curdir not in path:
- path.insert(0, os.curdir)
-
- # PATHEXT is necessary to check on Windows.
- pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
- # See if the given file matches any of the expected path extensions.
- # This will allow us to short circuit when given "python.exe".
- # If it does match, only test that one, otherwise we have to try
- # others.
- if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
- files = [cmd]
- else:
- files = [cmd + ext for ext in pathext]
- else:
- # On other platforms you don't have things like PATHEXT to tell you
- # what file suffixes are executable, so just pass on cmd as-is.
- files = [cmd]
-
- seen = set()
- for dir in path:
- normdir = os.path.normcase(dir)
- if normdir not in seen:
- seen.add(normdir)
- for thefile in files:
- name = os.path.join(dir, thefile)
- if _access_check(name, mode):
- return name
+try:
+ from shutil import which # Python >= 3.3
+except ImportError:
+ import os, sys
+
+ # This is copied from Python 3.4.1
+ def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+
+ """
+ # Check that a given file can be accessed with the correct mode.
+ # Additionally check that `file` is not a directory, as on Windows
+ # directories pass the os.access check.
+ def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode)
+ and not os.path.isdir(fn))
+
+ # If we're given a path with a directory part, look it up directly rather
+ # than referring to PATH directories. This includes checking relative to the
+ # current directory, e.g. ./script
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ if path is None:
+ path = os.environ.get("PATH", os.defpath)
+ if not path:
+ return None
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ # The current directory takes precedence on Windows.
+ if os.curdir not in path:
+ path.insert(0, os.curdir)
+
+ # PATHEXT is necessary to check on Windows.
+ pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ # See if the given file matches any of the expected path extensions.
+ # This will allow us to short circuit when given "python.exe".
+ # If it does match, only test that one, otherwise we have to try
+ # others.
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ # On other platforms you don't have things like PATHEXT to tell you
+ # what file suffixes are executable, so just pass on cmd as-is.
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if normdir not in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
return None
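Either way, which() behaves like shutil.which; for example (Python 3 shown)::

    path = which('ls')    # e.g. '/bin/ls'; None if not found on PATH
    if path is None:
        raise FileNotFoundError('ls is not on PATH')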
diff --git a/contrib/python/ptyprocess/ya.make b/contrib/python/ptyprocess/ya.make
index b80a63187e..5e780214f2 100644
--- a/contrib/python/ptyprocess/ya.make
+++ b/contrib/python/ptyprocess/ya.make
@@ -1,7 +1,7 @@
# Generated by devtools/yamaker (pypi).
PY23_LIBRARY()
-
+
OWNER(borman g:python-contrib)
VERSION(0.7.0)
@@ -10,18 +10,18 @@ LICENSE(ISC)
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- ptyprocess/__init__.py
- ptyprocess/_fork_pty.py
- ptyprocess/ptyprocess.py
- ptyprocess/util.py
-)
-
+PY_SRCS(
+ TOP_LEVEL
+ ptyprocess/__init__.py
+ ptyprocess/_fork_pty.py
+ ptyprocess/ptyprocess.py
+ ptyprocess/util.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/ptyprocess/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/py/ya.make b/contrib/python/py/ya.make
index 6a1d18e5fe..cc86cb7fa9 100644
--- a/contrib/python/py/ya.make
+++ b/contrib/python/py/ya.make
@@ -1,13 +1,13 @@
# Generated by devtools/yamaker (pypi).
PY23_LIBRARY()
-
+
OWNER(g:python-contrib)
VERSION(1.11.0)
LICENSE(MIT)
-
+
NO_LINT()
NO_CHECK_IMPORTS(
@@ -15,53 +15,53 @@ NO_CHECK_IMPORTS(
py._code._assertionold
)
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
py/__init__.py
py/__init__.pyi
py/__metainfo.py
- py/_builtin.py
+ py/_builtin.py
py/_code/__init__.py
- py/_code/_assertionnew.py
- py/_code/_assertionold.py
+ py/_code/_assertionnew.py
+ py/_code/_assertionold.py
py/_code/_py2traceback.py
- py/_code/assertion.py
- py/_code/code.py
- py/_code/source.py
- py/_error.py
+ py/_code/assertion.py
+ py/_code/code.py
+ py/_code/source.py
+ py/_error.py
py/_io/__init__.py
py/_io/capture.py
- py/_io/saferepr.py
- py/_io/terminalwriter.py
- py/_log/__init__.py
- py/_log/log.py
- py/_log/warning.py
+ py/_io/saferepr.py
+ py/_io/terminalwriter.py
+ py/_log/__init__.py
+ py/_log/log.py
+ py/_log/warning.py
py/_path/__init__.py
- py/_path/cacheutil.py
- py/_path/common.py
- py/_path/local.py
- py/_path/svnurl.py
- py/_path/svnwc.py
+ py/_path/cacheutil.py
+ py/_path/common.py
+ py/_path/local.py
+ py/_path/svnurl.py
+ py/_path/svnwc.py
py/_process/__init__.py
- py/_process/cmdexec.py
- py/_process/forkedfunc.py
- py/_process/killproc.py
- py/_std.py
+ py/_process/cmdexec.py
+ py/_process/forkedfunc.py
+ py/_process/killproc.py
+ py/_std.py
py/_vendored_packages/__init__.py
py/_vendored_packages/apipkg/__init__.py
py/_vendored_packages/apipkg/version.py
py/_vendored_packages/iniconfig/__init__.py
py/_vendored_packages/iniconfig/__init__.pyi
py/_version.py
- py/_xmlgen.py
+ py/_xmlgen.py
py/error.pyi
py/iniconfig.pyi
py/io.pyi
py/path.pyi
py/test.py
py/xml.pyi
-)
-
+)
+
RESOURCE_FILES(
PREFIX contrib/python/py/
.dist-info/METADATA
@@ -70,4 +70,4 @@ RESOURCE_FILES(
py/py.typed
)
-END()
+END()
diff --git a/contrib/python/pyparsing/py2/ya.make b/contrib/python/pyparsing/py2/ya.make
index f625e1661d..e4812f97b5 100644
--- a/contrib/python/pyparsing/py2/ya.make
+++ b/contrib/python/pyparsing/py2/ya.make
@@ -1,7 +1,7 @@
# Generated by devtools/yamaker (pypi).
PY23_LIBRARY()
-
+
PROVIDES(pyparsing)
OWNER(borman orivej g:python-contrib)
@@ -12,15 +12,15 @@ LICENSE(MIT)
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- pyparsing.py
-)
-
+PY_SRCS(
+ TOP_LEVEL
+ pyparsing.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/pyparsing/py2/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/pyparsing/py3/ya.make b/contrib/python/pyparsing/py3/ya.make
index 3a8c9d017b..333141d6c4 100644
--- a/contrib/python/pyparsing/py3/ya.make
+++ b/contrib/python/pyparsing/py3/ya.make
@@ -1,7 +1,7 @@
# Generated by devtools/yamaker (pypi).
PY3_LIBRARY()
-
+
PROVIDES(pyparsing)
OWNER(borman orivej g:python-contrib)
@@ -16,8 +16,8 @@ NO_CHECK_IMPORTS(
pyparsing.diagram.*
)
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
pyparsing/__init__.py
pyparsing/actions.py
pyparsing/common.py
@@ -29,8 +29,8 @@ PY_SRCS(
pyparsing/testing.py
pyparsing/unicode.py
pyparsing/util.py
-)
-
+)
+
RESOURCE_FILES(
PREFIX contrib/python/pyparsing/py3/
.dist-info/METADATA
@@ -38,4 +38,4 @@ RESOURCE_FILES(
pyparsing/diagram/template.jinja2
)
-END()
+END()
diff --git a/contrib/python/pyparsing/ya.make b/contrib/python/pyparsing/ya.make
index db8aa4fbf6..66da70626d 100644
--- a/contrib/python/pyparsing/ya.make
+++ b/contrib/python/pyparsing/ya.make
@@ -1,5 +1,5 @@
PY23_LIBRARY()
-
+
LICENSE(Service-Py23-Proxy)
OWNER(g:python-contrib)
@@ -13,7 +13,7 @@ ENDIF()
NO_LINT()
END()
-
+
RECURSE(
py2
py3
diff --git a/contrib/python/setuptools/py2/pkg_resources/__init__.py b/contrib/python/setuptools/py2/pkg_resources/__init__.py
index 7f5c87e858..649a2ce651 100644
--- a/contrib/python/setuptools/py2/pkg_resources/__init__.py
+++ b/contrib/python/setuptools/py2/pkg_resources/__init__.py
@@ -1,81 +1,81 @@
# coding: utf-8
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-from __future__ import absolute_import
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
import errno
-import tempfile
-import textwrap
+import tempfile
+import textwrap
import itertools
import inspect
import ntpath
import posixpath
-from pkgutil import get_importer
-
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
try:
FileExistsError
except NameError:
FileExistsError = OSError
-
+
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
-# capture these to bypass sandboxing
-from os import utime
-try:
- from os import mkdir, rename, unlink
- WRITE_SUPPORT = True
-except ImportError:
- # no write support, probably under GAE
- WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
- import importlib.machinery as importlib_machinery
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
from . import py31compat
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
@@ -83,8 +83,8 @@ __import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
-
-
+
+
__metaclass__ = type
@@ -96,10 +96,10 @@ if six.PY2:
PermissionError = None
NotADirectoryError = None
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
@@ -115,827 +115,827 @@ resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
-
-
-class PEP440Warning(RuntimeWarning):
- """
- Used when there is an issue with a version or specifier not complying with
- PEP 440.
- """
-
-
-def parse_version(v):
- try:
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
return packaging.version.Version(v)
- except packaging.version.InvalidVersion:
+ except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
-
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
- globals().update(kw)
- _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.items():
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
- return state
-
+ return state
+
-def __setstate__(state):
- g = globals()
- for k, v in state.items():
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
- return state
-
-
-def _sget_dict(val):
- return val.copy()
-
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-
-def _sget_object(val):
- return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
- of Mac OS X that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
- version of Mac OS X that we are *running*. To allow usage of packages that
- explicitly require a newer version of Mac OS X, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform()
- m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
- plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
- except ValueError:
- # not Mac OS X
- pass
- return plat
-
-
-__all__ = [
- # Basic resource access and distribution/entry point discovery
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
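# Illustrative sketch (not part of the diff): how the state helpers above are
# used later in this module. _declare_state() registers a module global along
# with the _sget_*/_sset_* pair that __getstate__()/__setstate__() use to
# snapshot and restore it; 'dict' selects _sget_dict/_sset_dict.
# _example_registry is a hypothetical name, for illustration only.
_declare_state('dict', _example_registry={})
snapshot = __getstate__()      # copies every registered global
__setstate__(snapshot)         # restores them in place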
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of Mac OS X that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of Mac OS X that we are *running*. To allow usage of packages that
+ explicitly require a newer version of Mac OS X, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+ except ValueError:
+ # not Mac OS X
+ pass
+ return plat
+
+
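# Illustrative sketch (not part of the diff): on macOS the two platform
# queries can differ, which is exactly the gap described in the docstring
# above. The printed values are examples, not guaranteed output.
import pkg_resources
print(pkg_resources.get_build_platform())      # e.g. 'macosx-10.9-x86_64' (build target)
print(pkg_resources.get_supported_platform())  # e.g. 'macosx-10.15-x86_64' (running OS)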
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
- 'load_entry_point', 'get_entry_map', 'get_entry_info',
- 'iter_entry_points',
- 'resource_string', 'resource_stream', 'resource_filename',
- 'resource_listdir', 'resource_exists', 'resource_isdir',
-
- # Environmental control
- 'declare_namespace', 'working_set', 'add_activation_listener',
- 'find_distributions', 'set_extraction_path', 'cleanup_resources',
- 'get_default_cache',
-
- # Primary implementation classes
- 'Environment', 'WorkingSet', 'ResourceManager',
- 'Distribution', 'Requirement', 'EntryPoint',
-
- # Exceptions
- 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
- 'UnknownExtra', 'ExtractionError',
-
- # Warnings
- 'PEP440Warning',
-
- # Parsing functions and string utilities
- 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
- 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
- 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
-
- # filesystem utilities
- 'ensure_directory', 'normalize_path',
-
- # Distribution "precedence" constants
- 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
- 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
- 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
- 'register_finder', 'register_namespace_handler', 'register_loader_type',
- 'fixup_namespace_packages', 'get_importer',
-
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
# Warnings
'PkgResourcesDeprecationWarning',
- # Deprecated/backward compatibility only
- 'run_main', 'AvailableDistributions',
-]
-
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
- def __repr__(self):
+ def __repr__(self):
return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
- """
- An already-installed version conflicts with the requested version.
-
- Should be initialized with the installed Distribution and the requested
- Requirement.
- """
-
- _template = "{self.dist} is installed but {self.req} is required"
-
- @property
- def dist(self):
- return self.args[0]
-
- @property
- def req(self):
- return self.args[1]
-
- def report(self):
- return self._template.format(**locals())
-
- def with_context(self, required_by):
- """
- If required_by is non-empty, return a version of self that is a
- ContextualVersionConflict.
- """
- if not required_by:
- return self
- args = self.args + (required_by,)
- return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
- """
- A VersionConflict that accepts a third parameter, the set of the
- requirements that required the installed Distribution.
- """
-
- _template = VersionConflict._template + ' by {self.required_by}'
-
- @property
- def required_by(self):
- return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
- _template = ("The '{self.req}' distribution was not found "
- "and is required by {self.requirers_str}")
-
- @property
- def req(self):
- return self.args[0]
-
- @property
- def requirers(self):
- return self.args[1]
-
- @property
- def requirers_str(self):
- if not self.requirers:
- return 'the application'
- return ', '.join(self.requirers)
-
- def report(self):
- return self._template.format(**locals())
-
- def __str__(self):
- return self.report()
-
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
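# Illustrative sketch (not part of the diff): the resolution errors above
# carry their arguments as properties and render them via report().
# 'example-dist' and 'my-app' are made-up names.
from pkg_resources import Requirement, DistributionNotFound
req = Requirement.parse("example-dist>=1.0")
err = DistributionNotFound(req, ["my-app"])    # args: (req, requirers)
print(err.report())
# The 'example-dist>=1.0' distribution was not found and is required by my-app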
PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq, Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
-
-def _macosx_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
-
-
-def _macosx_arch(machine):
- return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
-
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
- needs some hacks for Linux and Mac OS X.
- """
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
+
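# Illustrative sketch (not part of the diff): get_provider() accepts a module
# name (imported on demand) or a Requirement. Whether has_metadata() finds
# anything depends on how the package was installed.
import pkg_resources
provider = pkg_resources.get_provider("pkg_resources")
print(provider.has_metadata("PKG-INFO"))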
+def _macosx_vers(_cache=[]):
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
+
+def _macosx_arch(machine):
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and Mac OS X.
+ """
from sysconfig import get_platform
-
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
- version = _macosx_vers()
- machine = os.uname()[4].replace(" ", "_")
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macosx_vers()
+ machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
if provided is None or required is None or provided == required:
- # easy case
- return True
-
- # Mac OS X special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
- # use the new macosx designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if dversion == 7 and macosversion >= "10.3" or \
- dversion == 8 and macosversion >= "10.4":
- return True
- # egg isn't macosx or legacy darwin
- return False
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or \
- provMac.group(3) != reqMac.group(3):
- return False
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
- if isinstance(dist, six.string_types):
- dist = Requirement.parse(dist)
- if isinstance(dist, Requirement):
- dist = get_provider(dist)
- if not isinstance(dist, Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
+ # easy case
+ return True
+
+ # Mac OS X special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macosx designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
+ # egg isn't macosx or legacy darwin
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
+
+
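# Illustrative sketch (not part of the diff): the macOS rules above in action.
from pkg_resources import compatible_platforms
compatible_platforms(None, "macosx-10.15-x86_64")                  # True: None matches anything
compatible_platforms("macosx-10.9-x86_64", "macosx-10.15-x86_64")  # True: older OS, same major/arch
compatible_platforms("macosx-11.0-arm64", "macosx-10.15-x86_64")   # False: major and machine differ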
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist, six.string_types):
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
+
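# Illustrative sketch (not part of the diff): the three helpers above are thin
# wrappers over the Distribution object. Assumes pip is installed; any
# distribution that declares console scripts would do.
import pkg_resources
ep = pkg_resources.get_entry_info("pip", "console_scripts", "pip")
if ep is not None:
    main = ep.load()  # equivalent to load_entry_point("pip", "console_scripts", "pip")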
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
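# Illustrative sketch (not part of the diff): wiring a provider factory into
# _provider_factories via register_loader_type(). NullLoader is a hypothetical
# loader type; EmptyProvider is the real no-op provider exported in __all__.
import pkg_resources

class NullLoader:
    pass

pkg_resources.register_loader_type(NullLoader, lambda module: pkg_resources.EmptyProvider())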
class WorkingSet:
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
- @classmethod
- def _build_master(cls):
- """
- Prepare the master working set.
- """
- ws = cls()
- try:
- from __main__ import __requires__
- except ImportError:
- # The main program does not list any requirements
- return ws
-
- # ensure the requirements are met
- try:
- ws.require(__requires__)
- except VersionConflict:
- return cls._build_from_requirements(__requires__)
-
- return ws
-
- @classmethod
- def _build_from_requirements(cls, req_spec):
- """
- Build a working set from a requirement spec. Rewrites sys.path.
- """
- # try it without defaults already on sys.path
- # by starting with an empty path
- ws = cls([])
- reqs = parse_requirements(req_spec)
- dists = ws.resolve(reqs, Environment())
- for dist in dists:
- ws.add(dist)
-
- # add any missing entries from sys.path
- for entry in sys.path:
- if entry not in ws.entries:
- ws.add_entry(entry)
-
- # then copy back to sys.path
- sys.path[:] = ws.entries
- return ws
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
- def __contains__(self, dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
- if dist is not None and dist not in req:
- # XXX add more info
- raise VersionConflict(dist, req)
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
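# Illustrative sketch (not part of the diff): scanning one group on the global
# working set that pkg_resources builds at import time.
import pkg_resources
for ep in pkg_resources.working_set.iter_entry_points("console_scripts"):
    print(ep.name, "->", ep.module_name)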
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- if item not in self.entry_keys:
- # workaround a cache issue
- continue
-
- for key in self.entry_keys[item]:
- if key not in seen:
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
seen[key] = 1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True, replace=False):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set, unless `replace=True`.
- If it's added, any callbacks registered with the ``subscribe()`` method
- will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry, replace=replace)
-
- if entry is None:
- entry = dist.location
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
- if not replace and dist.key in self.by_key:
- # ignore hidden distros
- return
-
- self.by_key[dist.key] = dist
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
- def resolve(self, requirements, env=None, installer=None,
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
+ def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
-
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
     Unless `replace_conflicting=True`, raises a VersionConflict exception if
- any requirements are found on the path that have the correct name but
- the wrong version. Otherwise, if an `installer` is supplied it will be
- invoked to obtain the correct version of the requirement and activate
- it.
+ any requirements are found on the path that have the correct name but
+ the wrong version. Otherwise, if an `installer` is supplied it will be
+ invoked to obtain the correct version of the requirement and activate
+ it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
- """
-
- # set up the stack
- requirements = list(requirements)[::-1]
- # set of processed requirements
- processed = {}
- # key -> dist
- best = {}
- to_activate = []
-
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
req_extras = _ReqExtras()
- # Mapping of requirement to set of distributions that required it;
- # useful for reporting info about conflicts.
- required_by = collections.defaultdict(set)
-
- while requirements:
- # process dependencies breadth-first
- req = requirements.pop(0)
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
if not req_extras.markers_pass(req, extras):
continue
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None or (dist not in req and replace_conflicting):
- ws = self
- if env is None:
- if dist is None:
- env = Environment(self.entries)
- else:
- # Use an empty environment and working set to avoid
- # any further conflicts with the conflicting
- # distribution
- env = Environment([])
- ws = WorkingSet([])
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+ # Use an empty environment and working set to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
- if dist is None:
- requirers = required_by.get(req, None)
- raise DistributionNotFound(req, requirers)
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- dependent_req = required_by[req]
- raise VersionConflict(dist, req).with_context(dependent_req)
-
- # push the new requirements onto the stack
- new_requirements = dist.requires(req.extras)[::-1]
- requirements.extend(new_requirements)
-
- # Register the new requirements needed by req
- for new_requirement in new_requirements:
- required_by[new_requirement].add(req.project_name)
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
-
- processed[req] = True
-
- # return list of distros to activate
- return to_activate
-
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
+
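# Illustrative sketch (not part of the diff): resolve() only computes the
# activation list; require() further below is resolve() plus add() per result.
# Assumes setuptools is available as an installed distribution.
import pkg_resources
ws = pkg_resources.WorkingSet()   # defaults to sys.path
reqs = list(pkg_resources.parse_requirements("setuptools"))
for dist in ws.resolve(reqs):
    print(dist.project_name, dist.version)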
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- # add plugins+libs to sys.path
- map(working_set.add, distributions)
- # display errors
- print('Could not load', errors)
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment`` that
- contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- # scan project names in alphabetic order
- plugin_projects.sort()
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- # put all our entries in shadow_set
- list(map(shadow_set.add, self))
-
- for project_name in plugin_projects:
-
- for dist in plugin_env[project_name]:
-
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError as v:
- # save error info
- error_info[dist] = v
- if fallback:
- # try the next older version of project
- continue
- else:
- # give up on this project, keep going
- break
-
- else:
- list(map(shadow_set.add, resolvees))
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+ directories. The `full_env`, if supplied, should be an ``Environment`` that
+ contains all currently-available distributions. If `full_env` is not
+ supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
+
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
if not existing:
return
- for dist in self:
- callback(dist)
-
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
- self.callbacks[:]
- )
-
- def __setstate__(self, e_k_b_c):
- entries, keys, by_key, callbacks = e_k_b_c
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.callbacks = callbacks[:]
-
-
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
+
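# Illustrative sketch (not part of the diff): subscribe() replays already-active
# distributions unless existing=False, then fires once per future add().
import pkg_resources

def on_activate(dist):
    print("activated:", dist.project_name)

pkg_resources.working_set.subscribe(on_activate)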
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
@@ -957,353 +957,353 @@ class _ReqExtras(dict):
class Environment:
- """Searchable snapshot of distributions on a search path"""
-
+ """Searchable snapshot of distributions on a search path"""
+
def __init__(
self, search_path=None, platform=get_supported_platform(),
- python=PY_MAJOR):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self, project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
-
- Uses case-insensitive `project_name` comparison, assuming all the
- project's distributions use their project's name converted to all
- lowercase as their key.
-
- """
- distribution_key = project_name.lower()
- return self._distmap.get(distribution_key, [])
-
- def add(self, dist):
- """Add `dist` if we ``can_add()`` it and it has not already been added
- """
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key, [])
- if dist not in dists:
- dists.append(dist)
- dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
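# Illustrative sketch (not part of the diff): Environment keys distributions by
# lowercased project name and keeps each list sorted newest-first.
import pkg_resources
env = pkg_resources.Environment()   # scans sys.path by default
for dist in env["setuptools"]:      # case-insensitive lookup
    print(dist.version)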
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- # try to download/install
- return self.obtain(req, installer)
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]:
- yield key
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other, Distribution):
- self.add(other)
- elif isinstance(other, Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(
- resource_name
- )
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(
- resource_name
- )
-
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- tmpl = textwrap.dedent("""
- Can't extract file(s) to egg cache
-
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
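
For orientation, a minimal sketch of driving this resolution through the public API; the requirement name is only an example:

import pkg_resources

env = pkg_resources.Environment()                    # scans sys.path by default
ws = pkg_resources.WorkingSet()                      # currently active dists
req = pkg_resources.Requirement.parse('setuptools')

# An active dist wins; otherwise the newest matching dist in the environment;
# otherwise obtain() is consulted (returns None here, since no installer given).
dist = env.best_match(req, ws)
print(dist)
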
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
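
A hedged sketch of the subclassing hook described above; LocalDirEnvironment and its directory are hypothetical:

import pkg_resources

class LocalDirEnvironment(pkg_resources.Environment):
    """Check a local directory of eggs before falling back to `installer`."""

    def __init__(self, local_dir, **kw):
        self.local_dir = local_dir
        pkg_resources.Environment.__init__(self, **kw)

    def obtain(self, requirement, installer=None):
        # scan the local directory first (illustrative only)
        for dist in pkg_resources.find_distributions(self.local_dir):
            if dist in requirement:      # Requirement.__contains__
                return dist
        return pkg_resources.Environment.obtain(self, requirement, installer)
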
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
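
Combining environments then looks like this (the 'plugins' directory is illustrative):

import pkg_resources

site_env = pkg_resources.Environment()               # from sys.path
plugin_env = pkg_resources.Environment(['plugins'])  # hypothetical path
merged = site_env + plugin_env                       # new Environment (__add__)
site_env += plugin_env                               # in-place merge (__iadd__)
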
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
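
In practice these methods are usually reached through the module-level wrappers that pkg_resources binds to a singleton ResourceManager; a small sketch, using pkg_resources' own package as the target:

import pkg_resources

if pkg_resources.resource_exists('pkg_resources', '__init__.py'):
    data = pkg_resources.resource_string('pkg_resources', '__init__.py')
    print(len(data), 'bytes')
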
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
The following error occurred while trying to extract file(s)
to the Python egg cache:
-
- {old_exc}
-
- The Python egg cache directory is currently set to:
-
- {cache_path}
-
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
- """).lstrip()
- err = ExtractionError(tmpl.format(**locals()))
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
+ try:
+ _bypass_ensure_directory(target_path)
except Exception:
- self.extraction_error()
-
- self._warn_unsafe_extraction_path(extract_path)
-
- self.cached_files[target_path] = 1
- return target_path
-
- @staticmethod
- def _warn_unsafe_extraction_path(path):
- """
- If the default extraction path is overridden and set to an insecure
- location, such as /tmp, it opens up an opportunity for an attacker to
- replace an extracted file with an unauthorized payload. Warn the user
- if a known insecure location is used.
-
- See Distribute #375 for more details.
- """
- if os.name == 'nt' and not path.startswith(os.environ['windir']):
- # On Windows, permissions are generally restrictive by default
- # and temp directories are not writable by other users, so
- # bypass the warning.
- return
- mode = os.stat(path).st_mode
- if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
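
A sketch of the resulting layout; the egg name is hypothetical, and note the call creates the parent directory as a side effect:

import pkg_resources

mgr = pkg_resources.ResourceManager()
path = mgr.get_cache_path('Example-1.0-py3.8.egg', ('data', 'blob.bin'))
# -> <cache>/Example-1.0-py3.8.egg-tmp/data/blob.bin
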
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
- "when "
- "used with get_resource_filename. Consider a more secure "
- "location (set with .set_extraction_path or the "
+ "when "
+ "used with get_resource_filename. Consider a more secure "
+ "location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
- warnings.warn(msg, UserWarning)
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
- os.chmod(tempname, mode)
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError(
- "Can't change extraction path, files already extracted"
- )
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-def get_default_cache():
- """
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
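
The temporary-directory pattern the docstring recommends might look like this sketch (cleanup remains best-effort):

import atexit
import tempfile
import pkg_resources

mgr = pkg_resources.ResourceManager()
mgr.set_extraction_path(tempfile.mkdtemp())   # must happen before extraction
atexit.register(mgr.cleanup_resources)        # best-effort cleanup at exit
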
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
+def get_default_cache():
+ """
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
@@ -1312,45 +1312,45 @@ def get_default_cache():
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
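
So the cache location can be steered per-process; an illustrative override:

import os
import pkg_resources

os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'   # hypothetical path
print(pkg_resources.get_default_cache())           # -> /tmp/egg-cache
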
-
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """
- Convert an arbitrary string to a standard version string
- """
- try:
- # normalize the version
- return str(packaging.version.Version(version))
- except packaging.version.InvalidVersion:
+
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
- """
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
return name.replace('-', '_')
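
Worked examples of the four normalizers above (expected results shown as comments):

from pkg_resources import safe_name, safe_version, safe_extra, to_filename

safe_name('My Project?!')    # -> 'My-Project-'
safe_version('2.1-rc1')      # -> '2.1rc1' (normalized by packaging)
safe_extra('Feature One')    # -> 'feature_one'
to_filename('my-project')    # -> 'my_project'
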
-
-
+
+
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
@@ -1363,14 +1363,14 @@ def invalid_marker(text):
e.lineno = None
return e
return False
-
-
+
+
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
-
+
This implementation uses the 'pyparsing' module.
"""
try:
@@ -1378,38 +1378,38 @@ def evaluate_marker(text, extra=None):
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
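
A quick sketch of both marker helpers against the running interpreter:

from pkg_resources import evaluate_marker, invalid_marker

assert invalid_marker('python_version >= "3"') is False   # valid marker
print(evaluate_marker('python_version >= "3" and os_name == "posix"'))
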
-
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return io.BytesIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
+
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
- def has_metadata(self, name):
+ def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
-
+
path = self._get_metadata_path(name)
return self._has(path)
@@ -1427,68 +1427,68 @@ class NullProvider:
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self, resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self, name):
- return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
- def resource_listdir(self, resource_name):
- return self._listdir(self._fn(self.module_path, resource_name))
-
- def metadata_listdir(self, name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info, name))
- return []
-
- def run_script(self, script_name, namespace):
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
- if not self.has_metadata(script):
+ if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
- script_text = self.get_metadata(script).replace('\r\n', '\n')
- script_text = script_text.replace('\r', '\n')
- script_filename = self._fn(self.egg_info, script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
- source = open(script_filename).read()
- code = compile(source, script_filename, 'exec')
- exec(code, namespace, namespace)
- else:
- from linecache import cache
- cache[script_filename] = (
- len(script_text), 0, script_text.split('\n'), script_filename
- )
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ source = open(script_filename).read()
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
script_code = compile(script_text, script_filename, 'exec')
- exec(script_code, namespace, namespace)
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
@staticmethod
def _validate_resource_path(path):
"""
@@ -1564,74 +1564,74 @@ is not allowed.
stacklevel=4,
)
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-
-register_loader_type(object, NullProvider)
-
-
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self, module):
- NullProvider.__init__(self, module)
- self._setup_prefix()
-
- def _setup_prefix(self):
- # we assume here that our metadata may be nested inside a "basket"
- # of multiple eggs; that's why we use module_path instead of .archive
- path = self.module_path
- old = None
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
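
Registering a provider for a custom loader type follows the same pattern; MyLoader here is hypothetical:

import pkg_resources

class MyLoader:
    """Hypothetical PEP 302 loader without resource support."""

pkg_resources.register_loader_type(MyLoader, pkg_resources.NullProvider)
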
+
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # we assume here that our metadata may be nested inside a "basket"
+ # of multiple eggs; that's why we use module_path instead of .archive
+ path = self.module_path
+ old = None
while path != old:
if _is_egg_path(path):
- self.egg_name = os.path.basename(path)
- self.egg_info = os.path.join(path, 'EGG-INFO')
- self.egg_root = path
- break
- old = path
- path, base = os.path.split(path)
-
-
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self, path):
- return os.path.isdir(path)
-
- def _listdir(self, path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- with open(path, 'rb') as stream:
- return stream.read()
-
- @classmethod
- def _register(cls):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+ break
+ old = path
+ path, base = os.path.split(path)
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- module_path = None
-
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
_isdir = _has = lambda self, path: False
def _get(self, path):
@@ -1640,260 +1640,260 @@ class EmptyProvider(NullProvider):
def _listdir(self, path):
return []
- def __init__(self):
- pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
- """
- zip manifest builder
- """
-
- @classmethod
- def build(cls, path):
- """
- Build a dictionary similar to the zipimport directory
- caches, except instead of tuples, store ZipInfo objects.
-
- Use a platform-specific path separator (os.sep) for the path keys
- for compatibility with pypy on Windows.
- """
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
with zipfile.ZipFile(path) as zfile:
- items = (
- (
- name.replace('/', os.sep),
- zfile.getinfo(name),
- )
- for name in zfile.namelist()
- )
- return dict(items)
-
- load = build
-
-
-class MemoizedZipManifests(ZipManifests):
- """
- Memoized zipfile manifests.
- """
- manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
- def load(self, path):
- """
- Load a manifest at path or return a suitable manifest already loaded.
- """
- path = os.path.normpath(path)
- mtime = os.stat(path).st_mtime
-
- if path not in self or self[path].mtime != mtime:
- manifest = self.build(path)
- self[path] = self.manifest_mod(manifest, mtime)
-
- return self[path].manifest
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
- _zip_manifests = MemoizedZipManifests()
-
- def __init__(self, module):
- EggProvider.__init__(self, module)
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
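
A sketch of the memoization behaviour; the archive path is hypothetical:

from pkg_resources import MemoizedZipManifests

manifests = MemoizedZipManifests()
m1 = manifests.load('/path/to/some.egg')   # reads the zip, caches by mtime
m2 = manifests.load('/path/to/some.egg')   # same mtime -> served from cache
assert m1 is m2
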
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre):]
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.zip_pre)
- )
-
- def _parts(self, zip_path):
- # Convert a zipfile subpath into an egg-relative path part list.
- # pseudo-fs path
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.egg_root)
- )
-
- @property
- def zipinfo(self):
- return self._zip_manifests.load(self.loader.archive)
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- @staticmethod
- def _get_date_and_size(zip_stat):
- size = zip_stat.file_size
- # ymdhms+wday, yday, dst
- date_time = zip_stat.date_time + (0, 0, -1)
- # 1980 offset already done
- timestamp = time.mktime(date_time)
- return timestamp, size
-
- def _extract_resource(self, manager, zip_path):
-
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(
- manager, os.path.join(zip_path, name)
- )
- # return the extracted directory name
- return os.path.dirname(last)
-
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
- if not WRITE_SUPPORT:
- raise IOError('"os.rename" and "os.unlink" are not supported '
- 'on this platform')
- try:
-
- real_path = manager.get_cache_path(
- self.egg_name, self._parts(zip_path)
- )
-
- if self._is_current(real_path, zip_path):
- return real_path
-
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
+ def _extract_resource(self, manager, zip_path):
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp, timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- if self._is_current(real_path, zip_path):
- # the file became current since it was checked above,
- # so proceed.
- return real_path
- # Windows, del old file and retry
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+ # Windows, del old file and retry
elif os.name == 'nt':
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- # report a user-friendly error
- manager.extraction_error()
-
- return real_path
-
- def _is_current(self, file_path, zip_path):
- """
- Return True if the file_path is current for this zip_path
- """
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
- if not os.path.isfile(file_path):
- return False
- stat = os.stat(file_path)
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
- return False
- # check that the contents match
- zip_contents = self.loader.get_data(zip_path)
- with open(file_path, 'rb') as f:
- file_contents = f.read()
- return zip_contents == file_contents
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self, fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self, fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
- def _resource_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self, path):
- self.path = path
-
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+ This provider rejects all data and metadata requests except for PKG-INFO,
+ which is treated as existing, and will be the contents of the file at
+ the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
def _get_metadata_path(self, name):
return self.path
- def has_metadata(self, name):
+ def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
-
- def get_metadata(self, name):
+
+ def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
-
+
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
@@ -1907,106 +1907,106 @@ class FileMetadata(EmptyProvider):
msg = tmpl.format(**locals())
warnings.warn(msg)
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
- dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+ dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
self.zip_pre = importer.archive + os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
_declare_state('dict', _distribution_finders={})
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
-
-def find_eggs_in_zip(importer, path_item, only=False):
- """
- Find eggs in zip files; possibly multiple nested eggs.
- """
- if importer.archive.endswith('.whl'):
- # wheels are not supported with this finder
- # they don't have PKG-INFO metadata, and won't ever contain eggs
- return
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- # don't yield nested distros
- return
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
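
A minimal custom finder registration, mirroring find_nothing below; MyImporter is hypothetical:

import pkg_resources

class MyImporter:
    """Hypothetical sys.path-hook importer type."""

def find_none(importer, path_item, only=False):
    return iter(())            # yields no Distribution objects

pkg_resources.register_finder(MyImporter, find_none)
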
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
- subpath = os.path.join(path_item, subitem)
+ subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
- yield dist
+ yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-def find_nothing(importer, path_item, only=False):
-    return ()
-register_finder(object, find_nothing)
-
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+def find_nothing(importer, path_item, only=False):
+    return ()
+
+
+register_finder(object, find_nothing)
def _by_version_descending(names):
"""
@@ -2034,15 +2034,15 @@ def _by_version_descending(names):
return sorted(names, key=_by_version, reverse=True)
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
- )
+ )
)
return
@@ -2161,73 +2161,73 @@ def resolve_egg_link(path):
return next(dist_groups, ())
-register_finder(pkgutil.ImpImporter, find_on_path)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer, path_entry, moduleName, module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
-
- importer = get_importer(path_item)
- if importer is None:
- return None
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = types.ModuleType(packageName)
- module.__path__ = []
- _set_parent_ns(packageName)
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer, path_item, packageName, module)
- if subpath is not None:
- path = module.__path__
- path.append(subpath)
- loader.load_module(packageName)
- _rebuild_mod_path(path, packageName, module)
- return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
- """
- Rebuild module.__path__ ensuring that all entries are ordered
- corresponding to their sys.path order
- """
- sys_path = [_normalize_cached(p) for p in sys.path]
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
+ loader.load_module(packageName)
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
- """
+ """
Workaround for #520 and #513.
"""
try:
@@ -2237,13 +2237,13 @@ def _rebuild_mod_path(orig_path, package_name, module):
def position_in_sys_path(path):
"""
- Return the ordinal of the path based on its position in sys.path
- """
+ Return the ordinal of the path based on its position in sys.path
+ """
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
+
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
@@ -2251,85 +2251,85 @@ def _rebuild_mod_path(orig_path, package_name, module):
module.__path__[:] = new_path
else:
module.__path__ = new_path
-
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- _imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
- declare_namespace(parent)
- if parent not in _namespace_packages:
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
- except AttributeError:
- raise TypeError("Not a package:", parent)
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError:
+ raise TypeError("Not a package:", parent)
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- _imp.release_lock()
-
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- _imp.acquire_lock()
- try:
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
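
The classic call site for this function is a one-liner in a namespace package's __init__.py (the legacy idiom):

__import__('pkg_resources').declare_namespace(__name__)
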
+
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
for package in _namespace_packages.get(parent, ()):
- subpath = _handle_ns(package, path_item)
- if subpath:
- fixup_namespace_packages(subpath, package)
- finally:
- _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
if _normalize_cached(item) == normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
-
+
def _cygwin_patch(filename): # pragma: nocover
"""
@@ -2342,13 +2342,13 @@ def _cygwin_patch(filename): # pragma: nocover
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
def _is_egg_path(path):
"""
@@ -2357,351 +2357,351 @@ def _is_egg_path(path):
return path.lower().endswith('.egg')
-def _is_unpacked_egg(path):
- """
- Determine if given path appears to be an unpacked egg.
- """
- return (
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
- )
-
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-def yield_lines(strs):
- """Yield non-empty/non-comment lines of a string or sequence"""
- if isinstance(strs, six.string_types):
- for s in strs.splitlines():
- s = s.strip()
- # skip blank lines/comments
- if s and not s.startswith('#'):
- yield s
- else:
- for ss in strs:
- for s in yield_lines(ss):
- yield s
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"""
- (?P<name>[^-]+) (
- -(?P<ver>[^-]+) (
- -py(?P<pyver>[^-]+) (
- -(?P<plat>.+)
- )?
- )?
- )?
- """,
- re.VERBOSE | re.IGNORECASE,
-).match
-
-
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a string or sequence"""
+ if isinstance(strs, six.string_types):
+ for s in strs.splitlines():
+ s = s.strip()
+ # skip blank lines/comments
+ if s and not s.startswith('#'):
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
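`yield_lines` flattens a string or arbitrarily nested iterable of strings into stripped, non-blank, non-comment lines. A Python 3 sketch (the vendored code checks `six.string_types` rather than `str`):

    def yield_lines(strs):
        # Yield non-empty, non-comment lines of a string or nested sequence.
        if isinstance(strs, str):
            for s in strs.splitlines():
                s = s.strip()
                if s and not s.startswith('#'):
                    yield s
        else:
            for ss in strs:
                for s in yield_lines(ss):
                    yield s

    text = '\n# comment\nrequests\nsix\n'
    print(list(yield_lines([text, 'extra'])))  # ['requests', 'six', 'extra']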
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
+
+
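The `EGG_NAME` regex above splits an egg basename into project name, version, Python tag, and platform, each trailing segment optional. A demo using the same pattern on a made-up filename:

    import re

    EGG_NAME = re.compile(
        r"""
        (?P<name>[^-]+) (
            -(?P<ver>[^-]+) (
                -py(?P<pyver>[^-]+) (
                    -(?P<plat>.+)
                )?
            )?
        )?
        """,
        re.VERBOSE | re.IGNORECASE,
    ).match

    m = EGG_NAME('demo-1.0-py2.7-linux_x86_64')
    print(m.group('name', 'ver', 'pyver', 'plat'))
    # ('demo', '1.0', '2.7', 'linux_x86_64')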
class EntryPoint:
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
self.extras = tuple(extras)
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, *args, **kwargs):
- """
- Require packages for this EntryPoint, then resolve it.
- """
- if not require or args or kwargs:
- warnings.warn(
- "Parameters to load are deprecated. Call .resolve and "
- ".require separately.",
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
PkgResourcesDeprecationWarning,
- stacklevel=2,
- )
- if require:
- self.require(*args, **kwargs)
- return self.resolve()
-
- def resolve(self):
- """
- Resolve the entry point from its module and attrs.
- """
- module = __import__(self.module_name, fromlist=['__name__'], level=0)
- try:
- return functools.reduce(getattr, self.attrs, module)
- except AttributeError as exc:
- raise ImportError(str(exc))
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
+ raise ImportError(str(exc))
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
- reqs = self.dist.requires(self.extras)
+ reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
- list(map(working_set.add, items))
-
- pattern = re.compile(
- r'\s*'
- r'(?P<name>.+?)\s*'
- r'=\s*'
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+))?\s*'
- r'(?P<extras>\[.*\])?\s*$'
- )
-
- @classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1, extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional
- """
- m = cls.pattern.match(src)
- if not m:
- msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
- raise ValueError(msg, src)
- res = m.groupdict()
- extras = cls._parse_extras(res['extras'])
- attrs = res['attr'].split('.') if res['attr'] else ()
- return cls(res['name'], res['module'], attrs, extras, dist)
-
- @classmethod
- def _parse_extras(cls, extras_spec):
- if not extras_spec:
- return ()
- req = Requirement.parse('x' + extras_spec)
- if req.specs:
- raise ValueError()
- return req.extras
-
- @classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+ ``[extras]`` parts are optional
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
- return this
-
- @classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data, dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
-
-def _remove_md5_fragment(location):
- if not location:
- return ''
- parsed = urllib.parse.urlparse(location)
- if parsed[-1].startswith('md5='):
- return urllib.parse.urlunparse(parsed[:-1] + ('',))
- return location
-
-
-def _version_from_file(lines):
- """
- Given an iterable of lines from a Metadata file, return
- the value of the Version field, if present, or None otherwise.
- """
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
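`EntryPoint.parse` relies on the `pattern` regex above: the name and module are mandatory, `:attrs` and `[extras]` optional. A standalone sketch of what the pattern extracts (hypothetical entry point string):

    import re

    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    m = pattern.match('console = mypkg.cli:main.run [color,tls]')
    print(m.groupdict())
    # {'name': 'console', 'module': 'mypkg.cli',
    #  'attr': 'main.run', 'extras': '[color,tls]'}

`parse` then splits `attr` on dots for `resolve()` to walk with `getattr`, while `_parse_extras` validates the bracketed extras by round-tripping them through `Requirement.parse`.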
+
+def _remove_md5_fragment(location):
+ if not location:
+ return ''
+ parsed = urllib.parse.urlparse(location)
+ if parsed[-1].startswith('md5='):
+ return urllib.parse.urlunparse(parsed[:-1] + ('',))
+ return location
+
+
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
def is_version_line(line):
return line.lower().startswith('version:')
- version_lines = filter(is_version_line, lines)
- line = next(iter(version_lines), '')
- _, _, value = line.partition(':')
- return safe_version(value.strip()) or None
-
-
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
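`_version_from_file` scans metadata lines for the first `Version:` header. A simplified sketch that skips the `safe_version` normalization step (assumption: the input value is already PEP 440-clean):

    def version_from_file(lines):
        # Return the value of the first 'Version:' line, or None.
        def is_version_line(line):
            return line.lower().startswith('version:')
        line = next(iter(filter(is_version_line, lines)), '')
        _, _, value = line.partition(':')
        return value.strip() or None

    meta = ['Metadata-Version: 2.1', 'Name: demo', 'Version: 1.2.3']
    print(version_from_file(meta))  # '1.2.3'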
+
class Distribution:
- """Wrap an actual or potential sys.path entry w/metadata"""
- PKG_INFO = 'PKG-INFO'
-
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
def __init__(
self, location=None, metadata=None, project_name=None,
- version=None, py_version=PY_MAJOR, platform=None,
- precedence=EGG_DIST):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- @classmethod
- def from_location(cls, location, basename, metadata=None, **kw):
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in _distributionImpl:
- cls = _distributionImpl[ext.lower()]
-
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name', 'ver', 'pyver', 'plat'
- )
- return cls(
- location, metadata, project_name=project_name, version=version,
- py_version=py_version, platform=platform, **kw
- )._reload_version()
-
- def _reload_version(self):
- return self
-
- @property
- def hashcmp(self):
- return (
- self.parsed_version,
- self.precedence,
- self.key,
- _remove_md5_fragment(self.location),
- self.py_version or '',
- self.platform or '',
- )
-
- def __hash__(self):
- return hash(self.hashcmp)
-
- def __lt__(self, other):
- return self.hashcmp < other.hashcmp
-
- def __le__(self, other):
- return self.hashcmp <= other.hashcmp
-
- def __gt__(self, other):
- return self.hashcmp > other.hashcmp
-
- def __ge__(self, other):
- return self.hashcmp >= other.hashcmp
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- # It's not a Distribution, so they are not equal
- return False
- return self.hashcmp == other.hashcmp
-
- def __ne__(self, other):
- return not self == other
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- @property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
-
- @property
- def parsed_version(self):
- if not hasattr(self, "_parsed_version"):
- self._parsed_version = parse_version(self.version)
-
- return self._parsed_version
-
- def _warn_legacy_version(self):
- LV = packaging.version.LegacyVersion
- is_legacy = isinstance(self._parsed_version, LV)
- if not is_legacy:
- return
-
- # While an empty version is technically a legacy version and
- # is not a valid PEP 440 version, it's also unlikely to
- # actually come from someone and instead it is more likely that
- # it comes from setuptools attempting to parse a filename and
- # including it in the list. So for that we'll gate this warning
- # on if the version is anything at all or not.
- if not self.version:
- return
-
- tmpl = textwrap.dedent("""
- '{project_name} ({version})' is being parsed as a legacy,
- non PEP 440,
- version. You may find odd behavior and sort order.
- In particular it will be sorted as less than 0.0. It
- is recommended to migrate to PEP 440 compatible
- versions.
- """).strip().replace('\n', ' ')
-
- warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
-
- @property
- def version(self):
- try:
- return self._version
- except AttributeError:
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
+ _remove_md5_fragment(self.location),
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+ # While an empty version is technically a legacy version and
+ # is not a valid PEP 440 version, it's also unlikely to
+ # actually come from someone and instead it is more likely that
+ # it comes from setuptools attempting to parse a filename and
+ # including it in the list. So for that we'll gate this warning
+ # on if the version is anything at all or not.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+ '{project_name} ({version})' is being parsed as a legacy,
+ non PEP 440,
+ version. You may find odd behavior and sort order.
+ In particular it will be sorted as less than 0.0. It
+ is recommended to migrate to PEP 440 compatible
+ versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError:
version = self._get_version()
- if version is None:
+ if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
- return version
-
- @property
- def _dep_map(self):
+ return version
+
+ @property
+ def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
- try:
- return self.__dep_map
- except AttributeError:
+ try:
+ return self.__dep_map
+ except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
-
+
@staticmethod
def _filter_extras(dm):
"""
@@ -2731,20 +2731,20 @@ class Distribution:
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
- def requires(self, extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None, ()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
- except KeyError:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
- )
- return deps
-
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ )
+ return deps
+
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
@@ -2762,11 +2762,11 @@ class Distribution:
return path
- def _get_metadata(self, name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
@@ -2774,47 +2774,47 @@ class Distribution:
return version
def activate(self, path=None, replace=False):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None:
- path = sys.path
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
self.insert_on(path, replace=replace)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- for pkg in self._get_metadata('namespace_packages.txt'):
- if pkg in sys.modules:
- declare_namespace(pkg)
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name), to_filename(self.version),
- self.py_version or PY_MAJOR
- )
-
- if self.platform:
- filename += '-' + self.platform
- return filename
-
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self, self.location)
- else:
- return str(self)
-
- def __str__(self):
- try:
- version = getattr(self, 'version', None)
- except ValueError:
- version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name, version)
-
- def __getattr__(self, attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError(attr)
- return getattr(self._provider, attr)
-
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
@@ -2828,48 +2828,48 @@ class Distribution:
# python 2.7 not supported
del __dir__
- @classmethod
- def from_filename(cls, filename, metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata,
- **kw
- )
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- if isinstance(self.parsed_version, packaging.version.Version):
- spec = "%s==%s" % (self.project_name, self.parsed_version)
- else:
- spec = "%s===%s" % (self.project_name, self.parsed_version)
-
- return Requirement.parse(spec)
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group, name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group, name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
return ep_map.get(group, {})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
- def insert_on(self, path, loc=None, replace=False):
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
-
+
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
@@ -2887,200 +2887,200 @@ class Distribution:
- Else: add it to the front of path.
"""
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
-
- for p, item in enumerate(npath):
- if item == nloc:
+
+ for p, item in enumerate(npath):
+ if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
- elif item == bdir and self.precedence == EGG_DIST:
- # if it's an .egg, give it precedence over its directory
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- if replace:
- path.insert(0, loc)
- else:
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while True:
- try:
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
np = npath.index(nloc, p + 1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- # ha!
- p = np
-
- return
-
- def check_version_conflict(self):
- if self.key == 'setuptools':
- # ignore the inevitable setuptools self-conflicts :(
- return
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (modname not in sys.modules or modname in nsp
- or modname in _namespace_packages):
- continue
- if modname in ('pkg_resources', 'setuptools', 'site'):
- continue
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (normalize_path(fn).startswith(loc) or
- fn.startswith(self.location)):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for " + repr(self))
- return False
- return True
-
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+ # ha!
+ p = np
+
+ return
+
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
def clone(self, **kw):
- """Copy this distribution, substituting in any changed keyword args"""
- names = 'project_name version py_version platform location precedence'
- for attr in names.split():
- kw.setdefault(attr, getattr(self, attr, None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
- @property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
- def _reload_version(self):
- """
- Packages installed by distutils (e.g. numpy or scipy),
- which uses an old safe_version, and so
- their version numbers can get mangled when
- converted to filenames (e.g., 1.11.0.dev0+2329eae to
- 1.11.0.dev0_2329eae). These distributions will not be
- parsed properly
- downstream by Distribution and safe_version, so
- take an extra step and try to get the version number from
- the metadata file itself instead of the filename.
- """
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
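Putting the `Distribution` pieces together: `from_location` recovers name/version from an egg basename via `EGG_NAME`, `as_requirement` pins the exact version, and `insert_on` places an egg ahead of its containing directory on a path list. A hedged usage sketch (hypothetical egg path; printed paths assume a POSIX layout):

    from pkg_resources import Distribution

    dist = Distribution.from_location(
        '/tmp/demo-1.0-py2.7.egg', 'demo-1.0-py2.7.egg')
    print(dist.project_name, dist.version)  # demo 1.0
    print(dist.as_requirement())            # demo==1.0

    # An .egg entry is inserted in front of its parent directory so it
    # shadows loose files that sit beside it.
    path = ['/tmp']
    dist.insert_on(path)
    print(path)  # ['/tmp/demo-1.0-py2.7.egg', '/tmp']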
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+ Packages installed by distutils (e.g. numpy or scipy),
+ which uses an old safe_version, and so
+ their version numbers can get mangled when
+ converted to filenames (e.g., 1.11.0.dev0+2329eae to
+ 1.11.0.dev0_2329eae). These distributions will not be
+ parsed properly
+ downstream by Distribution and safe_version, so
+ take an extra step and try to get the version number from
+ the metadata file itself instead of the filename.
+ """
md_version = self._get_version()
- if md_version:
- self._version = md_version
- return self
-
-
-class DistInfoDistribution(Distribution):
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
- PKG_INFO = 'METADATA'
- EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
- @property
- def _parsed_pkg_info(self):
- """Parse and cache metadata"""
- try:
- return self._pkg_info
- except AttributeError:
- metadata = self.get_metadata(self.PKG_INFO)
- self._pkg_info = email.parser.Parser().parsestr(metadata)
- return self._pkg_info
-
- @property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._compute_dependencies()
- return self.__dep_map
-
- def _compute_dependencies(self):
- """Recompute this distribution's dependencies."""
- dm = self.__dep_map = {None: []}
-
- reqs = []
- # Including any condition expressions
- for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
-
- def reqs_for_extra(extra):
- for req in reqs:
+
+ def reqs_for_extra(extra):
+ for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
- yield req
-
- common = frozenset(reqs_for_extra(None))
- dm[None].extend(common)
-
- for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
-
- return dm
-
-
-_distributionImpl = {
- '.egg': Distribution,
- '.egg-info': EggInfoDistribution,
- '.dist-info': DistInfoDistribution,
+
+ return dm
+
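`_compute_dependencies` partitions `Requires-Dist` entries by evaluating each requirement's environment marker against the candidate `extra`; marker-less requirements land in the null extra. The same evaluation, sketched with `pkg_resources.Requirement` (hypothetical requirement strings):

    from pkg_resources import Requirement

    reqs = [
        Requirement.parse('idna; extra == "tls"'),
        Requirement.parse('six'),
    ]

    def reqs_for_extra(extra):
        for req in reqs:
            if not req.marker or req.marker.evaluate({'extra': extra}):
                yield req

    common = frozenset(reqs_for_extra(None))
    tls_only = frozenset(reqs_for_extra('tls')) - common
    print([str(r) for r in common])    # ['six']
    print([str(r) for r in tls_only])  # ['idna; extra == "tls"'] (formatting may vary)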
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
}
-
-
+
+
def issue_warning(*args, **kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-class RequirementParseError(ValueError):
- def __str__(self):
- return ' '.join(self.args)
-
-
-def parse_requirements(strs):
- """Yield ``Requirement`` objects for each specification in `strs`
-
- `strs` must be a string, or a (possibly-nested) iterable thereof.
- """
- # create a steppable iterator, so we can handle \-continuations
- lines = iter(yield_lines(strs))
-
- for line in lines:
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+class RequirementParseError(ValueError):
+ def __str__(self):
+ return ' '.join(self.args)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
@@ -3092,63 +3092,63 @@ def parse_requirements(strs):
except StopIteration:
return
yield Requirement(line)
-
-
+
+
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
- self.project_name, self.key = project_name, project_name.lower()
+ self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
- self.hashCmp = (
- self.key,
+ self.hashCmp = (
+ self.key,
self.url,
- self.specifier,
- frozenset(self.extras),
+ self.specifier,
+ frozenset(self.extras),
str(self.marker) if self.marker else None,
- )
- self.__hash = hash(self.hashCmp)
-
- def __eq__(self, other):
- return (
- isinstance(other, Requirement) and
- self.hashCmp == other.hashCmp
- )
-
- def __ne__(self, other):
- return not self == other
-
- def __contains__(self, item):
- if isinstance(item, Distribution):
- if item.key != self.key:
- return False
-
- item = item.version
-
- # Allow prereleases always in order to match the previous behavior of
- # this method. In the future this should be smarter and follow PEP 440
- # more accurately.
- return self.specifier.contains(item, prereleases=True)
-
- def __hash__(self):
- return self.__hash
-
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
-
- @staticmethod
- def parse(s):
- req, = parse_requirements(s)
- return req
-
-
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
+
+
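`Requirement.__contains__` accepts either a `Distribution` or a bare version string, and always allows prereleases to preserve historical behavior. A quick check (hypothetical project name):

    from pkg_resources import Requirement

    req = Requirement.parse('demo[security]>=1.0,<2.0')
    print(req.key, req.extras, sorted(req.specs))
    # demo ('security',) [('<', '2.0'), ('>=', '1.0')]

    print('1.5' in req)       # True
    print('2.0.dev1' in req)  # True -- prereleases always match here
    print('2.1' in req)       # False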
def _always_object(classes):
"""
Ensure object appears in the mro even
@@ -3159,70 +3159,70 @@ def _always_object(classes):
return classes
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
+ if t in registry:
+ return registry[t]
+
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
- """Sandbox-bypassing version of ensure_directory()"""
- if not WRITE_SUPPORT:
- raise IOError('"os.mkdir" not supported on this platform.')
- dirname, filename = split(path)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
-
-
-def split_sections(s):
- """Split a string or iterable thereof into (section, content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
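`split_sections` turns INI-style text (as found in `entry_points.txt`) into `(section, lines)` pairs, grouping any pre-section lines under `None`. For example:

    from pkg_resources import split_sections

    text = '''
    preamble line
    [console_scripts]
    demo = demo.cli:main
    # comments are dropped by yield_lines
    [gui_scripts]
    demo-gui = demo.gui:main
    '''
    for section, lines in split_sections(text):
        print(section, lines)
    # None ['preamble line']
    # console_scripts ['demo = demo.cli:main']
    # gui_scripts ['demo-gui = demo.gui:main']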
def _mkstemp(*args, **kw):
- old_open = os.open
- try:
- # temporarily bypass sandboxing
- os.open = os_open
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
return tempfile.mkstemp(*args, **kw)
- finally:
- # and then put it back
- os.open = old_open
-
-
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
# Yandex resource support
from __res import Y_PYTHON_SOURCE_ROOT, ResourceImporter, executable
from library.python import resource
@@ -3311,53 +3311,53 @@ register_finder(ResourceImporter, find_in_res)
register_loader_type(ResourceImporter, ResProvider.from_module)
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
- f(*args, **kwargs)
- return f
-
-
-@_call_aside
-def _initialize(g=globals()):
- "Set up global resource manager (deliberately not state-saved)"
- manager = ResourceManager()
- g['_manager'] = manager
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
-
-
-@_call_aside
-def _initialize_master_working_set():
- """
- Prepare the master working set and make the ``require()``
- API available.
-
- This function has explicit effects on the global state
- of pkg_resources. It is intended to be invoked once at
- the initialization of this module.
-
- Invocation by other packages is unsupported and done
- at their own risk.
- """
- working_set = WorkingSet._build_master()
- _declare_state('object', working_set=working_set)
-
- require = working_set.require
- iter_entry_points = working_set.iter_entry_points
- add_activation_listener = working_set.subscribe
- run_script = working_set.run_script
- # backward compatibility
- run_main = run_script
+
+
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
@@ -3371,9 +3371,9 @@ def _initialize_master_working_set():
existing=False,
)
working_set.entries = []
- # match order
- list(map(working_set.add_entry, sys.path))
- globals().update(locals())
+ # match order
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
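After `_initialize_master_working_set` runs at import time, `require`, `iter_entry_points`, `add_activation_listener`, and `run_script` are module-level aliases for methods of the master `WorkingSet`. A hedged usage sketch (assumes setuptools is installed in the environment):

    import pkg_resources

    # Resolve a distribution and activate it, plus its dependencies.
    dists = pkg_resources.require('setuptools')
    print(dists[0].project_name)

    # Enumerate advertised console scripts, if any are installed.
    for ep in pkg_resources.iter_entry_points('console_scripts'):
        print(ep.name, '->', ep.module_name)
        break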
class PkgResourcesDeprecationWarning(Warning):
"""
diff --git a/contrib/python/setuptools/py2/setuptools/__init__.py b/contrib/python/setuptools/py2/setuptools/__init__.py
index baa1ea0883..9d8ae1ed5f 100644
--- a/contrib/python/setuptools/py2/setuptools/__init__.py
+++ b/contrib/python/setuptools/py2/setuptools/__init__.py
@@ -1,93 +1,93 @@
-"""Extensions to the 'distutils' for large or complex distributions"""
-
-import os
+"""Extensions to the 'distutils' for large or complex distributions"""
+
+import os
import sys
-import functools
-import distutils.core
-import distutils.filelist
+import functools
+import distutils.core
+import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
-from distutils.util import convert_path
-from fnmatch import fnmatchcase
-
+from distutils.util import convert_path
+from fnmatch import fnmatchcase
+
from ._deprecation_warning import SetuptoolsDeprecationWarning
-
+
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
-import setuptools.version
-from setuptools.extension import Extension
+import setuptools.version
+from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
-from setuptools.depends import Require
+from setuptools.depends import Require
from . import monkey
-
+
__metaclass__ = type
-__all__ = [
- 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
+__all__ = [
+ 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
-]
-
+]
+
if PY3:
__all__.append('find_namespace_packages')
-__version__ = setuptools.version.__version__
-
-bootstrap_install_from = None
-
-# If we run 2to3 on .py files, should we also convert docstrings?
-# Default: yes; assume that we can detect doctests reliably
-run_2to3_on_doctests = True
-# Standard package names for fixer packages
-lib2to3_fixer_packages = ['lib2to3.fixes']
-
-
+__version__ = setuptools.version.__version__
+
+bootstrap_install_from = None
+
+# If we run 2to3 on .py files, should we also convert docstrings?
+# Default: yes; assume that we can detect doctests reliably
+run_2to3_on_doctests = True
+# Standard package names for fixer packages
+lib2to3_fixer_packages = ['lib2to3.fixes']
+
+
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
- @classmethod
- def find(cls, where='.', exclude=(), include=('*',)):
- """Return a list of all Python packages found within directory 'where'
-
+ @classmethod
+ def find(cls, where='.', exclude=(), include=('*',)):
+ """Return a list of all Python packages found within directory 'where'
+
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
- 'exclude' is a sequence of package names to exclude; '*' can be used
- as a wildcard in the names, such that 'foo.*' will exclude all
- subpackages of 'foo' (but not 'foo' itself).
-
- 'include' is a sequence of package names to include. If it's
- specified, only the named packages will be included. If it's not
- specified, all found packages will be included. 'include' can contain
- shell style wildcard patterns just like 'exclude'.
- """
-
+ 'exclude' is a sequence of package names to exclude; '*' can be used
+ as a wildcard in the names, such that 'foo.*' will exclude all
+ subpackages of 'foo' (but not 'foo' itself).
+
+ 'include' is a sequence of package names to include. If it's
+ specified, only the named packages will be included. If it's not
+ specified, all found packages will be included. 'include' can contain
+ shell style wildcard patterns just like 'exclude'.
+ """
+
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
-
+
@classmethod
def _find_packages_iter(cls, where, exclude, include):
- """
+ """
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
- """
+ """
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
-
+
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
-
+
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
@@ -100,31 +100,31 @@ class PackageFinder:
# down there, even if the parent was excluded.
dirs.append(dir)
- @staticmethod
- def _looks_like_package(path):
+ @staticmethod
+ def _looks_like_package(path):
"""Does a directory look like a package?"""
- return os.path.isfile(os.path.join(path, '__init__.py'))
-
- @staticmethod
- def _build_filter(*patterns):
- """
- Given a list of patterns, return a callable that will be true only if
+ return os.path.isfile(os.path.join(path, '__init__.py'))
+
+ @staticmethod
+ def _build_filter(*patterns):
+ """
+ Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
- """
- return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
-
-
-class PEP420PackageFinder(PackageFinder):
- @staticmethod
- def _looks_like_package(path):
- return True
-
-
-find_packages = PackageFinder.find
-
+ """
+ return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
+
+
+class PEP420PackageFinder(PackageFinder):
+ @staticmethod
+ def _looks_like_package(path):
+ return True
+
+
+find_packages = PackageFinder.find
+
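`find_packages` walks `where`, treats any directory containing `__init__.py` as a package, and applies the exclude filter before the include filter. A hedged call for a hypothetical `src/` layout:

    from setuptools import find_packages

    # Hypothetical tree:
    #   src/demo/__init__.py
    #   src/demo/tests/__init__.py
    pkgs = find_packages(where='src', exclude=['*.tests', '*.tests.*'])
    print(pkgs)  # ['demo'] for the layout above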
if PY3:
find_namespace_packages = PEP420PackageFinder.find
-
+
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
@@ -165,21 +165,21 @@ setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
-
-
-class Command(_Command):
- __doc__ = _Command.__doc__
-
- command_consumes_arguments = False
-
- def __init__(self, dist, **kw):
- """
- Construct the command for dist, updating
- vars(self) with any keyword parameters.
- """
- _Command.__init__(self, dist)
- vars(self).update(kw)
-
+
+
+class Command(_Command):
+ __doc__ = _Command.__doc__
+
+ command_consumes_arguments = False
+
+ def __init__(self, dist, **kw):
+ """
+ Construct the command for dist, updating
+ vars(self) with any keyword parameters.
+ """
+ _Command.__init__(self, dist)
+ vars(self).update(kw)
+
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
@@ -211,35 +211,35 @@ class Command(_Command):
"'%s' must be a list of strings (got %r)"
% (option, val))
- def reinitialize_command(self, command, reinit_subcommands=0, **kw):
- cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
- vars(cmd).update(kw)
- return cmd
-
-
-def _find_all_simple(path):
- """
- Find all files under 'path'
- """
- results = (
- os.path.join(base, file)
- for base, dirs, files in os.walk(path, followlinks=True)
- for file in files
- )
- return filter(os.path.isfile, results)
-
-
-def findall(dir=os.curdir):
- """
- Find all files under 'dir' and return the list of full filenames.
- Unless dir is '.', return full filenames with dir prepended.
- """
- files = _find_all_simple(dir)
- if dir == os.curdir:
- make_rel = functools.partial(os.path.relpath, start=dir)
- files = map(make_rel, files)
- return list(files)
-
-
+ def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+ cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+ vars(cmd).update(kw)
+ return cmd
+
+
+def _find_all_simple(path):
+ """
+ Find all files under 'path'
+ """
+ results = (
+ os.path.join(base, file)
+ for base, dirs, files in os.walk(path, followlinks=True)
+ for file in files
+ )
+ return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+ """
+ Find all files under 'dir' and return the list of full filenames.
+ Unless dir is '.', return full filenames with dir prepended.
+ """
+ files = _find_all_simple(dir)
+ if dir == os.curdir:
+ make_rel = functools.partial(os.path.relpath, start=dir)
+ files = map(make_rel, files)
+ return list(files)
+
+
# Apply monkey patches
monkey.patch_all()
diff --git a/contrib/python/setuptools/py2/setuptools/archive_util.py b/contrib/python/setuptools/py2/setuptools/archive_util.py
index 82bb09a37b..81436044d9 100644
--- a/contrib/python/setuptools/py2/setuptools/archive_util.py
+++ b/contrib/python/setuptools/py2/setuptools/archive_util.py
@@ -1,13 +1,13 @@
-"""Utilities for extracting common archive formats"""
-
-import zipfile
-import tarfile
-import os
-import shutil
-import posixpath
-import contextlib
-from distutils.errors import DistutilsError
-
+"""Utilities for extracting common archive formats"""
+
+import zipfile
+import tarfile
+import os
+import shutil
+import posixpath
+import contextlib
+from distutils.errors import DistutilsError
+
from pkg_resources import ensure_directory
__all__ = [
@@ -16,158 +16,158 @@ __all__ = [
]
-class UnrecognizedFormat(DistutilsError):
- """Couldn't recognize the archive type"""
-
+class UnrecognizedFormat(DistutilsError):
+ """Couldn't recognize the archive type"""
+
def default_filter(src, dst):
- """The default progress/filter callback; returns the destination path for all files"""
- return dst
-
-
-def unpack_archive(filename, extract_dir, progress_filter=default_filter,
- drivers=None):
- """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
-
- `progress_filter` is a function taking two arguments: a source path
- internal to the archive ('/'-separated), and a filesystem path where it
- will be extracted. The callback must return the desired extract path
- (which may be the same as the one passed in), or else ``None`` to skip
- that file or directory. The callback can thus be used to report on the
- progress of the extraction, as well as to filter the items extracted or
- alter their extraction paths.
-
- `drivers`, if supplied, must be a non-empty sequence of functions with the
- same signature as this function (minus the `drivers` argument), that raise
- ``UnrecognizedFormat`` if they do not support extracting the designated
- archive type. The `drivers` are tried in sequence until one is found that
- does not raise an error, or until all are exhausted (in which case
- ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
- drivers, the module's ``extraction_drivers`` constant will be used, which
- means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
- order.
- """
- for driver in drivers or extraction_drivers:
- try:
- driver(filename, extract_dir, progress_filter)
- except UnrecognizedFormat:
- continue
- else:
- return
- else:
- raise UnrecognizedFormat(
- "Not a recognized archive type: %s" % filename
- )
-
-
-def unpack_directory(filename, extract_dir, progress_filter=default_filter):
- """"Unpack" a directory, using the same interface as for archives
-
- Raises ``UnrecognizedFormat`` if `filename` is not a directory
- """
- if not os.path.isdir(filename):
- raise UnrecognizedFormat("%s is not a directory" % filename)
-
- paths = {
- filename: ('', extract_dir),
- }
- for base, dirs, files in os.walk(filename):
- src, dst = paths[base]
- for d in dirs:
- paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
- for f in files:
- target = os.path.join(dst, f)
- target = progress_filter(src + f, target)
- if not target:
- # skip non-files
- continue
- ensure_directory(target)
- f = os.path.join(base, f)
- shutil.copyfile(f, target)
- shutil.copystat(f, target)
-
-
-def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack zip `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
- by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
-
- if not zipfile.is_zipfile(filename):
- raise UnrecognizedFormat("%s is not a zip file" % (filename,))
-
+ """The default progress/filter callback; returns the destination path for all files"""
+ return dst
+
+
+def unpack_archive(filename, extract_dir, progress_filter=default_filter,
+ drivers=None):
+ """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
+
+ `progress_filter` is a function taking two arguments: a source path
+ internal to the archive ('/'-separated), and a filesystem path where it
+ will be extracted. The callback must return the desired extract path
+ (which may be the same as the one passed in), or else ``None`` to skip
+ that file or directory. The callback can thus be used to report on the
+ progress of the extraction, as well as to filter the items extracted or
+ alter their extraction paths.
+
+ `drivers`, if supplied, must be a non-empty sequence of functions with the
+ same signature as this function (minus the `drivers` argument), that raise
+ ``UnrecognizedFormat`` if they do not support extracting the designated
+ archive type. The `drivers` are tried in sequence until one is found that
+ does not raise an error, or until all are exhausted (in which case
+ ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
+ drivers, the module's ``extraction_drivers`` constant will be used, which
+ means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
+ order.
+ """
+ for driver in drivers or extraction_drivers:
+ try:
+ driver(filename, extract_dir, progress_filter)
+ except UnrecognizedFormat:
+ continue
+ else:
+ return
+ else:
+ raise UnrecognizedFormat(
+ "Not a recognized archive type: %s" % filename
+ )
+
+
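`unpack_archive` tries each driver in turn until one recognizes the format, and the `progress_filter` callback may rewrite a target path or return `None` to skip an entry. A hedged sketch (hypothetical archive and destination paths):

    from setuptools.archive_util import unpack_archive

    def skip_docs(src, dst):
        # Return None to skip the entry; otherwise return the target path.
        if src.startswith('docs/'):
            return None
        return dst

    # Tries unpack_directory, unpack_zipfile, then unpack_tarfile;
    # raises UnrecognizedFormat if none of them accepts the file.
    unpack_archive('demo-1.0.tar.gz', 'build/demo', progress_filter=skip_docs)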
+def unpack_directory(filename, extract_dir, progress_filter=default_filter):
+ """"Unpack" a directory, using the same interface as for archives
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a directory
+ """
+ if not os.path.isdir(filename):
+ raise UnrecognizedFormat("%s is not a directory" % filename)
+
+ paths = {
+ filename: ('', extract_dir),
+ }
+ for base, dirs, files in os.walk(filename):
+ src, dst = paths[base]
+ for d in dirs:
+ paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
+ for f in files:
+ target = os.path.join(dst, f)
+ target = progress_filter(src + f, target)
+ if not target:
+ # skip non-files
+ continue
+ ensure_directory(target)
+ f = os.path.join(base, f)
+ shutil.copyfile(f, target)
+ shutil.copystat(f, target)
+
+
+def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack zip `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
+ by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+
+ if not zipfile.is_zipfile(filename):
+ raise UnrecognizedFormat("%s is not a zip file" % (filename,))
+
with zipfile.ZipFile(filename) as z:
- for info in z.infolist():
- name = info.filename
-
- # don't extract absolute paths or ones with .. in them
- if name.startswith('/') or '..' in name.split('/'):
- continue
-
- target = os.path.join(extract_dir, *name.split('/'))
- target = progress_filter(name, target)
- if not target:
- continue
- if name.endswith('/'):
- # directory
- ensure_directory(target)
- else:
- # file
- ensure_directory(target)
- data = z.read(info.filename)
- with open(target, 'wb') as f:
- f.write(data)
- unix_attributes = info.external_attr >> 16
- if unix_attributes:
- os.chmod(target, unix_attributes)
-
-
-def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
- by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
- try:
- tarobj = tarfile.open(filename)
- except tarfile.TarError:
- raise UnrecognizedFormat(
- "%s is not a compressed or uncompressed tar file" % (filename,)
- )
- with contextlib.closing(tarobj):
- # don't do any chowning!
- tarobj.chown = lambda *args: None
- for member in tarobj:
- name = member.name
- # don't extract absolute paths or ones with .. in them
- if not name.startswith('/') and '..' not in name.split('/'):
- prelim_dst = os.path.join(extract_dir, *name.split('/'))
-
- # resolve any links and to extract the link targets as normal
- # files
- while member is not None and (member.islnk() or member.issym()):
- linkpath = member.linkname
- if member.issym():
- base = posixpath.dirname(member.name)
- linkpath = posixpath.join(base, linkpath)
- linkpath = posixpath.normpath(linkpath)
- member = tarobj._getmember(linkpath)
-
- if member is not None and (member.isfile() or member.isdir()):
- final_dst = progress_filter(name, prelim_dst)
- if final_dst:
- if final_dst.endswith(os.sep):
- final_dst = final_dst[:-1]
- try:
- # XXX Ugh
- tarobj._extract_member(member, final_dst)
- except tarfile.ExtractError:
- # chown/chmod/mkfifo/mknode/makedev failed
- pass
- return True
-
-
-extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
+ for info in z.infolist():
+ name = info.filename
+
+ # don't extract absolute paths or ones with .. in them
+ if name.startswith('/') or '..' in name.split('/'):
+ continue
+
+ target = os.path.join(extract_dir, *name.split('/'))
+ target = progress_filter(name, target)
+ if not target:
+ continue
+ if name.endswith('/'):
+ # directory
+ ensure_directory(target)
+ else:
+ # file
+ ensure_directory(target)
+ data = z.read(info.filename)
+ with open(target, 'wb') as f:
+ f.write(data)
+ unix_attributes = info.external_attr >> 16
+ if unix_attributes:
+ os.chmod(target, unix_attributes)
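The external_attr >> 16 shift recovers the Unix st_mode bits that Unix-built archivers store in the high word of a zip entry's external attributes; entries whose high word is zero are simply not chmod'ed. A small sketch of reading those bits (archive name hypothetical):

    import stat
    import zipfile

    with zipfile.ZipFile('example.zip') as z:       # hypothetical archive
        for info in z.infolist():
            mode = info.external_attr >> 16         # Unix mode bits, if recorded
            if mode:
                print(info.filename, oct(stat.S_IMODE(mode)))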
+
+
+def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
+ by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+ try:
+ tarobj = tarfile.open(filename)
+ except tarfile.TarError:
+ raise UnrecognizedFormat(
+ "%s is not a compressed or uncompressed tar file" % (filename,)
+ )
+ with contextlib.closing(tarobj):
+ # don't do any chowning!
+ tarobj.chown = lambda *args: None
+ for member in tarobj:
+ name = member.name
+ # don't extract absolute paths or ones with .. in them
+ if not name.startswith('/') and '..' not in name.split('/'):
+ prelim_dst = os.path.join(extract_dir, *name.split('/'))
+
+                # resolve any links and extract the link targets as normal
+                # files
+ while member is not None and (member.islnk() or member.issym()):
+ linkpath = member.linkname
+ if member.issym():
+ base = posixpath.dirname(member.name)
+ linkpath = posixpath.join(base, linkpath)
+ linkpath = posixpath.normpath(linkpath)
+ member = tarobj._getmember(linkpath)
+
+ if member is not None and (member.isfile() or member.isdir()):
+ final_dst = progress_filter(name, prelim_dst)
+ if final_dst:
+ if final_dst.endswith(os.sep):
+ final_dst = final_dst[:-1]
+ try:
+ # XXX Ugh
+ tarobj._extract_member(member, final_dst)
+ except tarfile.ExtractError:
+                            # chown/chmod/mkfifo/mknod/makedev failed
+ pass
+ return True
+
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
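Taken together, a sketch of calling unpack_archive with a progress_filter that both reports progress and prunes entries (archive and paths hypothetical):

    from setuptools.archive_util import unpack_archive

    def report_and_filter(src, dst):
        # Drop anything under a top-level 'tests/' directory in the archive.
        if src.startswith('tests/'):
            return None
        print('extracting %s -> %s' % (src, dst))
        return dst  # keep the proposed destination unchanged

    # unpack_archive('pkg-1.0.tar.gz', 'build/pkg', progress_filter=report_and_filter)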
diff --git a/contrib/python/setuptools/py2/setuptools/command/__init__.py b/contrib/python/setuptools/py2/setuptools/command/__init__.py
index f335601c84..743f5588fa 100644
--- a/contrib/python/setuptools/py2/setuptools/command/__init__.py
+++ b/contrib/python/setuptools/py2/setuptools/command/__init__.py
@@ -1,17 +1,17 @@
-__all__ = [
- 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
- 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
- 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
+__all__ = [
+ 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
+ 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
+ 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
'bdist_wininst', 'upload_docs', 'build_clib', 'dist_info',
-]
-
-from distutils.command.bdist import bdist
-import sys
-
-from setuptools.command import install_scripts
-
-if 'egg' not in bdist.format_commands:
- bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
- bdist.format_commands.append('egg')
-
-del bdist, sys
+]
+
+from distutils.command.bdist import bdist
+import sys
+
+from setuptools.command import install_scripts
+
+if 'egg' not in bdist.format_commands:
+ bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+ bdist.format_commands.append('egg')
+
+del bdist, sys
diff --git a/contrib/python/setuptools/py2/setuptools/command/alias.py b/contrib/python/setuptools/py2/setuptools/command/alias.py
index c057825904..4532b1cc0d 100644
--- a/contrib/python/setuptools/py2/setuptools/command/alias.py
+++ b/contrib/python/setuptools/py2/setuptools/command/alias.py
@@ -1,80 +1,80 @@
-from distutils.errors import DistutilsOptionError
-
+from distutils.errors import DistutilsOptionError
+
from setuptools.extern.six.moves import map
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
- """Quote an argument for later parsing by shlex.split()"""
- for c in '"', "'", "\\", "#":
- if c in arg:
- return repr(arg)
- if arg.split() != [arg]:
- return repr(arg)
- return arg
-
-
-class alias(option_base):
- """Define a shortcut that invokes one or more commands"""
-
- description = "define a shortcut to invoke one or more commands"
- command_consumes_arguments = True
-
- user_options = [
- ('remove', 'r', 'remove (unset) the alias'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.args = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.remove and len(self.args) != 1:
- raise DistutilsOptionError(
- "Must specify exactly one argument (the alias name) when "
- "using --remove"
- )
-
- def run(self):
- aliases = self.distribution.get_option_dict('aliases')
-
- if not self.args:
- print("Command Aliases")
- print("---------------")
- for alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
-
- elif len(self.args) == 1:
- alias, = self.args
- if self.remove:
- command = None
- elif alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
- else:
- print("No alias definition found for %r" % alias)
- return
- else:
- alias = self.args[0]
- command = ' '.join(map(shquote, self.args[1:]))
-
- edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
- source, command = aliases[name]
- if source == config_file('global'):
- source = '--global-config '
- elif source == config_file('user'):
- source = '--user-config '
- elif source == config_file('local'):
- source = ''
- else:
- source = '--filename=%r' % source
- return source + name + ' ' + command
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+ """Quote an argument for later parsing by shlex.split()"""
+ for c in '"', "'", "\\", "#":
+ if c in arg:
+ return repr(arg)
+ if arg.split() != [arg]:
+ return repr(arg)
+ return arg
+
+
+class alias(option_base):
+ """Define a shortcut that invokes one or more commands"""
+
+ description = "define a shortcut to invoke one or more commands"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('remove', 'r', 'remove (unset) the alias'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.args = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.remove and len(self.args) != 1:
+ raise DistutilsOptionError(
+ "Must specify exactly one argument (the alias name) when "
+ "using --remove"
+ )
+
+ def run(self):
+ aliases = self.distribution.get_option_dict('aliases')
+
+ if not self.args:
+ print("Command Aliases")
+ print("---------------")
+ for alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+
+ elif len(self.args) == 1:
+ alias, = self.args
+ if self.remove:
+ command = None
+ elif alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+ else:
+ print("No alias definition found for %r" % alias)
+ return
+ else:
+ alias = self.args[0]
+ command = ' '.join(map(shquote, self.args[1:]))
+
+ edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+ source, command = aliases[name]
+ if source == config_file('global'):
+ source = '--global-config '
+ elif source == config_file('user'):
+ source = '--user-config '
+ elif source == config_file('local'):
+ source = ''
+ else:
+ source = '--filename=%r' % source
+ return source + name + ' ' + command
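shquote falls back to repr() whenever an argument contains quoting characters, a '#', a backslash, or whitespace, so an alias stored in setup.cfg survives a later shlex.split(); a quick sketch:

    import shlex

    args = ['test', '-q', "it's tricky"]
    command = ' '.join(map(shquote, args))
    print(command)               # test -q "it's tricky"
    print(shlex.split(command))  # ['test', '-q', "it's tricky"]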
diff --git a/contrib/python/setuptools/py2/setuptools/command/bdist_egg.py b/contrib/python/setuptools/py2/setuptools/command/bdist_egg.py
index 798c044589..98470f1715 100644
--- a/contrib/python/setuptools/py2/setuptools/command/bdist_egg.py
+++ b/contrib/python/setuptools/py2/setuptools/command/bdist_egg.py
@@ -1,45 +1,45 @@
-"""setuptools.command.bdist_egg
-
-Build .egg distributions"""
-
-from distutils.errors import DistutilsSetupError
-from distutils.dir_util import remove_tree, mkpath
-from distutils import log
-from types import CodeType
-import sys
-import os
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.errors import DistutilsSetupError
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
import re
import textwrap
-import marshal
-
+import marshal
+
from setuptools.extern import six
-
-from pkg_resources import get_build_platform, Distribution, ensure_directory
-from pkg_resources import EntryPoint
-from setuptools.extension import Library
-from setuptools import Command
-
-try:
- # Python 2.7 or >=3.2
- from sysconfig import get_path, get_python_version
-
- def _get_purelib():
- return get_path("purelib")
-except ImportError:
- from distutils.sysconfig import get_python_lib, get_python_version
-
- def _get_purelib():
- return get_python_lib(False)
-
-
-def strip_module(filename):
- if '.' in filename:
- filename = os.path.splitext(filename)[0]
- if filename.endswith('module'):
- filename = filename[:-6]
- return filename
-
-
+
+from pkg_resources import get_build_platform, Distribution, ensure_directory
+from pkg_resources import EntryPoint
+from setuptools.extension import Library
+from setuptools import Command
+
+try:
+ # Python 2.7 or >=3.2
+ from sysconfig import get_path, get_python_version
+
+ def _get_purelib():
+ return get_path("purelib")
+except ImportError:
+ from distutils.sysconfig import get_python_lib, get_python_version
+
+ def _get_purelib():
+ return get_python_lib(False)
+
+
+def strip_module(filename):
+ if '.' in filename:
+ filename = os.path.splitext(filename)[0]
+ if filename.endswith('module'):
+ filename = filename[:-6]
+ return filename
+
+
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
independent of indeterministic filesystem readdir order
@@ -50,205 +50,205 @@ def sorted_walk(dir):
yield base, dirs, files
-def write_stub(resource, pyfile):
- _stub_template = textwrap.dedent("""
- def __bootstrap__():
- global __bootstrap__, __loader__, __file__
- import sys, pkg_resources, imp
- __file__ = pkg_resources.resource_filename(__name__, %r)
- __loader__ = None; del __bootstrap__, __loader__
- imp.load_dynamic(__name__,__file__)
- __bootstrap__()
- """).lstrip()
- with open(pyfile, 'w') as f:
- f.write(_stub_template % resource)
-
-
-class bdist_egg(Command):
- description = "create an \"egg\" distribution"
-
- user_options = [
- ('bdist-dir=', 'b',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p', "platform name to embed in generated filenames "
- "(default: %s)" % get_build_platform()),
- ('exclude-source-files', None,
- "remove all .py files from the generated egg"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ]
-
- boolean_options = [
- 'keep-temp', 'skip-build', 'exclude-source-files'
- ]
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = 0
- self.egg_output = None
- self.exclude_source_files = None
-
- def finalize_options(self):
- ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
- self.egg_info = ei_cmd.egg_info
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'egg')
-
- if self.plat_name is None:
- self.plat_name = get_build_platform()
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.egg_output is None:
-
- # Compute filename of the output egg
- basename = Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version,
- get_python_version(),
- self.distribution.has_ext_modules() and self.plat_name
- ).egg_name()
-
- self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
-
- def do_install_data(self):
- # Hack for packages that install data to install's --install-lib
- self.get_finalized_command('install').install_lib = self.bdist_dir
-
- site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
- old, self.distribution.data_files = self.distribution.data_files, []
-
- for item in old:
- if isinstance(item, tuple) and len(item) == 2:
- if os.path.isabs(item[0]):
- realpath = os.path.realpath(item[0])
- normalized = os.path.normcase(realpath)
- if normalized == site_packages or normalized.startswith(
- site_packages + os.sep
- ):
- item = realpath[len(site_packages) + 1:], item[1]
- # XXX else: raise ???
- self.distribution.data_files.append(item)
-
- try:
+def write_stub(resource, pyfile):
+ _stub_template = textwrap.dedent("""
+ def __bootstrap__():
+ global __bootstrap__, __loader__, __file__
+ import sys, pkg_resources, imp
+ __file__ = pkg_resources.resource_filename(__name__, %r)
+ __loader__ = None; del __bootstrap__, __loader__
+ imp.load_dynamic(__name__,__file__)
+ __bootstrap__()
+ """).lstrip()
+ with open(pyfile, 'w') as f:
+ f.write(_stub_template % resource)
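Given a built extension inside the egg tree, write_stub drops a same-named .py loader beside it; at import time the stub resolves the real shared object through pkg_resources and loads it with imp.load_dynamic. A sketch with hypothetical paths:

    import os

    egg_dir = os.path.join('build', 'bdist.linux-x86_64', 'egg')  # hypothetical
    write_stub('_speedups.so', os.path.join(egg_dir, '_speedups.py'))
    # _speedups.py now bootstraps _speedups.so when the (zipped) egg is imported.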
+
+
+class bdist_egg(Command):
+ description = "create an \"egg\" distribution"
+
+ user_options = [
+ ('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p', "platform name to embed in generated filenames "
+ "(default: %s)" % get_build_platform()),
+ ('exclude-source-files', None,
+ "remove all .py files from the generated egg"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ]
+
+ boolean_options = [
+ 'keep-temp', 'skip-build', 'exclude-source-files'
+ ]
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = 0
+ self.egg_output = None
+ self.exclude_source_files = None
+
+ def finalize_options(self):
+ ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_info = ei_cmd.egg_info
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+ if self.plat_name is None:
+ self.plat_name = get_build_platform()
+
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ if self.egg_output is None:
+
+ # Compute filename of the output egg
+ basename = Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+ get_python_version(),
+ self.distribution.has_ext_modules() and self.plat_name
+ ).egg_name()
+
+ self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
+ def do_install_data(self):
+ # Hack for packages that install data to install's --install-lib
+ self.get_finalized_command('install').install_lib = self.bdist_dir
+
+ site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+ old, self.distribution.data_files = self.distribution.data_files, []
+
+ for item in old:
+ if isinstance(item, tuple) and len(item) == 2:
+ if os.path.isabs(item[0]):
+ realpath = os.path.realpath(item[0])
+ normalized = os.path.normcase(realpath)
+ if normalized == site_packages or normalized.startswith(
+ site_packages + os.sep
+ ):
+ item = realpath[len(site_packages) + 1:], item[1]
+ # XXX else: raise ???
+ self.distribution.data_files.append(item)
+
+ try:
log.info("installing package data to %s", self.bdist_dir)
- self.call_command('install_data', force=0, root=None)
- finally:
- self.distribution.data_files = old
-
- def get_outputs(self):
- return [self.egg_output]
-
- def call_command(self, cmdname, **kw):
- """Invoke reinitialized command `cmdname` with keyword args"""
- for dirname in INSTALL_DIRECTORY_ATTRS:
- kw.setdefault(dirname, self.bdist_dir)
- kw.setdefault('skip_build', self.skip_build)
- kw.setdefault('dry_run', self.dry_run)
- cmd = self.reinitialize_command(cmdname, **kw)
- self.run_command(cmdname)
- return cmd
-
- def run(self):
- # Generate metadata first
- self.run_command("egg_info")
- # We run install_lib before install_data, because some data hacks
- # pull their data path from the install_lib command.
+ self.call_command('install_data', force=0, root=None)
+ finally:
+ self.distribution.data_files = old
+
+ def get_outputs(self):
+ return [self.egg_output]
+
+ def call_command(self, cmdname, **kw):
+ """Invoke reinitialized command `cmdname` with keyword args"""
+ for dirname in INSTALL_DIRECTORY_ATTRS:
+ kw.setdefault(dirname, self.bdist_dir)
+ kw.setdefault('skip_build', self.skip_build)
+ kw.setdefault('dry_run', self.dry_run)
+ cmd = self.reinitialize_command(cmdname, **kw)
+ self.run_command(cmdname)
+ return cmd
+
+ def run(self):
+ # Generate metadata first
+ self.run_command("egg_info")
+ # We run install_lib before install_data, because some data hacks
+ # pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
- instcmd = self.get_finalized_command('install')
- old_root = instcmd.root
- instcmd.root = None
- if self.distribution.has_c_libraries() and not self.skip_build:
- self.run_command('build_clib')
- cmd = self.call_command('install_lib', warn_dir=0)
- instcmd.root = old_root
-
- all_outputs, ext_outputs = self.get_ext_outputs()
- self.stubs = []
- to_compile = []
- for (p, ext_name) in enumerate(ext_outputs):
- filename, ext = os.path.splitext(ext_name)
- pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
- '.py')
- self.stubs.append(pyfile)
+ instcmd = self.get_finalized_command('install')
+ old_root = instcmd.root
+ instcmd.root = None
+ if self.distribution.has_c_libraries() and not self.skip_build:
+ self.run_command('build_clib')
+ cmd = self.call_command('install_lib', warn_dir=0)
+ instcmd.root = old_root
+
+ all_outputs, ext_outputs = self.get_ext_outputs()
+ self.stubs = []
+ to_compile = []
+ for (p, ext_name) in enumerate(ext_outputs):
+ filename, ext = os.path.splitext(ext_name)
+ pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
+ '.py')
+ self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
- if not self.dry_run:
- write_stub(os.path.basename(ext_name), pyfile)
- to_compile.append(pyfile)
- ext_outputs[p] = ext_name.replace(os.sep, '/')
-
- if to_compile:
- cmd.byte_compile(to_compile)
- if self.distribution.data_files:
- self.do_install_data()
-
- # Make the EGG-INFO directory
- archive_root = self.bdist_dir
- egg_info = os.path.join(archive_root, 'EGG-INFO')
- self.mkpath(egg_info)
- if self.distribution.scripts:
- script_dir = os.path.join(egg_info, 'scripts')
+ if not self.dry_run:
+ write_stub(os.path.basename(ext_name), pyfile)
+ to_compile.append(pyfile)
+ ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+ if to_compile:
+ cmd.byte_compile(to_compile)
+ if self.distribution.data_files:
+ self.do_install_data()
+
+ # Make the EGG-INFO directory
+ archive_root = self.bdist_dir
+ egg_info = os.path.join(archive_root, 'EGG-INFO')
+ self.mkpath(egg_info)
+ if self.distribution.scripts:
+ script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
- self.call_command('install_scripts', install_dir=script_dir,
- no_ep=1)
-
- self.copy_metadata_to(egg_info)
- native_libs = os.path.join(egg_info, "native_libs.txt")
- if all_outputs:
+ self.call_command('install_scripts', install_dir=script_dir,
+ no_ep=1)
+
+ self.copy_metadata_to(egg_info)
+ native_libs = os.path.join(egg_info, "native_libs.txt")
+ if all_outputs:
log.info("writing %s", native_libs)
- if not self.dry_run:
- ensure_directory(native_libs)
- libs_file = open(native_libs, 'wt')
- libs_file.write('\n'.join(all_outputs))
- libs_file.write('\n')
- libs_file.close()
- elif os.path.isfile(native_libs):
+ if not self.dry_run:
+ ensure_directory(native_libs)
+ libs_file = open(native_libs, 'wt')
+ libs_file.write('\n'.join(all_outputs))
+ libs_file.write('\n')
+ libs_file.close()
+ elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
- if not self.dry_run:
- os.unlink(native_libs)
-
- write_safety_flag(
- os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
- )
-
- if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
- log.warn(
- "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
- if self.exclude_source_files:
- self.zap_pyfiles()
-
- # Make the archive
- make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
- dry_run=self.dry_run, mode=self.gen_header())
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # Add to 'Distribution.dist_files' so that the "upload" command works
- getattr(self.distribution, 'dist_files', []).append(
- ('bdist_egg', get_python_version(), self.egg_output))
-
- def zap_pyfiles(self):
- log.info("Removing .py files from temporary directory")
- for base, dirs, files in walk_egg(self.bdist_dir):
- for name in files:
+ if not self.dry_run:
+ os.unlink(native_libs)
+
+ write_safety_flag(
+ os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
+ )
+
+ if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+ log.warn(
+ "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+ if self.exclude_source_files:
+ self.zap_pyfiles()
+
+ # Make the archive
+ make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+ dry_run=self.dry_run, mode=self.gen_header())
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution, 'dist_files', []).append(
+ ('bdist_egg', get_python_version(), self.egg_output))
+
+ def zap_pyfiles(self):
+ log.info("Removing .py files from temporary directory")
+ for base, dirs, files in walk_egg(self.bdist_dir):
+ for name in files:
path = os.path.join(base, name)
- if name.endswith('.py'):
- log.debug("Deleting %s", path)
- os.unlink(path)
-
+ if name.endswith('.py'):
+ log.debug("Deleting %s", path)
+ os.unlink(path)
+
if base.endswith('__pycache__'):
path_old = path
@@ -265,238 +265,238 @@ class bdist_egg(Command):
pass
os.rename(path_old, path_new)
- def zip_safe(self):
- safe = getattr(self.distribution, 'zip_safe', None)
- if safe is not None:
- return safe
- log.warn("zip_safe flag not set; analyzing archive contents...")
- return analyze_egg(self.bdist_dir, self.stubs)
-
- def gen_header(self):
- epm = EntryPoint.parse_map(self.distribution.entry_points or '')
- ep = epm.get('setuptools.installation', {}).get('eggsecutable')
- if ep is None:
- return 'w' # not an eggsecutable, do it the usual way.
-
- if not ep.attrs or ep.extras:
- raise DistutilsSetupError(
- "eggsecutable entry point (%r) cannot have 'extras' "
- "or refer to a module" % (ep,)
- )
-
+ def zip_safe(self):
+ safe = getattr(self.distribution, 'zip_safe', None)
+ if safe is not None:
+ return safe
+ log.warn("zip_safe flag not set; analyzing archive contents...")
+ return analyze_egg(self.bdist_dir, self.stubs)
+
+ def gen_header(self):
+ epm = EntryPoint.parse_map(self.distribution.entry_points or '')
+ ep = epm.get('setuptools.installation', {}).get('eggsecutable')
+ if ep is None:
+ return 'w' # not an eggsecutable, do it the usual way.
+
+ if not ep.attrs or ep.extras:
+ raise DistutilsSetupError(
+ "eggsecutable entry point (%r) cannot have 'extras' "
+ "or refer to a module" % (ep,)
+ )
+
pyver = '{}.{}'.format(*sys.version_info)
- pkg = ep.module_name
- full = '.'.join(ep.attrs)
- base = ep.attrs[0]
- basename = os.path.basename(self.egg_output)
-
- header = (
- "#!/bin/sh\n"
- 'if [ `basename $0` = "%(basename)s" ]\n'
- 'then exec python%(pyver)s -c "'
- "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
- "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
- '" "$@"\n'
- 'else\n'
- ' echo $0 is not the correct name for this egg file.\n'
- ' echo Please rename it back to %(basename)s and try again.\n'
- ' exec false\n'
- 'fi\n'
- ) % locals()
-
- if not self.dry_run:
- mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
- f = open(self.egg_output, 'w')
- f.write(header)
- f.close()
- return 'a'
-
- def copy_metadata_to(self, target_dir):
- "Copy metadata (egg info) to the target_dir"
- # normalize the path (so that a forward-slash in egg_info will
- # match using startswith below)
- norm_egg_info = os.path.normpath(self.egg_info)
- prefix = os.path.join(norm_egg_info, '')
- for path in self.ei_cmd.filelist.files:
- if path.startswith(prefix):
- target = os.path.join(target_dir, path[len(prefix):])
- ensure_directory(target)
- self.copy_file(path, target)
-
- def get_ext_outputs(self):
- """Get a list of relative paths to C extensions in the output distro"""
-
- all_outputs = []
- ext_outputs = []
-
- paths = {self.bdist_dir: ''}
+ pkg = ep.module_name
+ full = '.'.join(ep.attrs)
+ base = ep.attrs[0]
+ basename = os.path.basename(self.egg_output)
+
+ header = (
+ "#!/bin/sh\n"
+ 'if [ `basename $0` = "%(basename)s" ]\n'
+ 'then exec python%(pyver)s -c "'
+ "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
+ "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
+ '" "$@"\n'
+ 'else\n'
+ ' echo $0 is not the correct name for this egg file.\n'
+ ' echo Please rename it back to %(basename)s and try again.\n'
+ ' exec false\n'
+ 'fi\n'
+ ) % locals()
+
+ if not self.dry_run:
+ mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
+ f = open(self.egg_output, 'w')
+ f.write(header)
+ f.close()
+ return 'a'
+
+ def copy_metadata_to(self, target_dir):
+ "Copy metadata (egg info) to the target_dir"
+ # normalize the path (so that a forward-slash in egg_info will
+ # match using startswith below)
+ norm_egg_info = os.path.normpath(self.egg_info)
+ prefix = os.path.join(norm_egg_info, '')
+ for path in self.ei_cmd.filelist.files:
+ if path.startswith(prefix):
+ target = os.path.join(target_dir, path[len(prefix):])
+ ensure_directory(target)
+ self.copy_file(path, target)
+
+ def get_ext_outputs(self):
+ """Get a list of relative paths to C extensions in the output distro"""
+
+ all_outputs = []
+ ext_outputs = []
+
+ paths = {self.bdist_dir: ''}
for base, dirs, files in sorted_walk(self.bdist_dir):
- for filename in files:
- if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
- all_outputs.append(paths[base] + filename)
- for filename in dirs:
- paths[os.path.join(base, filename)] = (paths[base] +
- filename + '/')
-
- if self.distribution.has_ext_modules():
- build_cmd = self.get_finalized_command('build_ext')
- for ext in build_cmd.extensions:
- if isinstance(ext, Library):
- continue
- fullname = build_cmd.get_ext_fullname(ext.name)
- filename = build_cmd.get_ext_filename(fullname)
- if not os.path.basename(filename).startswith('dl-'):
- if os.path.exists(os.path.join(self.bdist_dir, filename)):
- ext_outputs.append(filename)
-
- return all_outputs, ext_outputs
-
-
-NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
-
-
-def walk_egg(egg_dir):
- """Walk an unpacked egg's contents, skipping the metadata directory"""
+ for filename in files:
+ if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+ all_outputs.append(paths[base] + filename)
+ for filename in dirs:
+ paths[os.path.join(base, filename)] = (paths[base] +
+ filename + '/')
+
+ if self.distribution.has_ext_modules():
+ build_cmd = self.get_finalized_command('build_ext')
+ for ext in build_cmd.extensions:
+ if isinstance(ext, Library):
+ continue
+ fullname = build_cmd.get_ext_fullname(ext.name)
+ filename = build_cmd.get_ext_filename(fullname)
+ if not os.path.basename(filename).startswith('dl-'):
+ if os.path.exists(os.path.join(self.bdist_dir, filename)):
+ ext_outputs.append(filename)
+
+ return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+ """Walk an unpacked egg's contents, skipping the metadata directory"""
walker = sorted_walk(egg_dir)
- base, dirs, files = next(walker)
- if 'EGG-INFO' in dirs:
- dirs.remove('EGG-INFO')
- yield base, dirs, files
- for bdf in walker:
- yield bdf
-
-
-def analyze_egg(egg_dir, stubs):
- # check for existing flag in EGG-INFO
- for flag, fn in safety_flags.items():
- if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
- return flag
- if not can_scan():
- return False
- safe = True
- for base, dirs, files in walk_egg(egg_dir):
- for name in files:
- if name.endswith('.py') or name.endswith('.pyw'):
- continue
- elif name.endswith('.pyc') or name.endswith('.pyo'):
- # always scan, even if we already know we're not safe
- safe = scan_module(egg_dir, base, name, stubs) and safe
- return safe
-
-
-def write_safety_flag(egg_dir, safe):
- # Write or remove zip safety flag file(s)
- for flag, fn in safety_flags.items():
- fn = os.path.join(egg_dir, fn)
- if os.path.exists(fn):
- if safe is None or bool(safe) != flag:
- os.unlink(fn)
- elif safe is not None and bool(safe) == flag:
- f = open(fn, 'wt')
- f.write('\n')
- f.close()
-
-
-safety_flags = {
- True: 'zip-safe',
- False: 'not-zip-safe',
-}
-
-
-def scan_module(egg_dir, base, name, stubs):
- """Check whether module possibly uses unsafe-for-zipfile stuff"""
-
- filename = os.path.join(base, name)
- if filename[:-1] in stubs:
- return True # Extension module
- pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
- module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+ base, dirs, files = next(walker)
+ if 'EGG-INFO' in dirs:
+ dirs.remove('EGG-INFO')
+ yield base, dirs, files
+ for bdf in walker:
+ yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+ # check for existing flag in EGG-INFO
+ for flag, fn in safety_flags.items():
+ if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+ return flag
+ if not can_scan():
+ return False
+ safe = True
+ for base, dirs, files in walk_egg(egg_dir):
+ for name in files:
+ if name.endswith('.py') or name.endswith('.pyw'):
+ continue
+ elif name.endswith('.pyc') or name.endswith('.pyo'):
+ # always scan, even if we already know we're not safe
+ safe = scan_module(egg_dir, base, name, stubs) and safe
+ return safe
+
+
+def write_safety_flag(egg_dir, safe):
+ # Write or remove zip safety flag file(s)
+ for flag, fn in safety_flags.items():
+ fn = os.path.join(egg_dir, fn)
+ if os.path.exists(fn):
+ if safe is None or bool(safe) != flag:
+ os.unlink(fn)
+ elif safe is not None and bool(safe) == flag:
+ f = open(fn, 'wt')
+ f.write('\n')
+ f.close()
+
+
+safety_flags = {
+ True: 'zip-safe',
+ False: 'not-zip-safe',
+}
+
+
+def scan_module(egg_dir, base, name, stubs):
+ """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+ filename = os.path.join(base, name)
+ if filename[:-1] in stubs:
+ return True # Extension module
+ pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+ module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if six.PY2:
- skip = 8 # skip magic & date
+ skip = 8 # skip magic & date
elif sys.version_info < (3, 7):
skip = 12 # skip magic & date & file size
- else:
+ else:
         skip = 16  # skip magic & flags & date & file size
- f = open(filename, 'rb')
- f.read(skip)
- code = marshal.load(f)
- f.close()
- safe = True
- symbols = dict.fromkeys(iter_symbols(code))
- for bad in ['__file__', '__path__']:
- if bad in symbols:
- log.warn("%s: module references %s", module, bad)
- safe = False
- if 'inspect' in symbols:
- for bad in [
- 'getsource', 'getabsfile', 'getsourcefile', 'getfile'
- 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
- 'getinnerframes', 'getouterframes', 'stack', 'trace'
- ]:
- if bad in symbols:
- log.warn("%s: module MAY be using inspect.%s", module, bad)
- safe = False
- return safe
-
-
-def iter_symbols(code):
- """Yield names and strings used by `code` and its nested code objects"""
- for name in code.co_names:
- yield name
- for const in code.co_consts:
- if isinstance(const, six.string_types):
- yield const
- elif isinstance(const, CodeType):
- for name in iter_symbols(const):
- yield name
-
-
-def can_scan():
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- # CPython, PyPy, etc.
- return True
- log.warn("Unable to analyze compiled code on this platform.")
- log.warn("Please ask the author to include a 'zip_safe'"
- " setting (either True or False) in the package's setup.py")
-
-
-# Attribute names of options for commands that might need to be convinced to
-# install to the egg build directory
-
-INSTALL_DIRECTORY_ATTRS = [
- 'install_lib', 'install_dir', 'install_data', 'install_base'
-]
-
-
-def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
- mode='w'):
- """Create a zip file from all the files under 'base_dir'. The output
- zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
- Python module (if available) or the InfoZIP "zip" utility (if installed
- and found on the default search path). If neither tool is available,
- raises DistutilsExecError. Returns the name of the output zip file.
- """
- import zipfile
-
- mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
- log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
- def visit(z, dirname, names):
- for name in names:
- path = os.path.normpath(os.path.join(dirname, name))
- if os.path.isfile(path):
- p = path[len(base_dir) + 1:]
- if not dry_run:
- z.write(path, p)
+ f = open(filename, 'rb')
+ f.read(skip)
+ code = marshal.load(f)
+ f.close()
+ safe = True
+ symbols = dict.fromkeys(iter_symbols(code))
+ for bad in ['__file__', '__path__']:
+ if bad in symbols:
+ log.warn("%s: module references %s", module, bad)
+ safe = False
+ if 'inspect' in symbols:
+ for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+ 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+ 'getinnerframes', 'getouterframes', 'stack', 'trace'
+ ]:
+ if bad in symbols:
+ log.warn("%s: module MAY be using inspect.%s", module, bad)
+ safe = False
+ return safe
+
+
+def iter_symbols(code):
+ """Yield names and strings used by `code` and its nested code objects"""
+ for name in code.co_names:
+ yield name
+ for const in code.co_consts:
+ if isinstance(const, six.string_types):
+ yield const
+ elif isinstance(const, CodeType):
+ for name in iter_symbols(const):
+ yield name
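iter_symbols recursively walks co_names and string constants of a code object, which is what lets scan_module spot references like __file__ in compiled modules; a small sketch:

    code = compile("import os\npath = __file__\n", '<test>', 'exec')
    symbols = set(iter_symbols(code))
    print('__file__' in symbols)  # True -> the module would be flagged unsafe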
+
+
+def can_scan():
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ # CPython, PyPy, etc.
+ return True
+ log.warn("Unable to analyze compiled code on this platform.")
+ log.warn("Please ask the author to include a 'zip_safe'"
+ " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+ 'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+ mode='w'):
+    """Create a zip file from all the files under 'base_dir'. The output
+    zip file is written to 'zip_filename' using the "zipfile" Python
+    module; 'dry_run' suppresses the actual write, and 'mode'/'compress'
+    control how the archive is opened. Returns the name of the output
+    zip file.
+    """
+ import zipfile
+
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+ log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+ def visit(z, dirname, names):
+ for name in names:
+ path = os.path.normpath(os.path.join(dirname, name))
+ if os.path.isfile(path):
+ p = path[len(base_dir) + 1:]
+ if not dry_run:
+ z.write(path, p)
log.debug("adding '%s'", p)
-
- compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
- if not dry_run:
- z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+
+ compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+ if not dry_run:
+ z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in sorted_walk(base_dir):
- visit(z, dirname, files)
- z.close()
- else:
+ visit(z, dirname, files)
+ z.close()
+ else:
for dirname, dirs, files in sorted_walk(base_dir):
- visit(None, dirname, files)
- return zip_filename
+ visit(None, dirname, files)
+ return zip_filename
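Driving make_zipfile directly looks like the sketch below (paths hypothetical); because it walks with sorted_walk, the member order, and hence the archive bytes, stay reproducible across filesystems:

    # Packs everything under build/bdist/egg into build/dist/example.egg.
    make_zipfile('build/dist/example.egg', 'build/bdist/egg',
                 verbose=1, compress=True, mode='w')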
diff --git a/contrib/python/setuptools/py2/setuptools/command/bdist_rpm.py b/contrib/python/setuptools/py2/setuptools/command/bdist_rpm.py
index 5f1a15e2d9..70730927ec 100644
--- a/contrib/python/setuptools/py2/setuptools/command/bdist_rpm.py
+++ b/contrib/python/setuptools/py2/setuptools/command/bdist_rpm.py
@@ -1,43 +1,43 @@
-import distutils.command.bdist_rpm as orig
-
-
-class bdist_rpm(orig.bdist_rpm):
- """
- Override the default bdist_rpm behavior to do the following:
-
- 1. Run egg_info to ensure the name and version are properly calculated.
- 2. Always run 'install' using --single-version-externally-managed to
- disable eggs in RPM distributions.
- 3. Replace dash with underscore in the version numbers for better RPM
- compatibility.
- """
-
- def run(self):
- # ensure distro name is up-to-date
- self.run_command('egg_info')
-
- orig.bdist_rpm.run(self)
-
- def _make_spec_file(self):
- version = self.distribution.get_version()
- rpmversion = version.replace('-', '_')
- spec = orig.bdist_rpm._make_spec_file(self)
- line23 = '%define version ' + version
- line24 = '%define version ' + rpmversion
- spec = [
- line.replace(
- "Source0: %{name}-%{version}.tar",
- "Source0: %{name}-%{unmangled_version}.tar"
- ).replace(
- "setup.py install ",
- "setup.py install --single-version-externally-managed "
- ).replace(
- "%setup",
- "%setup -n %{name}-%{unmangled_version}"
- ).replace(line23, line24)
- for line in spec
- ]
- insert_loc = spec.index(line24) + 1
- unmangled_version = "%define unmangled_version " + version
- spec.insert(insert_loc, unmangled_version)
- return spec
+import distutils.command.bdist_rpm as orig
+
+
+class bdist_rpm(orig.bdist_rpm):
+ """
+ Override the default bdist_rpm behavior to do the following:
+
+ 1. Run egg_info to ensure the name and version are properly calculated.
+ 2. Always run 'install' using --single-version-externally-managed to
+ disable eggs in RPM distributions.
+ 3. Replace dash with underscore in the version numbers for better RPM
+ compatibility.
+ """
+
+ def run(self):
+ # ensure distro name is up-to-date
+ self.run_command('egg_info')
+
+ orig.bdist_rpm.run(self)
+
+ def _make_spec_file(self):
+ version = self.distribution.get_version()
+ rpmversion = version.replace('-', '_')
+ spec = orig.bdist_rpm._make_spec_file(self)
+ line23 = '%define version ' + version
+ line24 = '%define version ' + rpmversion
+ spec = [
+ line.replace(
+ "Source0: %{name}-%{version}.tar",
+ "Source0: %{name}-%{unmangled_version}.tar"
+ ).replace(
+ "setup.py install ",
+ "setup.py install --single-version-externally-managed "
+ ).replace(
+ "%setup",
+ "%setup -n %{name}-%{unmangled_version}"
+ ).replace(line23, line24)
+ for line in spec
+ ]
+ insert_loc = spec.index(line24) + 1
+ unmangled_version = "%define unmangled_version " + version
+ spec.insert(insert_loc, unmangled_version)
+ return spec
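The replace/insert dance keeps RPM's %{version} macro free of dashes while preserving the original string as %{unmangled_version}; for a hypothetical project version '1.0-beta' the effect is:

    version = '1.0-beta'                   # hypothetical project version
    rpmversion = version.replace('-', '_')
    print('%define version ' + rpmversion)         # %define version 1.0_beta
    print('%define unmangled_version ' + version)  # %define unmangled_version 1.0-beta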
diff --git a/contrib/python/setuptools/py2/setuptools/command/bdist_wininst.py b/contrib/python/setuptools/py2/setuptools/command/bdist_wininst.py
index 9ca62d60c3..073de97b46 100644
--- a/contrib/python/setuptools/py2/setuptools/command/bdist_wininst.py
+++ b/contrib/python/setuptools/py2/setuptools/command/bdist_wininst.py
@@ -1,21 +1,21 @@
-import distutils.command.bdist_wininst as orig
-
-
-class bdist_wininst(orig.bdist_wininst):
- def reinitialize_command(self, command, reinit_subcommands=0):
- """
- Supplement reinitialize_command to work around
- http://bugs.python.org/issue20819
- """
- cmd = self.distribution.reinitialize_command(
- command, reinit_subcommands)
- if command in ('install', 'install_lib'):
- cmd.install_lib = None
- return cmd
-
- def run(self):
- self._is_running = True
- try:
- orig.bdist_wininst.run(self)
- finally:
- self._is_running = False
+import distutils.command.bdist_wininst as orig
+
+
+class bdist_wininst(orig.bdist_wininst):
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ """
+ Supplement reinitialize_command to work around
+ http://bugs.python.org/issue20819
+ """
+ cmd = self.distribution.reinitialize_command(
+ command, reinit_subcommands)
+ if command in ('install', 'install_lib'):
+ cmd.install_lib = None
+ return cmd
+
+ def run(self):
+ self._is_running = True
+ try:
+ orig.bdist_wininst.run(self)
+ finally:
+ self._is_running = False
diff --git a/contrib/python/setuptools/py2/setuptools/command/build_ext.py b/contrib/python/setuptools/py2/setuptools/command/build_ext.py
index 235ad9d305..1b51e040b4 100644
--- a/contrib/python/setuptools/py2/setuptools/command/build_ext.py
+++ b/contrib/python/setuptools/py2/setuptools/command/build_ext.py
@@ -1,16 +1,16 @@
import os
import sys
import itertools
-from distutils.command.build_ext import build_ext as _du_build_ext
-from distutils.file_util import copy_file
-from distutils.ccompiler import new_compiler
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.file_util import copy_file
+from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
-from distutils.errors import DistutilsError
-from distutils import log
-
-from setuptools.extension import Library
+from distutils.errors import DistutilsError
+from distutils import log
+
+from setuptools.extension import Library
from setuptools.extern import six
-
+
if six.PY2:
import imp
@@ -18,20 +18,20 @@ if six.PY2:
else:
from importlib.machinery import EXTENSION_SUFFIXES
-try:
- # Attempt to use Cython for building extensions, if available
- from Cython.Distutils.build_ext import build_ext as _build_ext
+try:
+ # Attempt to use Cython for building extensions, if available
+ from Cython.Distutils.build_ext import build_ext as _build_ext
# Additionally, assert that the compiler module will load
# also. Ref #1229.
__import__('Cython.Compiler.Main')
-except ImportError:
- _build_ext = _du_build_ext
-
+except ImportError:
+ _build_ext = _du_build_ext
+
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
-
-
+
+
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
@@ -52,21 +52,21 @@ def _customize_compiler_for_shlib(compiler):
customize_compiler(compiler)
-have_rtld = False
-use_stubs = False
-libtype = 'shared'
-
-if sys.platform == "darwin":
- use_stubs = True
-elif os.name != 'nt':
- try:
- import dl
- use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
- except ImportError:
- pass
-
-if_dl = lambda s: s if have_rtld else ''
-
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+ use_stubs = True
+elif os.name != 'nt':
+ try:
+ import dl
+ use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+ except ImportError:
+ pass
+
+if_dl = lambda s: s if have_rtld else ''
+
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
@@ -77,41 +77,41 @@ def get_abi3_suffix():
return suffix
-class build_ext(_build_ext):
- def run(self):
- """Build extensions in build directory, then copy if --inplace"""
- old_inplace, self.inplace = self.inplace, 0
- _build_ext.run(self)
- self.inplace = old_inplace
- if old_inplace:
- self.copy_extensions_to_source()
-
- def copy_extensions_to_source(self):
- build_py = self.get_finalized_command('build_py')
- for ext in self.extensions:
- fullname = self.get_ext_fullname(ext.name)
- filename = self.get_ext_filename(fullname)
- modpath = fullname.split('.')
- package = '.'.join(modpath[:-1])
- package_dir = build_py.get_package_dir(package)
- dest_filename = os.path.join(package_dir,
- os.path.basename(filename))
- src_filename = os.path.join(self.build_lib, filename)
-
- # Always copy, even if source is older than destination, to ensure
- # that the right extensions for the current Python/platform are
- # used.
- copy_file(
- src_filename, dest_filename, verbose=self.verbose,
- dry_run=self.dry_run
- )
- if ext._needs_stub:
- self.write_stub(package_dir or os.curdir, ext, True)
-
- def get_ext_filename(self, fullname):
- filename = _build_ext.get_ext_filename(self, fullname)
- if fullname in self.ext_map:
- ext = self.ext_map[fullname]
+class build_ext(_build_ext):
+ def run(self):
+ """Build extensions in build directory, then copy if --inplace"""
+ old_inplace, self.inplace = self.inplace, 0
+ _build_ext.run(self)
+ self.inplace = old_inplace
+ if old_inplace:
+ self.copy_extensions_to_source()
+
+ def copy_extensions_to_source(self):
+ build_py = self.get_finalized_command('build_py')
+ for ext in self.extensions:
+ fullname = self.get_ext_fullname(ext.name)
+ filename = self.get_ext_filename(fullname)
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[:-1])
+ package_dir = build_py.get_package_dir(package)
+ dest_filename = os.path.join(package_dir,
+ os.path.basename(filename))
+ src_filename = os.path.join(self.build_lib, filename)
+
+ # Always copy, even if source is older than destination, to ensure
+ # that the right extensions for the current Python/platform are
+ # used.
+ copy_file(
+ src_filename, dest_filename, verbose=self.verbose,
+ dry_run=self.dry_run
+ )
+ if ext._needs_stub:
+ self.write_stub(package_dir or os.curdir, ext, True)
+
+ def get_ext_filename(self, fullname):
+ filename = _build_ext.get_ext_filename(self, fullname)
+ if fullname in self.ext_map:
+ ext = self.ext_map[fullname]
use_abi3 = (
not six.PY2
and getattr(ext, 'py_limited_api')
@@ -121,207 +121,207 @@ class build_ext(_build_ext):
so_ext = get_config_var('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
- if isinstance(ext, Library):
- fn, ext = os.path.splitext(filename)
- return self.shlib_compiler.library_filename(fn, libtype)
- elif use_stubs and ext._links_to_dynamic:
- d, fn = os.path.split(filename)
- return os.path.join(d, 'dl-' + fn)
- return filename
-
- def initialize_options(self):
- _build_ext.initialize_options(self)
- self.shlib_compiler = None
- self.shlibs = []
- self.ext_map = {}
-
- def finalize_options(self):
- _build_ext.finalize_options(self)
- self.extensions = self.extensions or []
- self.check_extensions_list(self.extensions)
- self.shlibs = [ext for ext in self.extensions
- if isinstance(ext, Library)]
- if self.shlibs:
- self.setup_shlib_compiler()
- for ext in self.extensions:
- ext._full_name = self.get_ext_fullname(ext.name)
- for ext in self.extensions:
- fullname = ext._full_name
- self.ext_map[fullname] = ext
-
- # distutils 3.1 will also ask for module names
- # XXX what to do with conflicts?
- self.ext_map[fullname.split('.')[-1]] = ext
-
- ltd = self.shlibs and self.links_to_dynamic(ext) or False
- ns = ltd and use_stubs and not isinstance(ext, Library)
- ext._links_to_dynamic = ltd
- ext._needs_stub = ns
- filename = ext._file_name = self.get_ext_filename(fullname)
- libdir = os.path.dirname(os.path.join(self.build_lib, filename))
- if ltd and libdir not in ext.library_dirs:
- ext.library_dirs.append(libdir)
- if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
- ext.runtime_library_dirs.append(os.curdir)
-
- def setup_shlib_compiler(self):
- compiler = self.shlib_compiler = new_compiler(
- compiler=self.compiler, dry_run=self.dry_run, force=self.force
- )
+ if isinstance(ext, Library):
+ fn, ext = os.path.splitext(filename)
+ return self.shlib_compiler.library_filename(fn, libtype)
+ elif use_stubs and ext._links_to_dynamic:
+ d, fn = os.path.split(filename)
+ return os.path.join(d, 'dl-' + fn)
+ return filename
+
+ def initialize_options(self):
+ _build_ext.initialize_options(self)
+ self.shlib_compiler = None
+ self.shlibs = []
+ self.ext_map = {}
+
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.extensions = self.extensions or []
+ self.check_extensions_list(self.extensions)
+ self.shlibs = [ext for ext in self.extensions
+ if isinstance(ext, Library)]
+ if self.shlibs:
+ self.setup_shlib_compiler()
+ for ext in self.extensions:
+ ext._full_name = self.get_ext_fullname(ext.name)
+ for ext in self.extensions:
+ fullname = ext._full_name
+ self.ext_map[fullname] = ext
+
+ # distutils 3.1 will also ask for module names
+ # XXX what to do with conflicts?
+ self.ext_map[fullname.split('.')[-1]] = ext
+
+ ltd = self.shlibs and self.links_to_dynamic(ext) or False
+ ns = ltd and use_stubs and not isinstance(ext, Library)
+ ext._links_to_dynamic = ltd
+ ext._needs_stub = ns
+ filename = ext._file_name = self.get_ext_filename(fullname)
+ libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+ if ltd and libdir not in ext.library_dirs:
+ ext.library_dirs.append(libdir)
+ if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+ ext.runtime_library_dirs.append(os.curdir)
+
+ def setup_shlib_compiler(self):
+ compiler = self.shlib_compiler = new_compiler(
+ compiler=self.compiler, dry_run=self.dry_run, force=self.force
+ )
_customize_compiler_for_shlib(compiler)
-
- if self.include_dirs is not None:
- compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name, value) in self.define:
- compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- compiler.undefine_macro(macro)
- if self.libraries is not None:
- compiler.set_libraries(self.libraries)
- if self.library_dirs is not None:
- compiler.set_library_dirs(self.library_dirs)
- if self.rpath is not None:
- compiler.set_runtime_library_dirs(self.rpath)
- if self.link_objects is not None:
- compiler.set_link_objects(self.link_objects)
-
- # hack so distutils' build_extension() builds a library instead
- compiler.link_shared_object = link_shared_object.__get__(compiler)
-
- def get_export_symbols(self, ext):
- if isinstance(ext, Library):
- return ext.export_symbols
- return _build_ext.get_export_symbols(self, ext)
-
- def build_extension(self, ext):
- ext._convert_pyx_sources_to_lang()
- _compiler = self.compiler
- try:
- if isinstance(ext, Library):
- self.compiler = self.shlib_compiler
- _build_ext.build_extension(self, ext)
- if ext._needs_stub:
- cmd = self.get_finalized_command('build_py').build_lib
- self.write_stub(cmd, ext)
- finally:
- self.compiler = _compiler
-
- def links_to_dynamic(self, ext):
- """Return true if 'ext' links to a dynamic lib in the same package"""
- # XXX this should check to ensure the lib is actually being built
- # XXX as dynamic, and not just using a locally-found version or a
- # XXX static-compiled version
- libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
- pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
- return any(pkg + libname in libnames for libname in ext.libraries)
-
- def get_outputs(self):
- return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
-
- def __get_stubs_outputs(self):
- # assemble the base name for each extension that needs a stub
- ns_ext_bases = (
- os.path.join(self.build_lib, *ext._full_name.split('.'))
- for ext in self.extensions
- if ext._needs_stub
- )
- # pair each base with the extension
- pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
- return list(base + fnext for base, fnext in pairs)
-
- def __get_output_extensions(self):
- yield '.py'
- yield '.pyc'
- if self.get_finalized_command('build_py').optimize:
- yield '.pyo'
-
- def write_stub(self, output_dir, ext, compile=False):
- log.info("writing stub loader for %s to %s", ext._full_name,
- output_dir)
- stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
- '.py')
- if compile and os.path.exists(stub_file):
- raise DistutilsError(stub_file + " already exists! Please delete.")
- if not self.dry_run:
- f = open(stub_file, 'w')
- f.write(
- '\n'.join([
- "def __bootstrap__():",
- " global __bootstrap__, __file__, __loader__",
- " import sys, os, pkg_resources, imp" + if_dl(", dl"),
- " __file__ = pkg_resources.resource_filename"
- "(__name__,%r)"
- % os.path.basename(ext._file_name),
- " del __bootstrap__",
- " if '__loader__' in globals():",
- " del __loader__",
- if_dl(" old_flags = sys.getdlopenflags()"),
- " old_dir = os.getcwd()",
- " try:",
- " os.chdir(os.path.dirname(__file__))",
- if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
- " imp.load_dynamic(__name__,__file__)",
- " finally:",
- if_dl(" sys.setdlopenflags(old_flags)"),
- " os.chdir(old_dir)",
- "__bootstrap__()",
- "" # terminal \n
- ])
- )
- f.close()
- if compile:
- from distutils.util import byte_compile
-
- byte_compile([stub_file], optimize=0,
- force=True, dry_run=self.dry_run)
- optimize = self.get_finalized_command('install_lib').optimize
- if optimize > 0:
- byte_compile([stub_file], optimize=optimize,
- force=True, dry_run=self.dry_run)
- if os.path.exists(stub_file) and not self.dry_run:
- os.unlink(stub_file)
-
-
-if use_stubs or os.name == 'nt':
- # Build shared libraries
- #
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- self.link(
- self.SHARED_LIBRARY, objects, output_libname,
- output_dir, libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug, extra_preargs, extra_postargs,
- build_temp, target_lang
- )
-else:
- # Build static libraries everywhere else
- libtype = 'static'
-
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- # XXX we need to either disallow these attrs on Library instances,
- # or warn/abort here if set, or something...
- # libraries=None, library_dirs=None, runtime_library_dirs=None,
- # export_symbols=None, extra_preargs=None, extra_postargs=None,
- # build_temp=None
-
- assert output_dir is None # distutils build_ext doesn't pass this
- output_dir, filename = os.path.split(output_libname)
- basename, ext = os.path.splitext(filename)
- if self.library_filename("x").startswith('lib'):
- # strip 'lib' prefix; this is kludgy if some platform uses
- # a different prefix
- basename = basename[3:]
-
- self.create_static_lib(
- objects, basename, output_dir, debug, target_lang
- )
+
+ if self.include_dirs is not None:
+ compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name, value) in self.define:
+ compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ compiler.set_link_objects(self.link_objects)
+
+ # hack so distutils' build_extension() builds a library instead
+ compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+ def get_export_symbols(self, ext):
+ if isinstance(ext, Library):
+ return ext.export_symbols
+ return _build_ext.get_export_symbols(self, ext)
+
+ def build_extension(self, ext):
+ ext._convert_pyx_sources_to_lang()
+ _compiler = self.compiler
+ try:
+ if isinstance(ext, Library):
+ self.compiler = self.shlib_compiler
+ _build_ext.build_extension(self, ext)
+ if ext._needs_stub:
+ cmd = self.get_finalized_command('build_py').build_lib
+ self.write_stub(cmd, ext)
+ finally:
+ self.compiler = _compiler
+
+ def links_to_dynamic(self, ext):
+ """Return true if 'ext' links to a dynamic lib in the same package"""
+ # XXX this should check to ensure the lib is actually being built
+ # XXX as dynamic, and not just using a locally-found version or a
+ # XXX static-compiled version
+ libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+ pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
+ return any(pkg + libname in libnames for libname in ext.libraries)
+
+ def get_outputs(self):
+ return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
+
+ def __get_stubs_outputs(self):
+ # assemble the base name for each extension that needs a stub
+ ns_ext_bases = (
+ os.path.join(self.build_lib, *ext._full_name.split('.'))
+ for ext in self.extensions
+ if ext._needs_stub
+ )
+ # pair each base with the extension
+ pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+ return list(base + fnext for base, fnext in pairs)
+
+ def __get_output_extensions(self):
+ yield '.py'
+ yield '.pyc'
+ if self.get_finalized_command('build_py').optimize:
+ yield '.pyo'
+
+ def write_stub(self, output_dir, ext, compile=False):
+ log.info("writing stub loader for %s to %s", ext._full_name,
+ output_dir)
+ stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
+ '.py')
+ if compile and os.path.exists(stub_file):
+ raise DistutilsError(stub_file + " already exists! Please delete.")
+ if not self.dry_run:
+ f = open(stub_file, 'w')
+ f.write(
+ '\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __file__, __loader__",
+ " import sys, os, pkg_resources, imp" + if_dl(", dl"),
+ " __file__ = pkg_resources.resource_filename"
+ "(__name__,%r)"
+ % os.path.basename(ext._file_name),
+ " del __bootstrap__",
+ " if '__loader__' in globals():",
+ " del __loader__",
+ if_dl(" old_flags = sys.getdlopenflags()"),
+ " old_dir = os.getcwd()",
+ " try:",
+ " os.chdir(os.path.dirname(__file__))",
+ if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
+ " imp.load_dynamic(__name__,__file__)",
+ " finally:",
+ if_dl(" sys.setdlopenflags(old_flags)"),
+ " os.chdir(old_dir)",
+ "__bootstrap__()",
+ "" # terminal \n
+ ])
+ )
+ f.close()
+ if compile:
+ from distutils.util import byte_compile
+
+ byte_compile([stub_file], optimize=0,
+ force=True, dry_run=self.dry_run)
+ optimize = self.get_finalized_command('install_lib').optimize
+ if optimize > 0:
+ byte_compile([stub_file], optimize=optimize,
+ force=True, dry_run=self.dry_run)
+ if os.path.exists(stub_file) and not self.dry_run:
+ os.unlink(stub_file)
+
+
+if use_stubs or os.name == 'nt':
+ # Build shared libraries
+ #
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ self.link(
+ self.SHARED_LIBRARY, objects, output_libname,
+ output_dir, libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug, extra_preargs, extra_postargs,
+ build_temp, target_lang
+ )
+else:
+ # Build static libraries everywhere else
+ libtype = 'static'
+
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ # XXX we need to either disallow these attrs on Library instances,
+ # or warn/abort here if set, or something...
+ # libraries=None, library_dirs=None, runtime_library_dirs=None,
+ # export_symbols=None, extra_preargs=None, extra_postargs=None,
+ # build_temp=None
+
+ assert output_dir is None # distutils build_ext doesn't pass this
+ output_dir, filename = os.path.split(output_libname)
+ basename, ext = os.path.splitext(filename)
+ if self.library_filename("x").startswith('lib'):
+ # strip 'lib' prefix; this is kludgy if some platform uses
+ # a different prefix
+ basename = basename[3:]
+
+ self.create_static_lib(
+ objects, basename, output_dir, debug, target_lang
+ )
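For reference, the write_stub() logic above emits a small pure-Python loader next to each compiled extension so the extension remains importable from a zipped egg. Below is a minimal sketch of the stub that the string template generates for a hypothetical extension module foo (the file name foo.so is illustrative, and if_dl() is assumed to contribute nothing, as on platforms without the dl module):

    # generated stub loader (sketch); at import time it replaces itself
    # with the real shared object
    def __bootstrap__():
        global __bootstrap__, __file__, __loader__
        import sys, os, pkg_resources, imp
        # resolve the real shared object, extracting it from the egg if needed
        __file__ = pkg_resources.resource_filename(__name__, 'foo.so')
        del __bootstrap__
        if '__loader__' in globals():
            del __loader__
        old_dir = os.getcwd()
        try:
            # load from the extension's own directory so relative
            # references resolve
            os.chdir(os.path.dirname(__file__))
            imp.load_dynamic(__name__, __file__)
        finally:
            os.chdir(old_dir)
    __bootstrap__()

When compile=True, the stub is byte-compiled and the .py source is then unlinked, leaving only the bytecode alongside the shared object.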
diff --git a/contrib/python/setuptools/py2/setuptools/command/build_py.py b/contrib/python/setuptools/py2/setuptools/command/build_py.py
index 733e283005..b0314fd413 100644
--- a/contrib/python/setuptools/py2/setuptools/command/build_py.py
+++ b/contrib/python/setuptools/py2/setuptools/command/build_py.py
@@ -1,103 +1,103 @@
-from glob import glob
-from distutils.util import convert_path
-import distutils.command.build_py as orig
-import os
-import fnmatch
-import textwrap
-import io
-import distutils.errors
-import itertools
-
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
+
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter, filterfalse
-
-try:
- from setuptools.lib2to3_ex import Mixin2to3
-except ImportError:
-
- class Mixin2to3:
- def run_2to3(self, files, doctests=True):
- "do nothing"
-
-
-class build_py(orig.build_py, Mixin2to3):
- """Enhanced 'build_py' command that includes data files with packages
-
- The data files are specified via a 'package_data' argument to 'setup()'.
- See 'setuptools.dist.Distribution' for more details.
-
- Also, this version of the 'build_py' command allows you to specify both
- 'py_modules' and 'packages' in the same setup operation.
- """
-
- def finalize_options(self):
- orig.build_py.finalize_options(self)
- self.package_data = self.distribution.package_data
- self.exclude_package_data = (self.distribution.exclude_package_data or
- {})
- if 'data_files' in self.__dict__:
- del self.__dict__['data_files']
- self.__updated_files = []
- self.__doctests_2to3 = []
-
- def run(self):
- """Build modules, packages, and copy data files to build directory"""
- if not self.py_modules and not self.packages:
- return
-
- if self.py_modules:
- self.build_modules()
-
- if self.packages:
- self.build_packages()
- self.build_package_data()
-
- self.run_2to3(self.__updated_files, False)
- self.run_2to3(self.__updated_files, True)
- self.run_2to3(self.__doctests_2to3, True)
-
- # Only compile actual .py files, using our base class' idea of what our
- # output files are.
- self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
-
- def __getattr__(self, attr):
- "lazily compute data files"
- if attr == 'data_files':
- self.data_files = self._get_data_files()
- return self.data_files
- return orig.build_py.__getattr__(self, attr)
-
- def build_module(self, module, module_file, package):
+
+try:
+ from setuptools.lib2to3_ex import Mixin2to3
+except ImportError:
+
+ class Mixin2to3:
+ def run_2to3(self, files, doctests=True):
+ "do nothing"
+
+
+class build_py(orig.build_py, Mixin2to3):
+ """Enhanced 'build_py' command that includes data files with packages
+
+ The data files are specified via a 'package_data' argument to 'setup()'.
+ See 'setuptools.dist.Distribution' for more details.
+
+ Also, this version of the 'build_py' command allows you to specify both
+ 'py_modules' and 'packages' in the same setup operation.
+ """
+
+ def finalize_options(self):
+ orig.build_py.finalize_options(self)
+ self.package_data = self.distribution.package_data
+ self.exclude_package_data = (self.distribution.exclude_package_data or
+ {})
+ if 'data_files' in self.__dict__:
+ del self.__dict__['data_files']
+ self.__updated_files = []
+ self.__doctests_2to3 = []
+
+ def run(self):
+ """Build modules, packages, and copy data files to build directory"""
+ if not self.py_modules and not self.packages:
+ return
+
+ if self.py_modules:
+ self.build_modules()
+
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ self.run_2to3(self.__updated_files, False)
+ self.run_2to3(self.__updated_files, True)
+ self.run_2to3(self.__doctests_2to3, True)
+
+ # Only compile actual .py files, using our base class' idea of what our
+ # output files are.
+ self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+ def __getattr__(self, attr):
+ "lazily compute data files"
+ if attr == 'data_files':
+ self.data_files = self._get_data_files()
+ return self.data_files
+ return orig.build_py.__getattr__(self, attr)
+
+ def build_module(self, module, module_file, package):
if six.PY2 and isinstance(package, six.string_types):
# avoid errors on Python 2 when unicode is passed (#190)
package = package.split('.')
- outfile, copied = orig.build_py.build_module(self, module, module_file,
- package)
- if copied:
- self.__updated_files.append(outfile)
- return outfile, copied
-
- def _get_data_files(self):
- """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
- self.analyze_manifest()
- return list(map(self._get_pkg_data_files, self.packages or ()))
-
- def _get_pkg_data_files(self, package):
- # Locate package source directory
- src_dir = self.get_package_dir(package)
-
- # Compute package build directory
- build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
- # Strip directory from globbed filenames
- filenames = [
- os.path.relpath(file, src_dir)
- for file in self.find_data_files(package, src_dir)
- ]
- return package, src_dir, build_dir, filenames
-
- def find_data_files(self, package, src_dir):
- """Return filenames for package's data files in 'src_dir'"""
+ outfile, copied = orig.build_py.build_module(self, module, module_file,
+ package)
+ if copied:
+ self.__updated_files.append(outfile)
+ return outfile, copied
+
+ def _get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ self.analyze_manifest()
+ return list(map(self._get_pkg_data_files, self.packages or ()))
+
+ def _get_pkg_data_files(self, package):
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Strip directory from globbed filenames
+ filenames = [
+ os.path.relpath(file, src_dir)
+ for file in self.find_data_files(package, src_dir)
+ ]
+ return package, src_dir, build_dir, filenames
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
patterns = self._get_platform_patterns(
self.package_data,
package,
@@ -111,111 +111,111 @@ class build_py(orig.build_py, Mixin2to3):
self.manifest_files.get(package, []),
glob_files,
)
- return self.exclude_data_files(package, src_dir, files)
-
- def build_package_data(self):
- """Copy data files into build directory"""
- for package, src_dir, build_dir, filenames in self.data_files:
- for filename in filenames:
- target = os.path.join(build_dir, filename)
- self.mkpath(os.path.dirname(target))
- srcfile = os.path.join(src_dir, filename)
- outf, copied = self.copy_file(srcfile, target)
- srcfile = os.path.abspath(srcfile)
- if (copied and
- srcfile in self.distribution.convert_2to3_doctests):
- self.__doctests_2to3.append(outf)
-
- def analyze_manifest(self):
- self.manifest_files = mf = {}
- if not self.distribution.include_package_data:
- return
- src_dirs = {}
- for package in self.packages or ():
- # Locate package source directory
- src_dirs[assert_relative(self.get_package_dir(package))] = package
-
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- for path in ei_cmd.filelist.files:
- d, f = os.path.split(assert_relative(path))
- prev = None
- oldf = f
- while d and d != prev and d not in src_dirs:
- prev = d
- d, df = os.path.split(d)
- f = os.path.join(df, f)
- if d in src_dirs:
- if path.endswith('.py') and f == oldf:
- continue # it's a module, not data
- mf.setdefault(src_dirs[d], []).append(path)
-
- def get_data_files(self):
- pass # Lazily compute data files in _get_data_files() function.
-
- def check_package(self, package, package_dir):
- """Check namespace packages' __init__ for declare_namespace"""
- try:
- return self.packages_checked[package]
- except KeyError:
- pass
-
- init_py = orig.build_py.check_package(self, package, package_dir)
- self.packages_checked[package] = init_py
-
- if not init_py or not self.distribution.namespace_packages:
- return init_py
-
- for pkg in self.distribution.namespace_packages:
- if pkg == package or pkg.startswith(package + '.'):
- break
- else:
- return init_py
-
- with io.open(init_py, 'rb') as f:
- contents = f.read()
- if b'declare_namespace' not in contents:
- raise distutils.errors.DistutilsError(
- "Namespace package problem: %s is a namespace package, but "
- "its\n__init__.py does not call declare_namespace()! Please "
- 'fix it.\n(See the setuptools manual under '
- '"Namespace Packages" for details.)\n"' % (package,)
- )
- return init_py
-
- def initialize_options(self):
- self.packages_checked = {}
- orig.build_py.initialize_options(self)
-
- def get_package_dir(self, package):
- res = orig.build_py.get_package_dir(self, package)
- if self.distribution.src_root is not None:
- return os.path.join(self.distribution.src_root, res)
- return res
-
- def exclude_data_files(self, package, src_dir, files):
- """Filter filenames for package's data files in 'src_dir'"""
+ return self.exclude_data_files(package, src_dir, files)
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ srcfile = os.path.join(src_dir, filename)
+ outf, copied = self.copy_file(srcfile, target)
+ srcfile = os.path.abspath(srcfile)
+ if (copied and
+ srcfile in self.distribution.convert_2to3_doctests):
+ self.__doctests_2to3.append(outf)
+
+ def analyze_manifest(self):
+ self.manifest_files = mf = {}
+ if not self.distribution.include_package_data:
+ return
+ src_dirs = {}
+ for package in self.packages or ():
+ # Locate package source directory
+ src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ for path in ei_cmd.filelist.files:
+ d, f = os.path.split(assert_relative(path))
+ prev = None
+ oldf = f
+ while d and d != prev and d not in src_dirs:
+ prev = d
+ d, df = os.path.split(d)
+ f = os.path.join(df, f)
+ if d in src_dirs:
+ if path.endswith('.py') and f == oldf:
+ continue # it's a module, not data
+ mf.setdefault(src_dirs[d], []).append(path)
+
+ def get_data_files(self):
+ pass # Lazily compute data files in _get_data_files() function.
+
+ def check_package(self, package, package_dir):
+ """Check namespace packages' __init__ for declare_namespace"""
+ try:
+ return self.packages_checked[package]
+ except KeyError:
+ pass
+
+ init_py = orig.build_py.check_package(self, package, package_dir)
+ self.packages_checked[package] = init_py
+
+ if not init_py or not self.distribution.namespace_packages:
+ return init_py
+
+ for pkg in self.distribution.namespace_packages:
+ if pkg == package or pkg.startswith(package + '.'):
+ break
+ else:
+ return init_py
+
+ with io.open(init_py, 'rb') as f:
+ contents = f.read()
+ if b'declare_namespace' not in contents:
+ raise distutils.errors.DistutilsError(
+ "Namespace package problem: %s is a namespace package, but "
+ "its\n__init__.py does not call declare_namespace()! Please "
+ 'fix it.\n(See the setuptools manual under '
+ '"Namespace Packages" for details.)\n"' % (package,)
+ )
+ return init_py
+
+ def initialize_options(self):
+ self.packages_checked = {}
+ orig.build_py.initialize_options(self)
+
+ def get_package_dir(self, package):
+ res = orig.build_py.get_package_dir(self, package)
+ if self.distribution.src_root is not None:
+ return os.path.join(self.distribution.src_root, res)
+ return res
+
+ def exclude_data_files(self, package, src_dir, files):
+ """Filter filenames for package's data files in 'src_dir'"""
files = list(files)
patterns = self._get_platform_patterns(
self.exclude_package_data,
package,
src_dir,
- )
+ )
match_groups = (
fnmatch.filter(files, pattern)
for pattern in patterns
- )
+ )
# flatten the groups of matches into an iterable of matches
matches = itertools.chain.from_iterable(match_groups)
bad = set(matches)
keepers = (
- fn
- for fn in files
- if fn not in bad
+ fn
+ for fn in files
+ if fn not in bad
)
# ditch dupes
return list(_unique_everseen(keepers))
-
+
@staticmethod
def _get_platform_patterns(spec, package, src_dir):
"""
@@ -233,7 +233,7 @@ class build_py(orig.build_py, Mixin2to3):
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
)
-
+
# from Python docs
def _unique_everseen(iterable, key=None):
@@ -254,17 +254,17 @@ def _unique_everseen(iterable, key=None):
yield element
-def assert_relative(path):
- if not os.path.isabs(path):
- return path
- from distutils.errors import DistutilsSetupError
-
- msg = textwrap.dedent("""
- Error: setup script specifies an absolute path:
-
- %s
-
- setup() arguments must *always* be /-separated paths relative to the
- setup.py directory, *never* absolute paths.
- """).lstrip() % path
- raise DistutilsSetupError(msg)
+def assert_relative(path):
+ if not os.path.isabs(path):
+ return path
+ from distutils.errors import DistutilsSetupError
+
+ msg = textwrap.dedent("""
+ Error: setup script specifies an absolute path:
+
+ %s
+
+ setup() arguments must *always* be /-separated paths relative to the
+ setup.py directory, *never* absolute paths.
+ """).lstrip() % path
+ raise DistutilsSetupError(msg)
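For reference, the exclude_data_files() path above boils down to fnmatch filtering followed by order-preserving de-duplication via _unique_everseen(). A minimal standalone sketch of the same filtering, with made-up file names and a made-up *.tmp exclusion pattern:

    import fnmatch
    import itertools

    files = ['pkg/data/a.txt', 'pkg/data/b.tmp', 'pkg/data/a.txt']
    patterns = ['pkg/data/*.tmp']

    # collect every file matched by any exclusion pattern
    match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
    bad = set(itertools.chain.from_iterable(match_groups))

    # keep non-excluded files, dropping duplicates while preserving order
    seen = set()
    keepers = [fn for fn in files
               if fn not in bad and not (fn in seen or seen.add(fn))]
    print(keepers)  # -> ['pkg/data/a.txt']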
diff --git a/contrib/python/setuptools/py2/setuptools/command/develop.py b/contrib/python/setuptools/py2/setuptools/command/develop.py
index 6cb7d37a16..b561924609 100644
--- a/contrib/python/setuptools/py2/setuptools/command/develop.py
+++ b/contrib/python/setuptools/py2/setuptools/command/develop.py
@@ -1,86 +1,86 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsError, DistutilsOptionError
-import os
-import glob
-import io
-
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsError, DistutilsOptionError
+import os
+import glob
+import io
+
from setuptools.extern import six
-
+
import pkg_resources
-from setuptools.command.easy_install import easy_install
+from setuptools.command.easy_install import easy_install
from setuptools import namespaces
-import setuptools
-
+import setuptools
+
__metaclass__ = type
-
+
class develop(namespaces.DevelopInstaller, easy_install):
- """Set up package for development"""
-
- description = "install package in 'development mode'"
-
- user_options = easy_install.user_options + [
- ("uninstall", "u", "Uninstall this source package"),
- ("egg-path=", None, "Set the path to be used in the .egg-link file"),
- ]
-
- boolean_options = easy_install.boolean_options + ['uninstall']
-
- command_consumes_arguments = False # override base
-
- def run(self):
- if self.uninstall:
- self.multi_version = True
- self.uninstall_link()
+ """Set up package for development"""
+
+ description = "install package in 'development mode'"
+
+ user_options = easy_install.user_options + [
+ ("uninstall", "u", "Uninstall this source package"),
+ ("egg-path=", None, "Set the path to be used in the .egg-link file"),
+ ]
+
+ boolean_options = easy_install.boolean_options + ['uninstall']
+
+ command_consumes_arguments = False # override base
+
+ def run(self):
+ if self.uninstall:
+ self.multi_version = True
+ self.uninstall_link()
self.uninstall_namespaces()
- else:
- self.install_for_development()
- self.warn_deprecated_options()
-
- def initialize_options(self):
- self.uninstall = None
- self.egg_path = None
- easy_install.initialize_options(self)
- self.setup_path = None
- self.always_copy_from = '.' # always copy eggs installed in curdir
-
- def finalize_options(self):
- ei = self.get_finalized_command("egg_info")
- if ei.broken_egg_info:
- template = "Please rename %r to %r before using 'develop'"
- args = ei.egg_info, ei.broken_egg_info
- raise DistutilsError(template % args)
- self.args = [ei.egg_name]
-
- easy_install.finalize_options(self)
- self.expand_basedirs()
- self.expand_dirs()
- # pick up setup-dir .egg files only: no .egg-info
- self.package_index.scan(glob.glob('*.egg'))
-
- egg_link_fn = ei.egg_name + '.egg-link'
- self.egg_link = os.path.join(self.install_dir, egg_link_fn)
- self.egg_base = ei.egg_base
- if self.egg_path is None:
- self.egg_path = os.path.abspath(ei.egg_base)
-
+ else:
+ self.install_for_development()
+ self.warn_deprecated_options()
+
+ def initialize_options(self):
+ self.uninstall = None
+ self.egg_path = None
+ easy_install.initialize_options(self)
+ self.setup_path = None
+ self.always_copy_from = '.' # always copy eggs installed in curdir
+
+ def finalize_options(self):
+ ei = self.get_finalized_command("egg_info")
+ if ei.broken_egg_info:
+ template = "Please rename %r to %r before using 'develop'"
+ args = ei.egg_info, ei.broken_egg_info
+ raise DistutilsError(template % args)
+ self.args = [ei.egg_name]
+
+ easy_install.finalize_options(self)
+ self.expand_basedirs()
+ self.expand_dirs()
+ # pick up setup-dir .egg files only: no .egg-info
+ self.package_index.scan(glob.glob('*.egg'))
+
+ egg_link_fn = ei.egg_name + '.egg-link'
+ self.egg_link = os.path.join(self.install_dir, egg_link_fn)
+ self.egg_base = ei.egg_base
+ if self.egg_path is None:
+ self.egg_path = os.path.abspath(ei.egg_base)
+
target = pkg_resources.normalize_path(self.egg_base)
egg_path = pkg_resources.normalize_path(
os.path.join(self.install_dir, self.egg_path))
- if egg_path != target:
- raise DistutilsOptionError(
- "--egg-path must be a relative path from the install"
- " directory to " + target
- )
-
- # Make a distribution for the package's source
+ if egg_path != target:
+ raise DistutilsOptionError(
+ "--egg-path must be a relative path from the install"
+ " directory to " + target
+ )
+
+ # Make a distribution for the package's source
self.dist = pkg_resources.Distribution(
- target,
+ target,
pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
- project_name=ei.egg_name
- )
-
+ project_name=ei.egg_name
+ )
+
self.setup_path = self._resolve_setup_path(
self.egg_base,
self.install_dir,
@@ -101,121 +101,121 @@ class develop(namespaces.DevelopInstaller, easy_install):
os.path.join(install_dir, egg_path, path_to_setup)
)
if resolved != pkg_resources.normalize_path(os.curdir):
- raise DistutilsOptionError(
- "Can't get a consistent path to setup script from"
+ raise DistutilsOptionError(
+ "Can't get a consistent path to setup script from"
" installation directory", resolved,
pkg_resources.normalize_path(os.curdir))
return path_to_setup
-
- def install_for_development(self):
+
+ def install_for_development(self):
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
- # If we run 2to3 we can not do this inplace:
-
- # Ensure metadata is up-to-date
- self.reinitialize_command('build_py', inplace=0)
- self.run_command('build_py')
- bpy_cmd = self.get_finalized_command("build_py")
+ # If we run 2to3 we can not do this inplace:
+
+ # Ensure metadata is up-to-date
+ self.reinitialize_command('build_py', inplace=0)
+ self.run_command('build_py')
+ bpy_cmd = self.get_finalized_command("build_py")
build_path = pkg_resources.normalize_path(bpy_cmd.build_lib)
-
- # Build extensions
- self.reinitialize_command('egg_info', egg_base=build_path)
- self.run_command('egg_info')
-
- self.reinitialize_command('build_ext', inplace=0)
- self.run_command('build_ext')
-
- # Fixup egg-link and easy-install.pth
- ei_cmd = self.get_finalized_command("egg_info")
- self.egg_path = build_path
- self.dist.location = build_path
- # XXX
+
+ # Build extensions
+ self.reinitialize_command('egg_info', egg_base=build_path)
+ self.run_command('egg_info')
+
+ self.reinitialize_command('build_ext', inplace=0)
+ self.run_command('build_ext')
+
+ # Fixup egg-link and easy-install.pth
+ ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_path = build_path
+ self.dist.location = build_path
+ # XXX
self.dist._provider = pkg_resources.PathMetadata(
build_path, ei_cmd.egg_info)
- else:
- # Without 2to3 inplace works fine:
- self.run_command('egg_info')
-
- # Build extensions in-place
- self.reinitialize_command('build_ext', inplace=1)
- self.run_command('build_ext')
-
- self.install_site_py() # ensure that target dir is site-safe
- if setuptools.bootstrap_install_from:
- self.easy_install(setuptools.bootstrap_install_from)
- setuptools.bootstrap_install_from = None
-
+ else:
+ # Without 2to3 inplace works fine:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ self.install_site_py() # ensure that target dir is site-safe
+ if setuptools.bootstrap_install_from:
+ self.easy_install(setuptools.bootstrap_install_from)
+ setuptools.bootstrap_install_from = None
+
self.install_namespaces()
- # create an .egg-link in the installation dir, pointing to our egg
- log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
- if not self.dry_run:
- with open(self.egg_link, "w") as f:
- f.write(self.egg_path + "\n" + self.setup_path)
- # postprocess the installed distro, fixing up .pth, installing scripts,
- # and handling requirements
- self.process_distribution(None, self.dist, not self.no_deps)
-
- def uninstall_link(self):
- if os.path.exists(self.egg_link):
- log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
- egg_link_file = open(self.egg_link)
- contents = [line.rstrip() for line in egg_link_file]
- egg_link_file.close()
- if contents not in ([self.egg_path],
- [self.egg_path, self.setup_path]):
- log.warn("Link points to %s: uninstall aborted", contents)
- return
- if not self.dry_run:
- os.unlink(self.egg_link)
- if not self.dry_run:
- self.update_pth(self.dist) # remove any .pth link to us
- if self.distribution.scripts:
- # XXX should also check for entry point scripts!
- log.warn("Note: you must uninstall or replace scripts manually!")
-
- def install_egg_scripts(self, dist):
- if dist is not self.dist:
- # Installing a dependency, so fall back to normal behavior
- return easy_install.install_egg_scripts(self, dist)
-
- # create wrapper scripts in the script dir, pointing to dist.scripts
-
- # new-style...
- self.install_wrapper_scripts(dist)
-
- # ...and old-style
- for script_name in self.distribution.scripts or []:
- script_path = os.path.abspath(convert_path(script_name))
- script_name = os.path.basename(script_path)
- with io.open(script_path) as strm:
- script_text = strm.read()
- self.install_script(dist, script_name, script_text, script_path)
-
- def install_wrapper_scripts(self, dist):
- dist = VersionlessRequirement(dist)
- return easy_install.install_wrapper_scripts(self, dist)
-
-
+ # create an .egg-link in the installation dir, pointing to our egg
+ log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+ if not self.dry_run:
+ with open(self.egg_link, "w") as f:
+ f.write(self.egg_path + "\n" + self.setup_path)
+ # postprocess the installed distro, fixing up .pth, installing scripts,
+ # and handling requirements
+ self.process_distribution(None, self.dist, not self.no_deps)
+
+ def uninstall_link(self):
+ if os.path.exists(self.egg_link):
+ log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+ egg_link_file = open(self.egg_link)
+ contents = [line.rstrip() for line in egg_link_file]
+ egg_link_file.close()
+ if contents not in ([self.egg_path],
+ [self.egg_path, self.setup_path]):
+ log.warn("Link points to %s: uninstall aborted", contents)
+ return
+ if not self.dry_run:
+ os.unlink(self.egg_link)
+ if not self.dry_run:
+ self.update_pth(self.dist) # remove any .pth link to us
+ if self.distribution.scripts:
+ # XXX should also check for entry point scripts!
+ log.warn("Note: you must uninstall or replace scripts manually!")
+
+ def install_egg_scripts(self, dist):
+ if dist is not self.dist:
+ # Installing a dependency, so fall back to normal behavior
+ return easy_install.install_egg_scripts(self, dist)
+
+ # create wrapper scripts in the script dir, pointing to dist.scripts
+
+ # new-style...
+ self.install_wrapper_scripts(dist)
+
+ # ...and old-style
+ for script_name in self.distribution.scripts or []:
+ script_path = os.path.abspath(convert_path(script_name))
+ script_name = os.path.basename(script_path)
+ with io.open(script_path) as strm:
+ script_text = strm.read()
+ self.install_script(dist, script_name, script_text, script_path)
+
+ def install_wrapper_scripts(self, dist):
+ dist = VersionlessRequirement(dist)
+ return easy_install.install_wrapper_scripts(self, dist)
+
+
class VersionlessRequirement:
- """
- Adapt a pkg_resources.Distribution to simply return the project
- name as the 'requirement' so that scripts will work across
- multiple versions.
-
+ """
+ Adapt a pkg_resources.Distribution to simply return the project
+ name as the 'requirement' so that scripts will work across
+ multiple versions.
+
>>> from pkg_resources import Distribution
- >>> dist = Distribution(project_name='foo', version='1.0')
- >>> str(dist.as_requirement())
- 'foo==1.0'
- >>> adapted_dist = VersionlessRequirement(dist)
- >>> str(adapted_dist.as_requirement())
- 'foo'
- """
-
- def __init__(self, dist):
- self.__dist = dist
-
- def __getattr__(self, name):
- return getattr(self.__dist, name)
-
- def as_requirement(self):
- return self.project_name
+ >>> dist = Distribution(project_name='foo', version='1.0')
+ >>> str(dist.as_requirement())
+ 'foo==1.0'
+ >>> adapted_dist = VersionlessRequirement(dist)
+ >>> str(adapted_dist.as_requirement())
+ 'foo'
+ """
+
+ def __init__(self, dist):
+ self.__dist = dist
+
+ def __getattr__(self, name):
+ return getattr(self.__dist, name)
+
+ def as_requirement(self):
+ return self.project_name
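For reference, the .egg-link file that install_for_development() writes (and uninstall_link() later validates) is just a two-line text file: the egg path on the first line and the setup path on the second. A minimal sketch with illustrative paths for a hypothetical project named MyPkg:

    # contents mirror f.write(self.egg_path + "\n" + self.setup_path) above
    egg_path = '/home/user/src/mypkg'   # directory holding MyPkg.egg-info
    setup_path = '.'                    # relative path back to setup.py

    with open('MyPkg.egg-link', 'w') as f:
        f.write(egg_path + '\n' + setup_path)

uninstall_link() only removes the link when its stripped contents still equal [egg_path] or [egg_path, setup_path], which guards against deleting a link that now points at a different checkout.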
diff --git a/contrib/python/setuptools/py2/setuptools/command/easy_install.py b/contrib/python/setuptools/py2/setuptools/command/easy_install.py
index 037a85a5f4..426301d6f3 100644
--- a/contrib/python/setuptools/py2/setuptools/command/easy_install.py
+++ b/contrib/python/setuptools/py2/setuptools/command/easy_install.py
@@ -1,46 +1,46 @@
-#!/usr/bin/env python
-"""
-Easy Install
-------------
-
-A tool for doing automatic download/extract/build of distutils-based Python
-packages. For detailed documentation, see the accompanying EasyInstall.txt
-file, or visit the `EasyInstall home page`__.
-
+#!/usr/bin/env python
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages. For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
-
-"""
-
-from glob import glob
-from distutils.util import get_platform
-from distutils.util import convert_path, subst_vars
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
-from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
-from distutils import log, dir_util
-from distutils.command.build_scripts import first_line_re
-from distutils.spawn import find_executable
-import sys
-import os
-import zipimport
-import shutil
-import tempfile
-import zipfile
-import re
-import stat
-import random
-import textwrap
-import warnings
-import site
-import struct
-import contextlib
-import subprocess
-import shlex
-import io
-
-
+from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
+
+
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
@@ -48,358 +48,358 @@ from setuptools import SetuptoolsDeprecationWarning
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
-from setuptools import Command
-from setuptools.sandbox import run_setup
+from setuptools import Command
+from setuptools.sandbox import run_setup
from setuptools.py27compat import rmtree_safe
-from setuptools.command import setopt
-from setuptools.archive_util import unpack_archive
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
-from setuptools.command import bdist_egg, egg_info
+from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
-from pkg_resources import (
- yield_lines, normalize_path, resource_string, ensure_directory,
- get_distribution, find_distributions, Environment, Requirement,
- Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
- VersionConflict, DEVELOP_DIST,
-)
+from pkg_resources import (
+ yield_lines, normalize_path, resource_string, ensure_directory,
+ get_distribution, find_distributions, Environment, Requirement,
+ Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
+ VersionConflict, DEVELOP_DIST,
+)
import pkg_resources.py31compat
-
+
__metaclass__ = type
-# Turn on PEP440Warnings
-warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
-
-__all__ = [
- 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
- 'main', 'get_exe_prefixes',
-]
-
-
-def is_64bit():
- return struct.calcsize("P") == 8
-
-
-def samefile(p1, p2):
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+ 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+ 'main', 'get_exe_prefixes',
+]
+
+
+def is_64bit():
+ return struct.calcsize("P") == 8
+
+
+def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
- both_exist = os.path.exists(p1) and os.path.exists(p2)
- use_samefile = hasattr(os.path, 'samefile') and both_exist
- if use_samefile:
- return os.path.samefile(p1, p2)
- norm_p1 = os.path.normpath(os.path.normcase(p1))
- norm_p2 = os.path.normpath(os.path.normcase(p2))
- return norm_p1 == norm_p2
-
-
-if six.PY2:
+ both_exist = os.path.exists(p1) and os.path.exists(p2)
+ use_samefile = hasattr(os.path, 'samefile') and both_exist
+ if use_samefile:
+ return os.path.samefile(p1, p2)
+ norm_p1 = os.path.normpath(os.path.normcase(p1))
+ norm_p2 = os.path.normpath(os.path.normcase(p2))
+ return norm_p1 == norm_p2
+
+
+if six.PY2:
def _to_bytes(s):
- return s
-
- def isascii(s):
- try:
- six.text_type(s, 'ascii')
- return True
- except UnicodeError:
- return False
-else:
+ return s
+
+ def isascii(s):
+ try:
+ six.text_type(s, 'ascii')
+ return True
+ except UnicodeError:
+ return False
+else:
def _to_bytes(s):
return s.encode('utf8')
-
- def isascii(s):
- try:
- s.encode('ascii')
- return True
- except UnicodeError:
- return False
-
-
+
+ def isascii(s):
+ try:
+ s.encode('ascii')
+ return True
+ except UnicodeError:
+ return False
+
+
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
-class easy_install(Command):
- """Manage a download/build/install process"""
- description = "Find/get/install Python packages"
- command_consumes_arguments = True
-
- user_options = [
- ('prefix=', None, "installation prefix"),
- ("zip-ok", "z", "install package as a zipfile"),
- ("multi-version", "m", "make apps have to require() a version"),
- ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
- ("install-dir=", "d", "install package to DIR"),
- ("script-dir=", "s", "install scripts to DIR"),
- ("exclude-scripts", "x", "Don't install scripts"),
- ("always-copy", "a", "Copy all needed packages to install dir"),
- ("index-url=", "i", "base URL of Python Package Index"),
- ("find-links=", "f", "additional URL(s) to search for packages"),
- ("build-directory=", "b",
- "download/extract/build in DIR; keep the results"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('record=', None,
- "filename in which to record list of installed files"),
- ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
- ('site-dirs=', 'S', "list of directories where .pth files work"),
- ('editable', 'e', "Install specified packages in editable form"),
- ('no-deps', 'N', "don't install dependencies"),
- ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
- ('local-snapshots-ok', 'l',
- "allow building eggs from local checkouts"),
- ('version', None, "print version information and exit"),
- ('no-find-links', None,
- "Don't load find-links defined in packages being installed")
- ]
- boolean_options = [
- 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
- 'editable',
- 'no-deps', 'local-snapshots-ok', 'version'
- ]
-
- if site.ENABLE_USER_SITE:
- help_msg = "install in user site-package '%s'" % site.USER_SITE
- user_options.append(('user', None, help_msg))
- boolean_options.append('user')
-
- negative_opt = {'always-unzip': 'zip-ok'}
- create_index = PackageIndex
-
- def initialize_options(self):
- # the --user option seems to be an opt-in one,
- # so the default should be False.
- self.user = 0
- self.zip_ok = self.local_snapshots_ok = None
- self.install_dir = self.script_dir = self.exclude_scripts = None
- self.index_url = None
- self.find_links = None
- self.build_directory = None
- self.args = None
- self.optimize = self.record = None
- self.upgrade = self.always_copy = self.multi_version = None
- self.editable = self.no_deps = self.allow_hosts = None
- self.root = self.prefix = self.no_report = None
- self.version = None
- self.install_purelib = None # for pure module distributions
- self.install_platlib = None # non-pure (dists w/ extensions)
- self.install_headers = None # for C/C++ headers
- self.install_lib = None # set to either purelib or platlib
- self.install_scripts = None
- self.install_data = None
- self.install_base = None
- self.install_platbase = None
- if site.ENABLE_USER_SITE:
- self.install_userbase = site.USER_BASE
- self.install_usersite = site.USER_SITE
- else:
- self.install_userbase = None
- self.install_usersite = None
- self.no_find_links = None
-
- # Options not specifiable via command line
- self.package_index = None
- self.pth_file = self.always_copy_from = None
- self.site_dirs = None
- self.installed_projects = {}
- self.sitepy_installed = False
- # Always read easy_install options, even if we are subclassed, or have
- # an independent instance created. This ensures that defaults will
- # always come from the standard configuration file(s)' "easy_install"
- # section, even if this is a "develop" or "install" command, or some
- # other embedding.
- self._dry_run = None
- self.verbose = self.distribution.verbose
- self.distribution._set_command_options(
- self, self.distribution.get_option_dict('easy_install')
- )
-
- def delete_blockers(self, blockers):
- extant_blockers = (
- filename for filename in blockers
- if os.path.exists(filename) or os.path.islink(filename)
- )
- list(map(self._delete_path, extant_blockers))
-
- def _delete_path(self, path):
- log.info("Deleting %s", path)
- if self.dry_run:
- return
-
- is_tree = os.path.isdir(path) and not os.path.islink(path)
- remover = rmtree if is_tree else os.unlink
- remover(path)
-
- @staticmethod
- def _render_version():
- """
- Render the Setuptools version and installation details, then exit.
- """
+class easy_install(Command):
+ """Manage a download/build/install process"""
+ description = "Find/get/install Python packages"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('prefix=', None, "installation prefix"),
+ ("zip-ok", "z", "install package as a zipfile"),
+ ("multi-version", "m", "make apps have to require() a version"),
+ ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+ ("install-dir=", "d", "install package to DIR"),
+ ("script-dir=", "s", "install scripts to DIR"),
+ ("exclude-scripts", "x", "Don't install scripts"),
+ ("always-copy", "a", "Copy all needed packages to install dir"),
+ ("index-url=", "i", "base URL of Python Package Index"),
+ ("find-links=", "f", "additional URL(s) to search for packages"),
+ ("build-directory=", "b",
+ "download/extract/build in DIR; keep the results"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+ ('site-dirs=', 'S', "list of directories where .pth files work"),
+ ('editable', 'e', "Install specified packages in editable form"),
+ ('no-deps', 'N', "don't install dependencies"),
+ ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+ ('local-snapshots-ok', 'l',
+ "allow building eggs from local checkouts"),
+ ('version', None, "print version information and exit"),
+ ('no-find-links', None,
+ "Don't load find-links defined in packages being installed")
+ ]
+ boolean_options = [
+ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
+ 'editable',
+ 'no-deps', 'local-snapshots-ok', 'version'
+ ]
+
+ if site.ENABLE_USER_SITE:
+ help_msg = "install in user site-package '%s'" % site.USER_SITE
+ user_options.append(('user', None, help_msg))
+ boolean_options.append('user')
+
+ negative_opt = {'always-unzip': 'zip-ok'}
+ create_index = PackageIndex
+
+ def initialize_options(self):
+ # the --user option seems to be an opt-in one,
+ # so the default should be False.
+ self.user = 0
+ self.zip_ok = self.local_snapshots_ok = None
+ self.install_dir = self.script_dir = self.exclude_scripts = None
+ self.index_url = None
+ self.find_links = None
+ self.build_directory = None
+ self.args = None
+ self.optimize = self.record = None
+ self.upgrade = self.always_copy = self.multi_version = None
+ self.editable = self.no_deps = self.allow_hosts = None
+ self.root = self.prefix = self.no_report = None
+ self.version = None
+ self.install_purelib = None # for pure module distributions
+ self.install_platlib = None # non-pure (dists w/ extensions)
+ self.install_headers = None # for C/C++ headers
+ self.install_lib = None # set to either purelib or platlib
+ self.install_scripts = None
+ self.install_data = None
+ self.install_base = None
+ self.install_platbase = None
+ if site.ENABLE_USER_SITE:
+ self.install_userbase = site.USER_BASE
+ self.install_usersite = site.USER_SITE
+ else:
+ self.install_userbase = None
+ self.install_usersite = None
+ self.no_find_links = None
+
+ # Options not specifiable via command line
+ self.package_index = None
+ self.pth_file = self.always_copy_from = None
+ self.site_dirs = None
+ self.installed_projects = {}
+ self.sitepy_installed = False
+ # Always read easy_install options, even if we are subclassed, or have
+ # an independent instance created. This ensures that defaults will
+ # always come from the standard configuration file(s)' "easy_install"
+ # section, even if this is a "develop" or "install" command, or some
+ # other embedding.
+ self._dry_run = None
+ self.verbose = self.distribution.verbose
+ self.distribution._set_command_options(
+ self, self.distribution.get_option_dict('easy_install')
+ )
+
+ def delete_blockers(self, blockers):
+ extant_blockers = (
+ filename for filename in blockers
+ if os.path.exists(filename) or os.path.islink(filename)
+ )
+ list(map(self._delete_path, extant_blockers))
+
+ def _delete_path(self, path):
+ log.info("Deleting %s", path)
+ if self.dry_run:
+ return
+
+ is_tree = os.path.isdir(path) and not os.path.islink(path)
+ remover = rmtree if is_tree else os.unlink
+ remover(path)
+
+ @staticmethod
+ def _render_version():
+ """
+ Render the Setuptools version and installation details, then exit.
+ """
ver = '{}.{}'.format(*sys.version_info)
- dist = get_distribution('setuptools')
- tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
- print(tmpl.format(**locals()))
- raise SystemExit()
-
- def finalize_options(self):
- self.version and self._render_version()
-
- py_version = sys.version.split()[0]
- prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
-
- self.config_vars = {
- 'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
- 'py_version_short': py_version[0:3],
- 'py_version_nodot': py_version[0] + py_version[2],
- 'sys_prefix': prefix,
- 'prefix': prefix,
- 'sys_exec_prefix': exec_prefix,
- 'exec_prefix': exec_prefix,
- # Only python 3.2+ has abiflags
- 'abiflags': getattr(sys, 'abiflags', ''),
- }
-
- if site.ENABLE_USER_SITE:
- self.config_vars['userbase'] = self.install_userbase
- self.config_vars['usersite'] = self.install_usersite
-
- self._fix_install_dir_for_user_site()
-
- self.expand_basedirs()
- self.expand_dirs()
-
+ dist = get_distribution('setuptools')
+ tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+ print(tmpl.format(**locals()))
+ raise SystemExit()
+
+ def finalize_options(self):
+ self.version and self._render_version()
+
+ py_version = sys.version.split()[0]
+ prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
+
+ self.config_vars = {
+ 'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ 'py_version_short': py_version[0:3],
+ 'py_version_nodot': py_version[0] + py_version[2],
+ 'sys_prefix': prefix,
+ 'prefix': prefix,
+ 'sys_exec_prefix': exec_prefix,
+ 'exec_prefix': exec_prefix,
+ # Only python 3.2+ has abiflags
+ 'abiflags': getattr(sys, 'abiflags', ''),
+ }
+
+ if site.ENABLE_USER_SITE:
+ self.config_vars['userbase'] = self.install_userbase
+ self.config_vars['usersite'] = self.install_usersite
+
+ self._fix_install_dir_for_user_site()
+
+ self.expand_basedirs()
+ self.expand_dirs()
+
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
- # If a non-default installation directory was specified, default the
- # script directory to match it.
- if self.script_dir is None:
- self.script_dir = self.install_dir
-
- if self.no_find_links is None:
- self.no_find_links = False
-
- # Let install_dir get set by install_lib command, which in turn
- # gets its info from the install command, and takes into account
- # --prefix and --home and all that other crud.
- self.set_undefined_options(
- 'install_lib', ('install_dir', 'install_dir')
- )
- # Likewise, set default script_dir from 'install_scripts.install_dir'
- self.set_undefined_options(
- 'install_scripts', ('install_dir', 'script_dir')
- )
-
- if self.user and self.install_purelib:
- self.install_dir = self.install_purelib
- self.script_dir = self.install_scripts
- # default --record from the install command
- self.set_undefined_options('install', ('record', 'record'))
- # Should this be moved to the if statement below? It's not used
- # elsewhere
- normpath = map(normalize_path, sys.path)
- self.all_site_dirs = get_site_dirs()
- if self.site_dirs is not None:
- site_dirs = [
- os.path.expanduser(s.strip()) for s in
- self.site_dirs.split(',')
- ]
- for d in site_dirs:
- if not os.path.isdir(d):
- log.warn("%s (in --site-dirs) does not exist", d)
- elif normalize_path(d) not in normpath:
- raise DistutilsOptionError(
- d + " (in --site-dirs) is not on sys.path"
- )
- else:
- self.all_site_dirs.append(normalize_path(d))
- if not self.editable:
- self.check_site_dir()
+ # If a non-default installation directory was specified, default the
+ # script directory to match it.
+ if self.script_dir is None:
+ self.script_dir = self.install_dir
+
+ if self.no_find_links is None:
+ self.no_find_links = False
+
+ # Let install_dir get set by install_lib command, which in turn
+ # gets its info from the install command, and takes into account
+ # --prefix and --home and all that other crud.
+ self.set_undefined_options(
+ 'install_lib', ('install_dir', 'install_dir')
+ )
+ # Likewise, set default script_dir from 'install_scripts.install_dir'
+ self.set_undefined_options(
+ 'install_scripts', ('install_dir', 'script_dir')
+ )
+
+ if self.user and self.install_purelib:
+ self.install_dir = self.install_purelib
+ self.script_dir = self.install_scripts
+ # default --record from the install command
+ self.set_undefined_options('install', ('record', 'record'))
+ # Should this be moved to the if statement below? It's not used
+ # elsewhere
+ normpath = map(normalize_path, sys.path)
+ self.all_site_dirs = get_site_dirs()
+ if self.site_dirs is not None:
+ site_dirs = [
+ os.path.expanduser(s.strip()) for s in
+ self.site_dirs.split(',')
+ ]
+ for d in site_dirs:
+ if not os.path.isdir(d):
+ log.warn("%s (in --site-dirs) does not exist", d)
+ elif normalize_path(d) not in normpath:
+ raise DistutilsOptionError(
+ d + " (in --site-dirs) is not on sys.path"
+ )
+ else:
+ self.all_site_dirs.append(normalize_path(d))
+ if not self.editable:
+ self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
- self.shadow_path = self.all_site_dirs[:]
- for path_item in self.install_dir, normalize_path(self.script_dir):
- if path_item not in self.shadow_path:
- self.shadow_path.insert(0, path_item)
-
- if self.allow_hosts is not None:
- hosts = [s.strip() for s in self.allow_hosts.split(',')]
- else:
- hosts = ['*']
- if self.package_index is None:
- self.package_index = self.create_index(
- self.index_url, search_path=self.shadow_path, hosts=hosts,
- )
- self.local_index = Environment(self.shadow_path + sys.path)
-
- if self.find_links is not None:
- if isinstance(self.find_links, six.string_types):
- self.find_links = self.find_links.split()
- else:
- self.find_links = []
- if self.local_snapshots_ok:
- self.package_index.scan_egg_links(self.shadow_path + sys.path)
- if not self.no_find_links:
- self.package_index.add_find_links(self.find_links)
- self.set_undefined_options('install_lib', ('optimize', 'optimize'))
- if not isinstance(self.optimize, int):
- try:
- self.optimize = int(self.optimize)
- if not (0 <= self.optimize <= 2):
- raise ValueError
- except ValueError:
- raise DistutilsOptionError("--optimize must be 0, 1, or 2")
-
- if self.editable and not self.build_directory:
- raise DistutilsArgError(
- "Must specify a build directory (-b) when using --editable"
- )
- if not self.args:
- raise DistutilsArgError(
- "No urls, filenames, or requirements specified (see --help)")
-
- self.outputs = []
-
- def _fix_install_dir_for_user_site(self):
- """
- Fix the install_dir if "--user" was used.
- """
- if not self.user or not site.ENABLE_USER_SITE:
- return
-
- self.create_home_path()
- if self.install_userbase is None:
- msg = "User base directory is not specified"
- raise DistutilsPlatformError(msg)
- self.install_base = self.install_platbase = self.install_userbase
- scheme_name = os.name.replace('posix', 'unix') + '_user'
- self.select_scheme(scheme_name)
-
- def _expand_attrs(self, attrs):
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- if os.name == 'posix' or os.name == 'nt':
- val = os.path.expanduser(val)
- val = subst_vars(val, self.config_vars)
- setattr(self, attr, val)
-
- def expand_basedirs(self):
- """Calls `os.path.expanduser` on install_base, install_platbase and
- root."""
- self._expand_attrs(['install_base', 'install_platbase', 'root'])
-
- def expand_dirs(self):
- """Calls `os.path.expanduser` on install dirs."""
+ self.shadow_path = self.all_site_dirs[:]
+ for path_item in self.install_dir, normalize_path(self.script_dir):
+ if path_item not in self.shadow_path:
+ self.shadow_path.insert(0, path_item)
+
+ if self.allow_hosts is not None:
+ hosts = [s.strip() for s in self.allow_hosts.split(',')]
+ else:
+ hosts = ['*']
+ if self.package_index is None:
+ self.package_index = self.create_index(
+ self.index_url, search_path=self.shadow_path, hosts=hosts,
+ )
+ self.local_index = Environment(self.shadow_path + sys.path)
+
+ if self.find_links is not None:
+ if isinstance(self.find_links, six.string_types):
+ self.find_links = self.find_links.split()
+ else:
+ self.find_links = []
+ if self.local_snapshots_ok:
+ self.package_index.scan_egg_links(self.shadow_path + sys.path)
+ if not self.no_find_links:
+ self.package_index.add_find_links(self.find_links)
+ self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ if not (0 <= self.optimize <= 2):
+ raise ValueError
+ except ValueError:
+ raise DistutilsOptionError("--optimize must be 0, 1, or 2")
+
+ if self.editable and not self.build_directory:
+ raise DistutilsArgError(
+ "Must specify a build directory (-b) when using --editable"
+ )
+ if not self.args:
+ raise DistutilsArgError(
+ "No urls, filenames, or requirements specified (see --help)")
+
+ self.outputs = []
+
+ def _fix_install_dir_for_user_site(self):
+ """
+ Fix the install_dir if "--user" was used.
+ """
+ if not self.user or not site.ENABLE_USER_SITE:
+ return
+
+ self.create_home_path()
+ if self.install_userbase is None:
+ msg = "User base directory is not specified"
+ raise DistutilsPlatformError(msg)
+ self.install_base = self.install_platbase = self.install_userbase
+ scheme_name = os.name.replace('posix', 'unix') + '_user'
+ self.select_scheme(scheme_name)
+
+ def _expand_attrs(self, attrs):
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ if os.name == 'posix' or os.name == 'nt':
+ val = os.path.expanduser(val)
+ val = subst_vars(val, self.config_vars)
+ setattr(self, attr, val)
+
+ def expand_basedirs(self):
+ """Calls `os.path.expanduser` on install_base, install_platbase and
+ root."""
+ self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+ def expand_dirs(self):
+ """Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
@@ -409,7 +409,7 @@ class easy_install(Command):
'install_data',
]
self._expand_attrs(dirs)
-
+
def run(self, show_deprecation=True):
if show_deprecation:
self.announce(
@@ -417,228 +417,228 @@ class easy_install(Command):
"and will be removed in a future version."
, log.WARN,
)
- if self.verbose != self.distribution.verbose:
- log.set_verbosity(self.verbose)
- try:
- for spec in self.args:
- self.easy_install(spec, not self.no_deps)
- if self.record:
- outputs = self.outputs
- if self.root: # strip any package prefix
- root_len = len(self.root)
- for counter in range(len(outputs)):
- outputs[counter] = outputs[counter][root_len:]
- from distutils import file_util
-
- self.execute(
- file_util.write_file, (self.record, outputs),
- "writing list of installed files to '%s'" %
- self.record
- )
- self.warn_deprecated_options()
- finally:
- log.set_verbosity(self.distribution.verbose)
-
- def pseudo_tempname(self):
- """Return a pseudo-tempname base in the install directory.
- This code is intentionally naive; if a malicious party can write to
- the target directory you're already in deep doodoo.
- """
- try:
- pid = os.getpid()
+ if self.verbose != self.distribution.verbose:
+ log.set_verbosity(self.verbose)
+ try:
+ for spec in self.args:
+ self.easy_install(spec, not self.no_deps)
+ if self.record:
+ outputs = self.outputs
+ if self.root: # strip any package prefix
+ root_len = len(self.root)
+ for counter in range(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ from distutils import file_util
+
+ self.execute(
+ file_util.write_file, (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record
+ )
+ self.warn_deprecated_options()
+ finally:
+ log.set_verbosity(self.distribution.verbose)
+
+ def pseudo_tempname(self):
+ """Return a pseudo-tempname base in the install directory.
+ This code is intentionally naive; if a malicious party can write to
+ the target directory you're already in deep doodoo.
+ """
+ try:
+ pid = os.getpid()
except Exception:
- pid = random.randint(0, sys.maxsize)
- return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
-
- def warn_deprecated_options(self):
- pass
-
- def check_site_dir(self):
- """Verify that self.install_dir is .pth-capable dir, if needed"""
-
- instdir = normalize_path(self.install_dir)
- pth_file = os.path.join(instdir, 'easy-install.pth')
-
- # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
- is_site_dir = instdir in self.all_site_dirs
-
- if not is_site_dir and not self.multi_version:
- # No? Then directly test whether it does .pth file processing
- is_site_dir = self.check_pth_processing()
- else:
- # make sure we can write to target dir
- testfile = self.pseudo_tempname() + '.write-test'
- test_exists = os.path.exists(testfile)
- try:
- if test_exists:
- os.unlink(testfile)
- open(testfile, 'w').close()
- os.unlink(testfile)
- except (OSError, IOError):
- self.cant_write_to_target()
-
- if not is_site_dir and not self.multi_version:
- # Can't install non-multi to non-site dir
- raise DistutilsError(self.no_default_version_msg())
-
- if is_site_dir:
- if self.pth_file is None:
- self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
- else:
- self.pth_file = None
-
+ pid = random.randint(0, sys.maxsize)
+ return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
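# Minimal sketch of the pseudo-tempname pattern above; the directory is a
# hypothetical stand-in for self.install_dir, not taken from the command:
import os
probe_base = os.path.join('/tmp', "test-easy-install-%s" % os.getpid())
# e.g. '/tmp/test-easy-install-12345'; callers append '.write-test',
# '.pth', etc., so concurrent installers never collide on one name.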
+
+ def warn_deprecated_options(self):
+ pass
+
+ def check_site_dir(self):
+ """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+ instdir = normalize_path(self.install_dir)
+ pth_file = os.path.join(instdir, 'easy-install.pth')
+
+ # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+ is_site_dir = instdir in self.all_site_dirs
+
+ if not is_site_dir and not self.multi_version:
+ # No? Then directly test whether it does .pth file processing
+ is_site_dir = self.check_pth_processing()
+ else:
+ # make sure we can write to target dir
+ testfile = self.pseudo_tempname() + '.write-test'
+ test_exists = os.path.exists(testfile)
+ try:
+ if test_exists:
+ os.unlink(testfile)
+ open(testfile, 'w').close()
+ os.unlink(testfile)
+ except (OSError, IOError):
+ self.cant_write_to_target()
+
+ if not is_site_dir and not self.multi_version:
+ # Can't install non-multi to non-site dir
+ raise DistutilsError(self.no_default_version_msg())
+
+ if is_site_dir:
+ if self.pth_file is None:
+ self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
+ else:
+ self.pth_file = None
+
if instdir not in map(normalize_path, _pythonpath()):
- # only PYTHONPATH dirs need a site.py, so pretend it's there
- self.sitepy_installed = True
- elif self.multi_version and not os.path.exists(pth_file):
- self.sitepy_installed = True # don't need site.py in this case
- self.pth_file = None # and don't create a .pth file
- self.install_dir = instdir
-
- __cant_write_msg = textwrap.dedent("""
- can't create or remove files in install directory
-
- The following error occurred while trying to add or remove files in the
- installation directory:
-
- %s
-
- The installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
- """).lstrip()
-
- __not_exists_id = textwrap.dedent("""
- This directory does not currently exist. Please create it and try again, or
- choose a different installation directory (using the -d or --install-dir
- option).
- """).lstrip()
-
- __access_msg = textwrap.dedent("""
- Perhaps your account does not have write access to this directory? If the
- installation directory is a system-owned directory, you may need to sign in
- as the administrator or "root" account. If you do not have administrative
- access to this machine, you may wish to choose a different installation
- directory, preferably one that is listed in your PYTHONPATH environment
- variable.
-
- For information on other options, you may wish to consult the
- documentation at:
-
+ # only PYTHONPATH dirs need a site.py, so pretend it's there
+ self.sitepy_installed = True
+ elif self.multi_version and not os.path.exists(pth_file):
+ self.sitepy_installed = True # don't need site.py in this case
+ self.pth_file = None # and don't create a .pth file
+ self.install_dir = instdir
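# Sketch of the write-probe used above, as a free function (the directory
# argument is hypothetical): create and delete a throwaway file to learn
# whether the target directory is writable at all.
import os

def can_write_to(dirname):
    probe = os.path.join(dirname, 'write-test-%s' % os.getpid())
    try:
        if os.path.exists(probe):
            os.unlink(probe)
        open(probe, 'w').close()
        os.unlink(probe)
        return True
    except (OSError, IOError):
        return False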
+
+ __cant_write_msg = textwrap.dedent("""
+ can't create or remove files in install directory
+
+ The following error occurred while trying to add or remove files in the
+ installation directory:
+
+ %s
+
+ The installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
+ """).lstrip()
+
+ __not_exists_id = textwrap.dedent("""
+ This directory does not currently exist. Please create it and try again, or
+ choose a different installation directory (using the -d or --install-dir
+ option).
+ """).lstrip()
+
+ __access_msg = textwrap.dedent("""
+ Perhaps your account does not have write access to this directory? If the
+ installation directory is a system-owned directory, you may need to sign in
+ as the administrator or "root" account. If you do not have administrative
+ access to this machine, you may wish to choose a different installation
+ directory, preferably one that is listed in your PYTHONPATH environment
+ variable.
+
+ For information on other options, you may wish to consult the
+ documentation at:
+
https://setuptools.readthedocs.io/en/latest/easy_install.html
-
- Please make the appropriate changes for your system and try again.
- """).lstrip()
-
- def cant_write_to_target(self):
- msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
-
- if not os.path.exists(self.install_dir):
- msg += '\n' + self.__not_exists_id
- else:
- msg += '\n' + self.__access_msg
- raise DistutilsError(msg)
-
- def check_pth_processing(self):
- """Empirically verify whether .pth files are supported in inst. dir"""
- instdir = self.install_dir
- log.info("Checking .pth file support in %s", instdir)
- pth_file = self.pseudo_tempname() + ".pth"
- ok_file = pth_file + '.ok'
- ok_exists = os.path.exists(ok_file)
+
+ Please make the appropriate changes for your system and try again.
+ """).lstrip()
+
+ def cant_write_to_target(self):
+ msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
+
+ if not os.path.exists(self.install_dir):
+ msg += '\n' + self.__not_exists_id
+ else:
+ msg += '\n' + self.__access_msg
+ raise DistutilsError(msg)
+
+ def check_pth_processing(self):
+ """Empirically verify whether .pth files are supported in inst. dir"""
+ instdir = self.install_dir
+ log.info("Checking .pth file support in %s", instdir)
+ pth_file = self.pseudo_tempname() + ".pth"
+ ok_file = pth_file + '.ok'
+ ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
- try:
- if ok_exists:
- os.unlink(ok_file)
- dirname = os.path.dirname(ok_file)
+ try:
+ if ok_exists:
+ os.unlink(ok_file)
+ dirname = os.path.dirname(ok_file)
pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
- f = open(pth_file, 'w')
- except (OSError, IOError):
- self.cant_write_to_target()
- else:
- try:
+ f = open(pth_file, 'w')
+ except (OSError, IOError):
+ self.cant_write_to_target()
+ else:
+ try:
f.write(tmpl.format(**locals()))
- f.close()
- f = None
- executable = sys.executable
- if os.name == 'nt':
- dirname, basename = os.path.split(executable)
- alt = os.path.join(dirname, 'pythonw.exe')
+ f.close()
+ f = None
+ executable = sys.executable
+ if os.name == 'nt':
+ dirname, basename = os.path.split(executable)
+ alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
- # use pythonw.exe to avoid opening a console window
- executable = alt
-
- from distutils.spawn import spawn
-
- spawn([executable, '-E', '-c', 'pass'], 0)
-
- if os.path.exists(ok_file):
- log.info(
- "TEST PASSED: %s appears to support .pth files",
- instdir
- )
- return True
- finally:
- if f:
- f.close()
- if os.path.exists(ok_file):
- os.unlink(ok_file)
- if os.path.exists(pth_file):
- os.unlink(pth_file)
- if not self.multi_version:
- log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
- return False
-
- def install_egg_scripts(self, dist):
- """Write all the scripts for `dist`, unless scripts are excluded"""
- if not self.exclude_scripts and dist.metadata_isdir('scripts'):
- for script_name in dist.metadata_listdir('scripts'):
- if dist.metadata_isdir('scripts/' + script_name):
- # The "script" is a directory, likely a Python 3
- # __pycache__ directory, so skip it.
- continue
- self.install_script(
- dist, script_name,
- dist.get_metadata('scripts/' + script_name)
- )
- self.install_wrapper_scripts(dist)
-
- def add_output(self, path):
- if os.path.isdir(path):
- for base, dirs, files in os.walk(path):
- for filename in files:
- self.outputs.append(os.path.join(base, filename))
- else:
- self.outputs.append(path)
-
- def not_editable(self, spec):
- if self.editable:
- raise DistutilsArgError(
- "Invalid argument %r: you can't use filenames or URLs "
- "with --editable (except via the --find-links option)."
- % (spec,)
- )
-
- def check_editable(self, spec):
- if not self.editable:
- return
-
- if os.path.exists(os.path.join(self.build_directory, spec.key)):
- raise DistutilsArgError(
- "%r already exists in %s; can't do a checkout there" %
- (spec.key, self.build_directory)
- )
-
+ # use pythonw.exe to avoid opening a console window
+ executable = alt
+
+ from distutils.spawn import spawn
+
+ spawn([executable, '-E', '-c', 'pass'], 0)
+
+ if os.path.exists(ok_file):
+ log.info(
+ "TEST PASSED: %s appears to support .pth files",
+ instdir
+ )
+ return True
+ finally:
+ if f:
+ f.close()
+ if os.path.exists(ok_file):
+ os.unlink(ok_file)
+ if os.path.exists(pth_file):
+ os.unlink(pth_file)
+ if not self.multi_version:
+ log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+ return False
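# The .pth probe above in miniature. A .pth line starting with 'import'
# is executed by site.py at interpreter startup, so the marker file
# appears only if the directory's .pth files are actually honored.
# tempfile.mkdtemp() is used purely to keep the sketch runnable; a fresh
# temp dir is of course not a site dir, so supports_pth will be False.
import os
import subprocess
import sys
import tempfile

instdir = tempfile.mkdtemp()
ok_file = os.path.join(instdir, 'probe.ok')
with open(os.path.join(instdir, 'probe.pth'), 'w') as f:
    f.write("import os; open(%r, 'w').close()\n" % ok_file)
subprocess.call([sys.executable, '-E', '-c', 'pass'])
supports_pth = os.path.exists(ok_file)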
+
+ def install_egg_scripts(self, dist):
+ """Write all the scripts for `dist`, unless scripts are excluded"""
+ if not self.exclude_scripts and dist.metadata_isdir('scripts'):
+ for script_name in dist.metadata_listdir('scripts'):
+ if dist.metadata_isdir('scripts/' + script_name):
+ # The "script" is a directory, likely a Python 3
+ # __pycache__ directory, so skip it.
+ continue
+ self.install_script(
+ dist, script_name,
+ dist.get_metadata('scripts/' + script_name)
+ )
+ self.install_wrapper_scripts(dist)
+
+ def add_output(self, path):
+ if os.path.isdir(path):
+ for base, dirs, files in os.walk(path):
+ for filename in files:
+ self.outputs.append(os.path.join(base, filename))
+ else:
+ self.outputs.append(path)
+
+ def not_editable(self, spec):
+ if self.editable:
+ raise DistutilsArgError(
+ "Invalid argument %r: you can't use filenames or URLs "
+ "with --editable (except via the --find-links option)."
+ % (spec,)
+ )
+
+ def check_editable(self, spec):
+ if not self.editable:
+ return
+
+ if os.path.exists(os.path.join(self.build_directory, spec.key)):
+ raise DistutilsArgError(
+ "%r already exists in %s; can't do a checkout there" %
+ (spec.key, self.build_directory)
+ )
+
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
@@ -648,296 +648,296 @@ class easy_install(Command):
finally:
os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
- def easy_install(self, spec, deps=False):
- if not self.editable:
- self.install_site_py()
-
+ def easy_install(self, spec, deps=False):
+ if not self.editable:
+ self.install_site_py()
+
with self._tmpdir() as tmpdir:
- if not isinstance(spec, Requirement):
- if URL_SCHEME(spec):
- # It's a url, download it to tmpdir and process
- self.not_editable(spec)
+ if not isinstance(spec, Requirement):
+ if URL_SCHEME(spec):
+ # It's a url, download it to tmpdir and process
+ self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
-
- elif os.path.exists(spec):
- # Existing file or directory, just process it directly
- self.not_editable(spec)
- return self.install_item(None, spec, tmpdir, deps, True)
- else:
- spec = parse_requirement_arg(spec)
-
- self.check_editable(spec)
- dist = self.package_index.fetch_distribution(
- spec, tmpdir, self.upgrade, self.editable,
- not self.always_copy, self.local_index
- )
- if dist is None:
- msg = "Could not find suitable distribution for %r" % spec
- if self.always_copy:
- msg += " (--always-copy skips system and development eggs)"
- raise DistutilsError(msg)
- elif dist.precedence == DEVELOP_DIST:
- # .egg-info dists don't need installing, just process deps
- self.process_distribution(spec, dist, deps, "Using")
- return dist
- else:
- return self.install_item(spec, dist.location, tmpdir, deps)
-
- def install_item(self, spec, download, tmpdir, deps, install_needed=False):
-
- # Installation is also needed if the file is in tmpdir or is not an egg
- install_needed = install_needed or self.always_copy
- install_needed = install_needed or os.path.dirname(download) == tmpdir
- install_needed = install_needed or not download.endswith('.egg')
- install_needed = install_needed or (
- self.always_copy_from is not None and
- os.path.dirname(normalize_path(download)) ==
- normalize_path(self.always_copy_from)
- )
-
- if spec and not install_needed:
- # at this point, we know it's a local .egg, we just don't know if
- # it's already installed.
- for dist in self.local_index[spec.project_name]:
- if dist.location == download:
- break
- else:
- install_needed = True # it's not in the local index
-
- log.info("Processing %s", os.path.basename(download))
-
- if install_needed:
- dists = self.install_eggs(spec, download, tmpdir)
- for dist in dists:
- self.process_distribution(spec, dist, deps)
- else:
- dists = [self.egg_distribution(download)]
- self.process_distribution(spec, dists[0], deps, "Using")
-
- if spec is not None:
- for dist in dists:
- if dist in spec:
- return dist
-
- def select_scheme(self, name):
- """Sets the install directories by applying the install schemes."""
- # it's the caller's problem if they supply a bad name!
- scheme = INSTALL_SCHEMES[name]
- for key in SCHEME_KEYS:
- attrname = 'install_' + key
- if getattr(self, attrname) is None:
- setattr(self, attrname, scheme[key])
-
- def process_distribution(self, requirement, dist, deps=True, *info):
- self.update_pth(dist)
- self.package_index.add(dist)
- if dist in self.local_index[dist.key]:
- self.local_index.remove(dist)
- self.local_index.add(dist)
- self.install_egg_scripts(dist)
- self.installed_projects[dist.key] = dist
- log.info(self.installation_report(requirement, dist, *info))
- if (dist.has_metadata('dependency_links.txt') and
- not self.no_find_links):
- self.package_index.add_find_links(
- dist.get_metadata_lines('dependency_links.txt')
- )
- if not deps and not self.always_copy:
- return
- elif requirement is not None and dist.key != requirement.key:
- log.warn("Skipping dependencies for %s", dist)
- return # XXX this is not the distribution we were looking for
- elif requirement is None or dist not in requirement:
- # if we wound up with a different version, resolve what we've got
- distreq = dist.as_requirement()
+
+ elif os.path.exists(spec):
+ # Existing file or directory, just process it directly
+ self.not_editable(spec)
+ return self.install_item(None, spec, tmpdir, deps, True)
+ else:
+ spec = parse_requirement_arg(spec)
+
+ self.check_editable(spec)
+ dist = self.package_index.fetch_distribution(
+ spec, tmpdir, self.upgrade, self.editable,
+ not self.always_copy, self.local_index
+ )
+ if dist is None:
+ msg = "Could not find suitable distribution for %r" % spec
+ if self.always_copy:
+ msg += " (--always-copy skips system and development eggs)"
+ raise DistutilsError(msg)
+ elif dist.precedence == DEVELOP_DIST:
+ # .egg-info dists don't need installing, just process deps
+ self.process_distribution(spec, dist, deps, "Using")
+ return dist
+ else:
+ return self.install_item(spec, dist.location, tmpdir, deps)
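# The dispatch above in outline, reusing the module's URL_SCHEME matcher
# and os import; the spec value is a hypothetical example:
spec = 'PyYAML==5.4'
if URL_SCHEME(spec):
    kind = 'url'          # download to tmpdir, then install_item()
elif os.path.exists(spec):
    kind = 'local path'   # install_item() on the file/directory directly
else:
    kind = 'requirement'  # parse, then fetch via the package index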
+
+ def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+
+ # Installation is also needed if the file is in tmpdir or is not an egg
+ install_needed = install_needed or self.always_copy
+ install_needed = install_needed or os.path.dirname(download) == tmpdir
+ install_needed = install_needed or not download.endswith('.egg')
+ install_needed = install_needed or (
+ self.always_copy_from is not None and
+ os.path.dirname(normalize_path(download)) ==
+ normalize_path(self.always_copy_from)
+ )
+
+ if spec and not install_needed:
+ # at this point, we know it's a local .egg, we just don't know if
+ # it's already installed.
+ for dist in self.local_index[spec.project_name]:
+ if dist.location == download:
+ break
+ else:
+ install_needed = True # it's not in the local index
+
+ log.info("Processing %s", os.path.basename(download))
+
+ if install_needed:
+ dists = self.install_eggs(spec, download, tmpdir)
+ for dist in dists:
+ self.process_distribution(spec, dist, deps)
+ else:
+ dists = [self.egg_distribution(download)]
+ self.process_distribution(spec, dists[0], deps, "Using")
+
+ if spec is not None:
+ for dist in dists:
+ if dist in spec:
+ return dist
+
+ def select_scheme(self, name):
+ """Sets the install directories by applying the install schemes."""
+ # it's the caller's problem if they supply a bad name!
+ scheme = INSTALL_SCHEMES[name]
+ for key in SCHEME_KEYS:
+ attrname = 'install_' + key
+ if getattr(self, attrname) is None:
+ setattr(self, attrname, scheme[key])
+
+ def process_distribution(self, requirement, dist, deps=True, *info):
+ self.update_pth(dist)
+ self.package_index.add(dist)
+ if dist in self.local_index[dist.key]:
+ self.local_index.remove(dist)
+ self.local_index.add(dist)
+ self.install_egg_scripts(dist)
+ self.installed_projects[dist.key] = dist
+ log.info(self.installation_report(requirement, dist, *info))
+ if (dist.has_metadata('dependency_links.txt') and
+ not self.no_find_links):
+ self.package_index.add_find_links(
+ dist.get_metadata_lines('dependency_links.txt')
+ )
+ if not deps and not self.always_copy:
+ return
+ elif requirement is not None and dist.key != requirement.key:
+ log.warn("Skipping dependencies for %s", dist)
+ return # XXX this is not the distribution we were looking for
+ elif requirement is None or dist not in requirement:
+ # if we wound up with a different version, resolve what we've got
+ distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
- log.info("Processing dependencies for %s", requirement)
- try:
- distros = WorkingSet([]).resolve(
- [requirement], self.local_index, self.easy_install
- )
- except DistributionNotFound as e:
- raise DistutilsError(str(e))
- except VersionConflict as e:
- raise DistutilsError(e.report())
- if self.always_copy or self.always_copy_from:
- # Force all the relevant distros to be copied or activated
- for dist in distros:
- if dist.key not in self.installed_projects:
- self.easy_install(dist.as_requirement())
- log.info("Finished processing dependencies for %s", requirement)
-
- def should_unzip(self, dist):
- if self.zip_ok is not None:
- return not self.zip_ok
- if dist.has_metadata('not-zip-safe'):
- return True
- if not dist.has_metadata('zip-safe'):
- return True
- return False
-
- def maybe_move(self, spec, dist_filename, setup_base):
- dst = os.path.join(self.build_directory, spec.key)
- if os.path.exists(dst):
+ log.info("Processing dependencies for %s", requirement)
+ try:
+ distros = WorkingSet([]).resolve(
+ [requirement], self.local_index, self.easy_install
+ )
+ except DistributionNotFound as e:
+ raise DistutilsError(str(e))
+ except VersionConflict as e:
+ raise DistutilsError(e.report())
+ if self.always_copy or self.always_copy_from:
+ # Force all the relevant distros to be copied or activated
+ for dist in distros:
+ if dist.key not in self.installed_projects:
+ self.easy_install(dist.as_requirement())
+ log.info("Finished processing dependencies for %s", requirement)
+
+ def should_unzip(self, dist):
+ if self.zip_ok is not None:
+ return not self.zip_ok
+ if dist.has_metadata('not-zip-safe'):
+ return True
+ if not dist.has_metadata('zip-safe'):
+ return True
+ return False
+
+ def maybe_move(self, spec, dist_filename, setup_base):
+ dst = os.path.join(self.build_directory, spec.key)
+ if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
- log.warn(msg, spec.key, self.build_directory, setup_base)
- return setup_base
- if os.path.isdir(dist_filename):
- setup_base = dist_filename
- else:
- if os.path.dirname(dist_filename) == setup_base:
- os.unlink(dist_filename) # get it out of the tmp dir
- contents = os.listdir(setup_base)
- if len(contents) == 1:
- dist_filename = os.path.join(setup_base, contents[0])
- if os.path.isdir(dist_filename):
- # if the only thing there is a directory, move it instead
- setup_base = dist_filename
- ensure_directory(dst)
- shutil.move(setup_base, dst)
- return dst
-
- def install_wrapper_scripts(self, dist):
- if self.exclude_scripts:
- return
- for args in ScriptWriter.best().get_args(dist):
- self.write_script(*args)
-
- def install_script(self, dist, script_name, script_text, dev_path=None):
- """Generate a legacy script wrapper and install it"""
- spec = str(dist.as_requirement())
- is_script = is_python_script(script_text, script_name)
-
- if is_script:
- body = self._load_template(dev_path) % locals()
- script_text = ScriptWriter.get_header(script_text) + body
+ log.warn(msg, spec.key, self.build_directory, setup_base)
+ return setup_base
+ if os.path.isdir(dist_filename):
+ setup_base = dist_filename
+ else:
+ if os.path.dirname(dist_filename) == setup_base:
+ os.unlink(dist_filename) # get it out of the tmp dir
+ contents = os.listdir(setup_base)
+ if len(contents) == 1:
+ dist_filename = os.path.join(setup_base, contents[0])
+ if os.path.isdir(dist_filename):
+ # if the only thing there is a directory, move it instead
+ setup_base = dist_filename
+ ensure_directory(dst)
+ shutil.move(setup_base, dst)
+ return dst
+
+ def install_wrapper_scripts(self, dist):
+ if self.exclude_scripts:
+ return
+ for args in ScriptWriter.best().get_args(dist):
+ self.write_script(*args)
+
+ def install_script(self, dist, script_name, script_text, dev_path=None):
+ """Generate a legacy script wrapper and install it"""
+ spec = str(dist.as_requirement())
+ is_script = is_python_script(script_text, script_name)
+
+ if is_script:
+ body = self._load_template(dev_path) % locals()
+ script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_bytes(script_text), 'b')
-
- @staticmethod
- def _load_template(dev_path):
- """
- There are a couple of template scripts in the package. This
- function loads one of them and prepares it for use.
- """
+
+ @staticmethod
+ def _load_template(dev_path):
+ """
+ There are a couple of template scripts in the package. This
+ function loads one of them and prepares it for use.
+ """
# See https://github.com/pypa/setuptools/issues/134 for info
- # on script file naming and downstream issues with SVR4
- name = 'script.tmpl'
- if dev_path:
- name = name.replace('.tmpl', ' (dev).tmpl')
-
- raw_bytes = resource_string('setuptools', name)
- return raw_bytes.decode('utf-8')
-
- def write_script(self, script_name, contents, mode="t", blockers=()):
- """Write an executable file to the scripts directory"""
- self.delete_blockers( # clean up old .py/.pyw w/o a script
- [os.path.join(self.script_dir, x) for x in blockers]
- )
- log.info("Installing %s script to %s", script_name, self.script_dir)
- target = os.path.join(self.script_dir, script_name)
- self.add_output(target)
-
+ # on script file naming and downstream issues with SVR4
+ name = 'script.tmpl'
+ if dev_path:
+ name = name.replace('.tmpl', ' (dev).tmpl')
+
+ raw_bytes = resource_string('setuptools', name)
+ return raw_bytes.decode('utf-8')
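# Sketch of the template lookup: pkg_resources.resource_string() reads a
# data file shipped inside a package, here the wrapper template named above.
from pkg_resources import resource_string
template = resource_string('setuptools', 'script.tmpl').decode('utf-8')
# _load_template() switches to 'script (dev).tmpl' when dev_path is set.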
+
+ def write_script(self, script_name, contents, mode="t", blockers=()):
+ """Write an executable file to the scripts directory"""
+ self.delete_blockers( # clean up old .py/.pyw w/o a script
+ [os.path.join(self.script_dir, x) for x in blockers]
+ )
+ log.info("Installing %s script to %s", script_name, self.script_dir)
+ target = os.path.join(self.script_dir, script_name)
+ self.add_output(target)
+
if self.dry_run:
return
- mask = current_umask()
+ mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
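# Worked example of the permission arithmetic above: with the common
# umask 0o022, the script lands as 0o777 - 0o022 = 0o755 (rwxr-xr-x),
# i.e. executable by everyone but writable only by the owner.
assert 0o777 - 0o022 == 0o755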
-
- def install_eggs(self, spec, dist_filename, tmpdir):
- # .egg dirs or files are already built, so just return them
- if dist_filename.lower().endswith('.egg'):
- return [self.install_egg(dist_filename, tmpdir)]
- elif dist_filename.lower().endswith('.exe'):
- return [self.install_exe(dist_filename, tmpdir)]
+
+ def install_eggs(self, spec, dist_filename, tmpdir):
+ # .egg dirs or files are already built, so just return them
+ if dist_filename.lower().endswith('.egg'):
+ return [self.install_egg(dist_filename, tmpdir)]
+ elif dist_filename.lower().endswith('.exe'):
+ return [self.install_exe(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.whl'):
return [self.install_wheel(dist_filename, tmpdir)]
-
- # Anything else, try to extract and build
- setup_base = tmpdir
- if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
- unpack_archive(dist_filename, tmpdir, self.unpack_progress)
- elif os.path.isdir(dist_filename):
- setup_base = os.path.abspath(dist_filename)
-
- if (setup_base.startswith(tmpdir) # something we downloaded
- and self.build_directory and spec is not None):
- setup_base = self.maybe_move(spec, dist_filename, setup_base)
-
- # Find the setup.py file
- setup_script = os.path.join(setup_base, 'setup.py')
-
- if not os.path.exists(setup_script):
- setups = glob(os.path.join(setup_base, '*', 'setup.py'))
- if not setups:
- raise DistutilsError(
- "Couldn't find a setup script in %s" %
- os.path.abspath(dist_filename)
- )
- if len(setups) > 1:
- raise DistutilsError(
- "Multiple setup scripts in %s" %
- os.path.abspath(dist_filename)
- )
- setup_script = setups[0]
-
- # Now run it, and return the result
- if self.editable:
- log.info(self.report_editable(spec, setup_script))
- return []
- else:
- return self.build_and_install(setup_script, setup_base)
-
- def egg_distribution(self, egg_path):
- if os.path.isdir(egg_path):
- metadata = PathMetadata(egg_path, os.path.join(egg_path,
- 'EGG-INFO'))
- else:
- metadata = EggMetadata(zipimport.zipimporter(egg_path))
- return Distribution.from_filename(egg_path, metadata=metadata)
-
- def install_egg(self, egg_path, tmpdir):
+
+ # Anything else, try to extract and build
+ setup_base = tmpdir
+ if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+ unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+ elif os.path.isdir(dist_filename):
+ setup_base = os.path.abspath(dist_filename)
+
+ if (setup_base.startswith(tmpdir) # something we downloaded
+ and self.build_directory and spec is not None):
+ setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+ # Find the setup.py file
+ setup_script = os.path.join(setup_base, 'setup.py')
+
+ if not os.path.exists(setup_script):
+ setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+ if not setups:
+ raise DistutilsError(
+ "Couldn't find a setup script in %s" %
+ os.path.abspath(dist_filename)
+ )
+ if len(setups) > 1:
+ raise DistutilsError(
+ "Multiple setup scripts in %s" %
+ os.path.abspath(dist_filename)
+ )
+ setup_script = setups[0]
+
+ # Now run it, and return the result
+ if self.editable:
+ log.info(self.report_editable(spec, setup_script))
+ return []
+ else:
+ return self.build_and_install(setup_script, setup_base)
+
+ def egg_distribution(self, egg_path):
+ if os.path.isdir(egg_path):
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,
+ 'EGG-INFO'))
+ else:
+ metadata = EggMetadata(zipimport.zipimporter(egg_path))
+ return Distribution.from_filename(egg_path, metadata=metadata)
+
+ def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
- destination = os.path.abspath(destination)
- if not self.dry_run:
- ensure_directory(destination)
-
- dist = self.egg_distribution(egg_path)
- if not samefile(egg_path, destination):
- if os.path.isdir(destination) and not os.path.islink(destination):
- dir_util.remove_tree(destination, dry_run=self.dry_run)
- elif os.path.exists(destination):
+ destination = os.path.abspath(destination)
+ if not self.dry_run:
+ ensure_directory(destination)
+
+ dist = self.egg_distribution(egg_path)
+ if not samefile(egg_path, destination):
+ if os.path.isdir(destination) and not os.path.islink(destination):
+ dir_util.remove_tree(destination, dry_run=self.dry_run)
+ elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
- try:
- new_dist_is_zipped = False
- if os.path.isdir(egg_path):
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copytree, "Copying"
- elif self.should_unzip(dist):
- self.mkpath(destination)
- f, m = self.unpack_and_compile, "Extracting"
- else:
- new_dist_is_zipped = True
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copy2, "Copying"
+ try:
+ new_dist_is_zipped = False
+ if os.path.isdir(egg_path):
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copytree, "Copying"
+ elif self.should_unzip(dist):
+ self.mkpath(destination)
+ f, m = self.unpack_and_compile, "Extracting"
+ else:
+ new_dist_is_zipped = True
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
@@ -951,110 +951,110 @@ class easy_install(Command):
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
- update_dist_caches(destination, fix_zipimporter_caches=False)
- raise
-
- self.add_output(destination)
- return self.egg_distribution(destination)
-
- def install_exe(self, dist_filename, tmpdir):
- # See if it's valid, get data
- cfg = extract_wininst_cfg(dist_filename)
- if cfg is None:
- raise DistutilsError(
- "%s is not a valid distutils Windows .exe" % dist_filename
- )
- # Create a dummy distribution object until we build the real distro
- dist = Distribution(
- None,
- project_name=cfg.get('metadata', 'name'),
- version=cfg.get('metadata', 'version'), platform=get_platform(),
- )
-
- # Convert the .exe to an unpacked egg
+ update_dist_caches(destination, fix_zipimporter_caches=False)
+ raise
+
+ self.add_output(destination)
+ return self.egg_distribution(destination)
+
+ def install_exe(self, dist_filename, tmpdir):
+ # See if it's valid, get data
+ cfg = extract_wininst_cfg(dist_filename)
+ if cfg is None:
+ raise DistutilsError(
+ "%s is not a valid distutils Windows .exe" % dist_filename
+ )
+ # Create a dummy distribution object until we build the real distro
+ dist = Distribution(
+ None,
+ project_name=cfg.get('metadata', 'name'),
+ version=cfg.get('metadata', 'version'), platform=get_platform(),
+ )
+
+ # Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
- egg_tmp = egg_path + '.tmp'
- _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
- pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
- ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
- dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
- self.exe_to_egg(dist_filename, egg_tmp)
-
- # Write EGG-INFO/PKG-INFO
- if not os.path.exists(pkg_inf):
- f = open(pkg_inf, 'w')
- f.write('Metadata-Version: 1.0\n')
- for k, v in cfg.items('metadata'):
- if k != 'target_version':
- f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
- f.close()
- script_dir = os.path.join(_egg_info, 'scripts')
- # delete entry-point scripts to avoid duping
+ egg_tmp = egg_path + '.tmp'
+ _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+ pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
+ ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
+ dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
+ self.exe_to_egg(dist_filename, egg_tmp)
+
+ # Write EGG-INFO/PKG-INFO
+ if not os.path.exists(pkg_inf):
+ f = open(pkg_inf, 'w')
+ f.write('Metadata-Version: 1.0\n')
+ for k, v in cfg.items('metadata'):
+ if k != 'target_version':
+ f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
+ f.close()
+ script_dir = os.path.join(_egg_info, 'scripts')
+ # delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
- # Build .egg file from tmpdir
- bdist_egg.make_zipfile(
+ # Build .egg file from tmpdir
+ bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
- )
- # install the .egg
- return self.install_egg(egg_path, tmpdir)
-
- def exe_to_egg(self, dist_filename, egg_tmp):
- """Extract a bdist_wininst to the directories an egg would use"""
- # Check for .pth file and set up prefix translations
- prefixes = get_exe_prefixes(dist_filename)
- to_compile = []
- native_libs = []
- top_level = {}
-
- def process(src, dst):
- s = src.lower()
- for old, new in prefixes:
- if s.startswith(old):
- src = new + src[len(old):]
- parts = src.split('/')
- dst = os.path.join(egg_tmp, *parts)
- dl = dst.lower()
- if dl.endswith('.pyd') or dl.endswith('.dll'):
- parts[-1] = bdist_egg.strip_module(parts[-1])
- top_level[os.path.splitext(parts[0])[0]] = 1
- native_libs.append(src)
- elif dl.endswith('.py') and old != 'SCRIPTS/':
- top_level[os.path.splitext(parts[0])[0]] = 1
- to_compile.append(dst)
- return dst
- if not src.endswith('.pth'):
- log.warn("WARNING: can't process %s", src)
- return None
-
- # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
- unpack_archive(dist_filename, egg_tmp, process)
- stubs = []
- for res in native_libs:
- if res.lower().endswith('.pyd'): # create stubs for .pyd's
- parts = res.split('/')
- resource = parts[-1]
- parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
- pyfile = os.path.join(egg_tmp, *parts)
- to_compile.append(pyfile)
- stubs.append(pyfile)
- bdist_egg.write_stub(resource, pyfile)
- self.byte_compile(to_compile) # compile .py's
- bdist_egg.write_safety_flag(
- os.path.join(egg_tmp, 'EGG-INFO'),
- bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
-
- for name in 'top_level', 'native_libs':
- if locals()[name]:
- txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
- if not os.path.exists(txt):
- f = open(txt, 'w')
- f.write('\n'.join(locals()[name]) + '\n')
- f.close()
-
+ )
+ # install the .egg
+ return self.install_egg(egg_path, tmpdir)
+
+ def exe_to_egg(self, dist_filename, egg_tmp):
+ """Extract a bdist_wininst to the directories an egg would use"""
+ # Check for .pth file and set up prefix translations
+ prefixes = get_exe_prefixes(dist_filename)
+ to_compile = []
+ native_libs = []
+ top_level = {}
+
+ def process(src, dst):
+ s = src.lower()
+ for old, new in prefixes:
+ if s.startswith(old):
+ src = new + src[len(old):]
+ parts = src.split('/')
+ dst = os.path.join(egg_tmp, *parts)
+ dl = dst.lower()
+ if dl.endswith('.pyd') or dl.endswith('.dll'):
+ parts[-1] = bdist_egg.strip_module(parts[-1])
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ native_libs.append(src)
+ elif dl.endswith('.py') and old != 'SCRIPTS/':
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ to_compile.append(dst)
+ return dst
+ if not src.endswith('.pth'):
+ log.warn("WARNING: can't process %s", src)
+ return None
+
+ # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+ unpack_archive(dist_filename, egg_tmp, process)
+ stubs = []
+ for res in native_libs:
+ if res.lower().endswith('.pyd'): # create stubs for .pyd's
+ parts = res.split('/')
+ resource = parts[-1]
+ parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
+ pyfile = os.path.join(egg_tmp, *parts)
+ to_compile.append(pyfile)
+ stubs.append(pyfile)
+ bdist_egg.write_stub(resource, pyfile)
+ self.byte_compile(to_compile) # compile .py's
+ bdist_egg.write_safety_flag(
+ os.path.join(egg_tmp, 'EGG-INFO'),
+ bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
+
+ for name in 'top_level', 'native_libs':
+ if locals()[name]:
+ txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
+ if not os.path.exists(txt):
+ f = open(txt, 'w')
+ f.write('\n'.join(locals()[name]) + '\n')
+ f.close()
+
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
@@ -1084,318 +1084,318 @@ class easy_install(Command):
self.add_output(destination)
return self.egg_distribution(destination)
- __mv_warning = textwrap.dedent("""
- Because this distribution was installed --multi-version, before you can
- import modules from this package in an application, you will need to
- 'import pkg_resources' and then use a 'require()' call similar to one of
- these examples, in order to select the desired version:
-
- pkg_resources.require("%(name)s") # latest installed version
- pkg_resources.require("%(name)s==%(version)s") # this exact version
- pkg_resources.require("%(name)s>=%(version)s") # this version or higher
- """).lstrip()
-
- __id_warning = textwrap.dedent("""
- Note also that the installation directory must be on sys.path at runtime for
- this to work. (e.g. by being the application's script directory, by being on
- PYTHONPATH, or by being added to sys.path by your code.)
- """)
-
- def installation_report(self, req, dist, what="Installed"):
- """Helpful installation message for display to package users"""
- msg = "\n%(what)s %(eggloc)s%(extras)s"
- if self.multi_version and not self.no_report:
- msg += '\n' + self.__mv_warning
- if self.install_dir not in map(normalize_path, sys.path):
- msg += '\n' + self.__id_warning
-
- eggloc = dist.location
- name = dist.project_name
- version = dist.version
- extras = '' # TODO: self.report_extras(req, dist)
- return msg % locals()
-
- __editable_msg = textwrap.dedent("""
- Extracted editable version of %(spec)s to %(dirname)s
-
- If it uses setuptools in its setup script, you can activate it in
- "development" mode by going to that directory and running::
-
- %(python)s setup.py develop
-
- See the setuptools documentation for the "develop" command for more info.
- """).lstrip()
-
- def report_editable(self, spec, setup_script):
- dirname = os.path.dirname(setup_script)
- python = sys.executable
- return '\n' + self.__editable_msg % locals()
-
- def run_setup(self, setup_script, setup_base, args):
- sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
- sys.modules.setdefault('distutils.command.egg_info', egg_info)
-
- args = list(args)
- if self.verbose > 2:
- v = 'v' * (self.verbose - 1)
- args.insert(0, '-' + v)
- elif self.verbose < 2:
- args.insert(0, '-q')
- if self.dry_run:
- args.insert(0, '-n')
- log.info(
- "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
- )
- try:
- run_setup(setup_script, args)
- except SystemExit as v:
- raise DistutilsError("Setup script exited with %s" % (v.args[0],))
-
- def build_and_install(self, setup_script, setup_base):
- args = ['bdist_egg', '--dist-dir']
-
- dist_dir = tempfile.mkdtemp(
- prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
- )
- try:
- self._set_fetcher_options(os.path.dirname(setup_script))
- args.append(dist_dir)
-
- self.run_setup(setup_script, setup_base, args)
- all_eggs = Environment([dist_dir])
- eggs = []
- for key in all_eggs:
- for dist in all_eggs[key]:
- eggs.append(self.install_egg(dist.location, setup_base))
- if not eggs and not self.dry_run:
- log.warn("No eggs found in %s (setup script problem?)",
- dist_dir)
- return eggs
- finally:
- rmtree(dist_dir)
- log.set_verbosity(self.verbose) # restore our log verbosity
-
- def _set_fetcher_options(self, base):
- """
- When easy_install is about to run bdist_egg on a source dist, that
- source dist might have 'setup_requires' directives, requiring
- additional fetching. Ensure the fetcher options given to easy_install
- are available to that command as well.
- """
- # find the fetch options from easy_install and write them out
- # to the setup.cfg file.
- ei_opts = self.distribution.get_option_dict('easy_install').copy()
- fetch_directives = (
+ __mv_warning = textwrap.dedent("""
+ Because this distribution was installed --multi-version, before you can
+ import modules from this package in an application, you will need to
+ 'import pkg_resources' and then use a 'require()' call similar to one of
+ these examples, in order to select the desired version:
+
+ pkg_resources.require("%(name)s") # latest installed version
+ pkg_resources.require("%(name)s==%(version)s") # this exact version
+ pkg_resources.require("%(name)s>=%(version)s") # this version or higher
+ """).lstrip()
+
+ __id_warning = textwrap.dedent("""
+ Note also that the installation directory must be on sys.path at runtime for
+ this to work. (e.g. by being the application's script directory, by being on
+ PYTHONPATH, or by being added to sys.path by your code.)
+ """)
+
+ def installation_report(self, req, dist, what="Installed"):
+ """Helpful installation message for display to package users"""
+ msg = "\n%(what)s %(eggloc)s%(extras)s"
+ if self.multi_version and not self.no_report:
+ msg += '\n' + self.__mv_warning
+ if self.install_dir not in map(normalize_path, sys.path):
+ msg += '\n' + self.__id_warning
+
+ eggloc = dist.location
+ name = dist.project_name
+ version = dist.version
+ extras = '' # TODO: self.report_extras(req, dist)
+ return msg % locals()
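# The "%(name)s" % locals() idiom used above, in isolation (values are
# hypothetical):
what, eggloc, extras = 'Installed', '/tmp/Foo-1.0-py2.7.egg', ''
report = "\n%(what)s %(eggloc)s%(extras)s" % locals()
# report == '\nInstalled /tmp/Foo-1.0-py2.7.egg'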
+
+ __editable_msg = textwrap.dedent("""
+ Extracted editable version of %(spec)s to %(dirname)s
+
+ If it uses setuptools in its setup script, you can activate it in
+ "development" mode by going to that directory and running::
+
+ %(python)s setup.py develop
+
+ See the setuptools documentation for the "develop" command for more info.
+ """).lstrip()
+
+ def report_editable(self, spec, setup_script):
+ dirname = os.path.dirname(setup_script)
+ python = sys.executable
+ return '\n' + self.__editable_msg % locals()
+
+ def run_setup(self, setup_script, setup_base, args):
+ sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+ sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+ args = list(args)
+ if self.verbose > 2:
+ v = 'v' * (self.verbose - 1)
+ args.insert(0, '-' + v)
+ elif self.verbose < 2:
+ args.insert(0, '-q')
+ if self.dry_run:
+ args.insert(0, '-n')
+ log.info(
+ "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
+ )
+ try:
+ run_setup(setup_script, args)
+ except SystemExit as v:
+ raise DistutilsError("Setup script exited with %s" % (v.args[0],))
+
+ def build_and_install(self, setup_script, setup_base):
+ args = ['bdist_egg', '--dist-dir']
+
+ dist_dir = tempfile.mkdtemp(
+ prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+ )
+ try:
+ self._set_fetcher_options(os.path.dirname(setup_script))
+ args.append(dist_dir)
+
+ self.run_setup(setup_script, setup_base, args)
+ all_eggs = Environment([dist_dir])
+ eggs = []
+ for key in all_eggs:
+ for dist in all_eggs[key]:
+ eggs.append(self.install_egg(dist.location, setup_base))
+ if not eggs and not self.dry_run:
+ log.warn("No eggs found in %s (setup script problem?)",
+ dist_dir)
+ return eggs
+ finally:
+ rmtree(dist_dir)
+ log.set_verbosity(self.verbose) # restore our log verbosity
+
+ def _set_fetcher_options(self, base):
+ """
+ When easy_install is about to run bdist_egg on a source dist, that
+ source dist might have 'setup_requires' directives, requiring
+ additional fetching. Ensure the fetcher options given to easy_install
+ are available to that command as well.
+ """
+ # find the fetch options from easy_install and write them out
+ # to the setup.cfg file.
+ ei_opts = self.distribution.get_option_dict('easy_install').copy()
+ fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
- )
- fetch_options = {}
- for key, val in ei_opts.items():
- if key not in fetch_directives:
- continue
- fetch_options[key.replace('_', '-')] = val[1]
- # create a settings dictionary suitable for `edit_config`
- settings = dict(easy_install=fetch_options)
- cfg_filename = os.path.join(base, 'setup.cfg')
- setopt.edit_config(cfg_filename, settings)
-
- def update_pth(self, dist):
- if self.pth_file is None:
- return
-
- for d in self.pth_file[dist.key]: # drop old entries
- if self.multi_version or d.location != dist.location:
- log.info("Removing %s from easy-install.pth file", d)
- self.pth_file.remove(d)
- if d.location in self.shadow_path:
- self.shadow_path.remove(d.location)
-
- if not self.multi_version:
- if dist.location in self.pth_file.paths:
- log.info(
- "%s is already the active version in easy-install.pth",
+ )
+ fetch_options = {}
+ for key, val in ei_opts.items():
+ if key not in fetch_directives:
+ continue
+ fetch_options[key.replace('_', '-')] = val[1]
+ # create a settings dictionary suitable for `edit_config`
+ settings = dict(easy_install=fetch_options)
+ cfg_filename = os.path.join(base, 'setup.cfg')
+ setopt.edit_config(cfg_filename, settings)
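# What the rewrite above produces, with hypothetical values: edit_config()
# merges an [easy_install] section into the source dist's setup.cfg so
# that setup_requires fetches obey the same options, e.g.
#
#   [easy_install]
#   index-url = https://pypi.org/simple/
#
from setuptools.command import setopt
setopt.edit_config(
    'setup.cfg',  # illustrative path; dry_run avoids writing anything
    dict(easy_install={'index-url': 'https://pypi.org/simple/'}),
    dry_run=True,
)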
+
+ def update_pth(self, dist):
+ if self.pth_file is None:
+ return
+
+ for d in self.pth_file[dist.key]: # drop old entries
+ if self.multi_version or d.location != dist.location:
+ log.info("Removing %s from easy-install.pth file", d)
+ self.pth_file.remove(d)
+ if d.location in self.shadow_path:
+ self.shadow_path.remove(d.location)
+
+ if not self.multi_version:
+ if dist.location in self.pth_file.paths:
+ log.info(
+ "%s is already the active version in easy-install.pth",
dist,
- )
- else:
- log.info("Adding %s to easy-install.pth file", dist)
- self.pth_file.add(dist) # add new entry
- if dist.location not in self.shadow_path:
- self.shadow_path.append(dist.location)
-
- if not self.dry_run:
-
- self.pth_file.save()
-
- if dist.key == 'setuptools':
- # Ensure that setuptools itself never becomes unavailable!
- # XXX should this check for latest version?
- filename = os.path.join(self.install_dir, 'setuptools.pth')
- if os.path.islink(filename):
- os.unlink(filename)
- f = open(filename, 'wt')
- f.write(self.pth_file.make_relative(dist.location) + '\n')
- f.close()
-
- def unpack_progress(self, src, dst):
- # Progress filter for unpacking
- log.debug("Unpacking %s to %s", src, dst)
- return dst # only unpack-and-compile skips files for dry run
-
- def unpack_and_compile(self, egg_path, destination):
- to_compile = []
- to_chmod = []
-
- def pf(src, dst):
- if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
- to_compile.append(dst)
- elif dst.endswith('.dll') or dst.endswith('.so'):
- to_chmod.append(dst)
- self.unpack_progress(src, dst)
- return not self.dry_run and dst or None
-
- unpack_archive(egg_path, destination, pf)
- self.byte_compile(to_compile)
- if not self.dry_run:
- for f in to_chmod:
- mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
- chmod(f, mode)
-
- def byte_compile(self, to_compile):
- if sys.dont_write_bytecode:
- return
-
- from distutils.util import byte_compile
-
- try:
- # try to make the byte compile messages quieter
- log.set_verbosity(self.verbose - 1)
-
- byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
- if self.optimize:
- byte_compile(
- to_compile, optimize=self.optimize, force=1,
+ )
+ else:
+ log.info("Adding %s to easy-install.pth file", dist)
+ self.pth_file.add(dist) # add new entry
+ if dist.location not in self.shadow_path:
+ self.shadow_path.append(dist.location)
+
+ if not self.dry_run:
+
+ self.pth_file.save()
+
+ if dist.key == 'setuptools':
+ # Ensure that setuptools itself never becomes unavailable!
+ # XXX should this check for latest version?
+ filename = os.path.join(self.install_dir, 'setuptools.pth')
+ if os.path.islink(filename):
+ os.unlink(filename)
+ f = open(filename, 'wt')
+ f.write(self.pth_file.make_relative(dist.location) + '\n')
+ f.close()
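# The setuptools.pth written above holds a single path line that site.py
# prepends to sys.path at startup; make_relative() keeps the entry
# relative so the install dir can be relocated, e.g. (hypothetical):
#
#   ./setuptools-41.0.0-py2.7.egg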
+
+ def unpack_progress(self, src, dst):
+ # Progress filter for unpacking
+ log.debug("Unpacking %s to %s", src, dst)
+ return dst # only unpack-and-compile skips files for dry run
+
+ def unpack_and_compile(self, egg_path, destination):
+ to_compile = []
+ to_chmod = []
+
+ def pf(src, dst):
+ if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+ to_compile.append(dst)
+ elif dst.endswith('.dll') or dst.endswith('.so'):
+ to_chmod.append(dst)
+ self.unpack_progress(src, dst)
+ return not self.dry_run and dst or None
+
+ unpack_archive(egg_path, destination, pf)
+ self.byte_compile(to_compile)
+ if not self.dry_run:
+ for f in to_chmod:
+ mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
+ chmod(f, mode)
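# Worked example of the mode arithmetic above: OR with 0o555 guarantees
# read+execute for everyone, then AND with 0o7755 strips group/other
# write bits while preserving any setuid/setgid/sticky bits.
assert (0o640 | 0o555) & 0o7755 == 0o755
assert (0o4777 | 0o555) & 0o7755 == 0o4755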
+
+ def byte_compile(self, to_compile):
+ if sys.dont_write_bytecode:
+ return
+
+ from distutils.util import byte_compile
+
+ try:
+ # try to make the byte compile messages quieter
+ log.set_verbosity(self.verbose - 1)
+
+ byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+ if self.optimize:
+ byte_compile(
+ to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
- )
- finally:
- log.set_verbosity(self.verbose) # restore original verbosity
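# distutils.util.byte_compile() in isolation (the file list is
# hypothetical): force=1 recompiles even when timestamps look fresh, and
# optimize=1/2 correspond to running under -O/-OO.
from distutils.util import byte_compile
byte_compile(['pkg/module.py'], optimize=0, force=1, dry_run=1)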
-
- __no_default_msg = textwrap.dedent("""
- bad install directory or PYTHONPATH
-
- You are attempting to install a package to a directory that is not
- on PYTHONPATH and which Python does not read ".pth" files from. The
- installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
-
- and your PYTHONPATH environment variable currently contains:
-
- %r
-
- Here are some of your options for correcting the problem:
-
- * You can choose a different installation directory, i.e., one that is
- on PYTHONPATH or supports .pth files
-
- * You can add the installation directory to the PYTHONPATH environment
- variable. (It must then also be on PYTHONPATH whenever you run
- Python and want to use the package(s) you are installing.)
-
- * You can set up the installation directory to support ".pth" files by
- using one of the approaches described here:
-
+ )
+ finally:
+ log.set_verbosity(self.verbose) # restore original verbosity
+
+ __no_default_msg = textwrap.dedent("""
+ bad install directory or PYTHONPATH
+
+ You are attempting to install a package to a directory that is not
+ on PYTHONPATH and which Python does not read ".pth" files from. The
+ installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
+
+ and your PYTHONPATH environment variable currently contains:
+
+ %r
+
+ Here are some of your options for correcting the problem:
+
+ * You can choose a different installation directory, i.e., one that is
+ on PYTHONPATH or supports .pth files
+
+ * You can add the installation directory to the PYTHONPATH environment
+ variable. (It must then also be on PYTHONPATH whenever you run
+ Python and want to use the package(s) you are installing.)
+
+ * You can set up the installation directory to support ".pth" files by
+ using one of the approaches described here:
+
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
-
-
- Please make the appropriate changes for your system and try again.""").lstrip()
-
- def no_default_version_msg(self):
- template = self.__no_default_msg
- return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
-
- def install_site_py(self):
- """Make sure there's a site.py in the target dir, if needed"""
-
- if self.sitepy_installed:
- return # already did it, or don't need to
-
- sitepy = os.path.join(self.install_dir, "site.py")
- source = resource_string("setuptools", "site-patch.py")
+
+
+ Please make the appropriate changes for your system and try again.""").lstrip()
+
+ def no_default_version_msg(self):
+ template = self.__no_default_msg
+ return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
+
+ def install_site_py(self):
+ """Make sure there's a site.py in the target dir, if needed"""
+
+ if self.sitepy_installed:
+ return # already did it, or don't need to
+
+ sitepy = os.path.join(self.install_dir, "site.py")
+ source = resource_string("setuptools", "site-patch.py")
source = source.decode('utf-8')
- current = ""
-
- if os.path.exists(sitepy):
- log.debug("Checking existing site.py in %s", self.install_dir)
+ current = ""
+
+ if os.path.exists(sitepy):
+ log.debug("Checking existing site.py in %s", self.install_dir)
with io.open(sitepy) as strm:
current = strm.read()
-
- if not current.startswith('def __boot():'):
- raise DistutilsError(
- "%s is not a setuptools-generated site.py; please"
- " remove it." % sitepy
- )
-
- if current != source:
- log.info("Creating %s", sitepy)
- if not self.dry_run:
- ensure_directory(sitepy)
+
+ if not current.startswith('def __boot():'):
+ raise DistutilsError(
+ "%s is not a setuptools-generated site.py; please"
+ " remove it." % sitepy
+ )
+
+ if current != source:
+ log.info("Creating %s", sitepy)
+ if not self.dry_run:
+ ensure_directory(sitepy)
with io.open(sitepy, 'w', encoding='utf-8') as strm:
strm.write(source)
- self.byte_compile([sitepy])
-
- self.sitepy_installed = True
-
- def create_home_path(self):
- """Create directories under ~."""
- if not self.user:
- return
- home = convert_path(os.path.expanduser("~"))
- for name, path in six.iteritems(self.config_vars):
- if path.startswith(home) and not os.path.isdir(path):
- self.debug_print("os.makedirs('%s', 0o700)" % path)
- os.makedirs(path, 0o700)
-
- INSTALL_SCHEMES = dict(
- posix=dict(
- install_dir='$base/lib/python$py_version_short/site-packages',
- script_dir='$base/bin',
- ),
- )
-
- DEFAULT_SCHEME = dict(
- install_dir='$base/Lib/site-packages',
- script_dir='$base/Scripts',
- )
-
- def _expand(self, *attrs):
- config_vars = self.get_finalized_command('install').config_vars
-
- if self.prefix:
- # Set default install_dir/scripts from --prefix
- config_vars = config_vars.copy()
- config_vars['base'] = self.prefix
- scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
- for attr, val in scheme.items():
- if getattr(self, attr, None) is None:
- setattr(self, attr, val)
-
- from distutils.util import subst_vars
-
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- val = subst_vars(val, config_vars)
- if os.name == 'posix':
- val = os.path.expanduser(val)
- setattr(self, attr, val)
-
-
+ self.byte_compile([sitepy])
+
+ self.sitepy_installed = True
+
+ def create_home_path(self):
+ """Create directories under ~."""
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
+ for name, path in six.iteritems(self.config_vars):
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0o700)" % path)
+ os.makedirs(path, 0o700)
+
+ INSTALL_SCHEMES = dict(
+ posix=dict(
+ install_dir='$base/lib/python$py_version_short/site-packages',
+ script_dir='$base/bin',
+ ),
+ )
+
+ DEFAULT_SCHEME = dict(
+ install_dir='$base/Lib/site-packages',
+ script_dir='$base/Scripts',
+ )
+
+ def _expand(self, *attrs):
+ config_vars = self.get_finalized_command('install').config_vars
+
+ if self.prefix:
+ # Set default install_dir/scripts from --prefix
+ config_vars = config_vars.copy()
+ config_vars['base'] = self.prefix
+ scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
+ for attr, val in scheme.items():
+ if getattr(self, attr, None) is None:
+ setattr(self, attr, val)
+
+ from distutils.util import subst_vars
+
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ val = subst_vars(val, config_vars)
+ if os.name == 'posix':
+ val = os.path.expanduser(val)
+ setattr(self, attr, val)
+
+
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
-def get_site_dirs():
+def get_site_dirs():
"""
Return a list of 'site' dirs
"""
@@ -1405,14 +1405,14 @@ def get_site_dirs():
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
- prefixes = [sys.prefix]
- if sys.exec_prefix != sys.prefix:
- prefixes.append(sys.exec_prefix)
- for prefix in prefixes:
- if prefix:
- if sys.platform in ('os2emx', 'riscos'):
- sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
- elif os.sep == '/':
+ prefixes = [sys.prefix]
+ if sys.exec_prefix != sys.prefix:
+ prefixes.append(sys.exec_prefix)
+ for prefix in prefixes:
+ if prefix:
+ if sys.platform in ('os2emx', 'riscos'):
+ sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
+ elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
@@ -1422,18 +1422,18 @@ def get_site_dirs():
),
os.path.join(prefix, "lib", "site-python"),
])
- else:
+ else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
- if sys.platform == 'darwin':
- # for framework builds *only* we add the standard Apple
- # locations. Currently only per-user, but /Library and
- # /Network/Library could be added too
- if 'Python.framework' in prefix:
- home = os.environ.get('HOME')
- if home:
+ if sys.platform == 'darwin':
+ # for framework builds *only* we add the standard Apple
+ # locations. Currently only per-user, but /Library and
+ # /Network/Library could be added too
+ if 'Python.framework' in prefix:
+ home = os.environ.get('HOME')
+ if home:
home_sp = os.path.join(
home,
'Library',
@@ -1442,602 +1442,602 @@ def get_site_dirs():
'site-packages',
)
sitedirs.append(home_sp)
- lib_paths = get_path('purelib'), get_path('platlib')
- for site_lib in lib_paths:
- if site_lib not in sitedirs:
- sitedirs.append(site_lib)
-
- if site.ENABLE_USER_SITE:
- sitedirs.append(site.USER_SITE)
-
+ lib_paths = get_path('purelib'), get_path('platlib')
+ for site_lib in lib_paths:
+ if site_lib not in sitedirs:
+ sitedirs.append(site_lib)
+
+ if site.ENABLE_USER_SITE:
+ sitedirs.append(site.USER_SITE)
+
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
- sitedirs = list(map(normalize_path, sitedirs))
-
- return sitedirs
-
-
-def expand_paths(inputs):
- """Yield sys.path directories that might contain "old-style" packages"""
-
- seen = {}
-
- for dirname in inputs:
- dirname = normalize_path(dirname)
- if dirname in seen:
- continue
-
- seen[dirname] = 1
- if not os.path.isdir(dirname):
- continue
-
- files = os.listdir(dirname)
- yield dirname, files
-
- for name in files:
- if not name.endswith('.pth'):
- # We only care about the .pth files
- continue
- if name in ('easy-install.pth', 'setuptools.pth'):
- # Ignore .pth files that we control
- continue
-
- # Read the .pth file
- f = open(os.path.join(dirname, name))
- lines = list(yield_lines(f))
- f.close()
-
- # Yield existing non-dupe, non-import directory lines from it
- for line in lines:
- if not line.startswith("import"):
- line = normalize_path(line.rstrip())
- if line not in seen:
- seen[line] = 1
- if not os.path.isdir(line):
- continue
- yield line, os.listdir(line)
-
-
-def extract_wininst_cfg(dist_filename):
- """Extract configuration data from a bdist_wininst .exe
-
- Returns a configparser.RawConfigParser, or None
- """
- f = open(dist_filename, 'rb')
- try:
- endrec = zipfile._EndRecData(f)
- if endrec is None:
- return None
-
- prepended = (endrec[9] - endrec[5]) - endrec[6]
- if prepended < 12: # no wininst data here
- return None
- f.seek(prepended - 12)
-
- tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
- if tag not in (0x1234567A, 0x1234567B):
- return None # not a valid tag
-
- f.seek(prepended - (12 + cfglen))
+ sitedirs = list(map(normalize_path, sitedirs))
+
+ return sitedirs
+
+
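A minimal sketch of the prefix enumeration above, assuming a standard CPython layout; the helper name candidate_site_dirs is illustrative and not part of this module:

    import os
    import sys

    def candidate_site_dirs():
        # Illustrative only: derive the conventional site-packages locations
        # from sys.prefix / sys.exec_prefix, as get_site_dirs() does.
        prefixes = [sys.prefix]
        if sys.exec_prefix != sys.prefix:
            prefixes.append(sys.exec_prefix)
        pyver = 'python%d.%d' % sys.version_info[:2]
        for prefix in prefixes:
            if os.sep == '/':  # POSIX layout
                yield os.path.join(prefix, 'lib', pyver, 'site-packages')
            else:              # Windows layout
                yield os.path.join(prefix, 'Lib', 'site-packages')

    print(list(candidate_site_dirs()))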
+def expand_paths(inputs):
+ """Yield sys.path directories that might contain "old-style" packages"""
+
+ seen = {}
+
+ for dirname in inputs:
+ dirname = normalize_path(dirname)
+ if dirname in seen:
+ continue
+
+ seen[dirname] = 1
+ if not os.path.isdir(dirname):
+ continue
+
+ files = os.listdir(dirname)
+ yield dirname, files
+
+ for name in files:
+ if not name.endswith('.pth'):
+ # We only care about the .pth files
+ continue
+ if name in ('easy-install.pth', 'setuptools.pth'):
+ # Ignore .pth files that we control
+ continue
+
+ # Read the .pth file
+ f = open(os.path.join(dirname, name))
+ lines = list(yield_lines(f))
+ f.close()
+
+ # Yield existing non-dupe, non-import directory lines from it
+ for line in lines:
+ if not line.startswith("import"):
+ line = normalize_path(line.rstrip())
+ if line not in seen:
+ seen[line] = 1
+ if not os.path.isdir(line):
+ continue
+ yield line, os.listdir(line)
+
+
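A hedged sketch of the per-file .pth filtering that expand_paths performs; the file name in the usage comment is hypothetical:

    def pth_entries(pth_path):
        # Illustrative only: yield directory lines from a .pth file, skipping
        # blank lines, comments and 'import' lines, as expand_paths() does.
        with open(pth_path) as f:
            for line in f:
                line = line.rstrip()
                if not line or line.startswith('#') or line.startswith('import'):
                    continue
                yield line

    # Hypothetical usage:
    #   for entry in pth_entries('site-packages/extra.pth'):
    #       print(entry)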
+def extract_wininst_cfg(dist_filename):
+ """Extract configuration data from a bdist_wininst .exe
+
+ Returns a configparser.RawConfigParser, or None
+ """
+ f = open(dist_filename, 'rb')
+ try:
+ endrec = zipfile._EndRecData(f)
+ if endrec is None:
+ return None
+
+ prepended = (endrec[9] - endrec[5]) - endrec[6]
+ if prepended < 12: # no wininst data here
+ return None
+ f.seek(prepended - 12)
+
+ tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
+ if tag not in (0x1234567A, 0x1234567B):
+ return None # not a valid tag
+
+ f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
- try:
- part = f.read(cfglen)
- # Read up to the first null byte.
- config = part.split(b'\0', 1)[0]
- # Now the config is in bytes, but for RawConfigParser, it should
- # be text, so decode it.
- config = config.decode(sys.getfilesystemencoding())
- cfg.readfp(six.StringIO(config))
- except configparser.Error:
- return None
- if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
- return None
- return cfg
-
- finally:
- f.close()
-
-
-def get_exe_prefixes(exe_filename):
- """Get exe->egg path translations for a given .exe file"""
-
- prefixes = [
+ try:
+ part = f.read(cfglen)
+ # Read up to the first null byte.
+ config = part.split(b'\0', 1)[0]
+ # Now the config is in bytes, but for RawConfigParser, it should
+ # be text, so decode it.
+ config = config.decode(sys.getfilesystemencoding())
+ cfg.readfp(six.StringIO(config))
+ except configparser.Error:
+ return None
+ if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+ return None
+ return cfg
+
+ finally:
+ f.close()
+
+
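The 12-byte trailer parsed above is three little-endian 32-bit integers: a magic tag, the config length, and the bitmap length. A self-contained round trip on synthetic data:

    import struct

    # Pack a synthetic wininst trailer: (tag, cfglen, bmlen), little-endian.
    trailer = struct.pack("<iii", 0x1234567A, 512, 1024)
    tag, cfglen, bmlen = struct.unpack("<iii", trailer)
    assert tag in (0x1234567A, 0x1234567B)  # the two valid tags checked above
    print(hex(tag), cfglen, bmlen)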
+def get_exe_prefixes(exe_filename):
+ """Get exe->egg path translations for a given .exe file"""
+
+ prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
- ('PLATLIB/', ''),
- ('SCRIPTS/', 'EGG-INFO/scripts/'),
- ('DATA/lib/site-packages', ''),
- ]
- z = zipfile.ZipFile(exe_filename)
- try:
- for info in z.infolist():
- name = info.filename
- parts = name.split('/')
- if len(parts) == 3 and parts[2] == 'PKG-INFO':
- if parts[1].endswith('.egg-info'):
- prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
- break
- if len(parts) != 2 or not name.endswith('.pth'):
- continue
- if name.endswith('-nspkg.pth'):
- continue
- if parts[0].upper() in ('PURELIB', 'PLATLIB'):
- contents = z.read(name)
+ ('PLATLIB/', ''),
+ ('SCRIPTS/', 'EGG-INFO/scripts/'),
+ ('DATA/lib/site-packages', ''),
+ ]
+ z = zipfile.ZipFile(exe_filename)
+ try:
+ for info in z.infolist():
+ name = info.filename
+ parts = name.split('/')
+ if len(parts) == 3 and parts[2] == 'PKG-INFO':
+ if parts[1].endswith('.egg-info'):
+ prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
+ break
+ if len(parts) != 2 or not name.endswith('.pth'):
+ continue
+ if name.endswith('-nspkg.pth'):
+ continue
+ if parts[0].upper() in ('PURELIB', 'PLATLIB'):
+ contents = z.read(name)
if not six.PY2:
- contents = contents.decode()
- for pth in yield_lines(contents):
- pth = pth.strip().replace('\\', '/')
- if not pth.startswith('import'):
- prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
- finally:
- z.close()
- prefixes = [(x.lower(), y) for x, y in prefixes]
- prefixes.sort()
- prefixes.reverse()
- return prefixes
-
-
-class PthDistributions(Environment):
- """A .pth file with Distribution paths in it"""
-
- dirty = False
-
- def __init__(self, filename, sitedirs=()):
- self.filename = filename
- self.sitedirs = list(map(normalize_path, sitedirs))
- self.basedir = normalize_path(os.path.dirname(self.filename))
- self._load()
- Environment.__init__(self, [], None, None)
- for path in yield_lines(self.paths):
- list(map(self.add, find_distributions(path, True)))
-
- def _load(self):
- self.paths = []
- saw_import = False
- seen = dict.fromkeys(self.sitedirs)
- if os.path.isfile(self.filename):
- f = open(self.filename, 'rt')
- for line in f:
- if line.startswith('import'):
- saw_import = True
- continue
- path = line.rstrip()
- self.paths.append(path)
- if not path.strip() or path.strip().startswith('#'):
- continue
- # skip non-existent paths, in case somebody deleted a package
- # manually, and duplicate paths as well
- path = self.paths[-1] = normalize_path(
- os.path.join(self.basedir, path)
- )
- if not os.path.exists(path) or path in seen:
- self.paths.pop() # skip it
- self.dirty = True # we cleaned up, so we're dirty now :)
- continue
- seen[path] = 1
- f.close()
-
- if self.paths and not saw_import:
- self.dirty = True # ensure anything we touch has import wrappers
- while self.paths and not self.paths[-1].strip():
- self.paths.pop()
-
- def save(self):
- """Write changed .pth file back to disk"""
- if not self.dirty:
- return
-
- rel_paths = list(map(self.make_relative, self.paths))
- if rel_paths:
- log.debug("Saving %s", self.filename)
- lines = self._wrap_lines(rel_paths)
- data = '\n'.join(lines) + '\n'
-
- if os.path.islink(self.filename):
- os.unlink(self.filename)
- with open(self.filename, 'wt') as f:
- f.write(data)
-
- elif os.path.exists(self.filename):
- log.debug("Deleting empty %s", self.filename)
- os.unlink(self.filename)
-
- self.dirty = False
-
- @staticmethod
- def _wrap_lines(lines):
- return lines
-
- def add(self, dist):
- """Add `dist` to the distribution map"""
- new_path = (
- dist.location not in self.paths and (
- dist.location not in self.sitedirs or
- # account for '.' being in PYTHONPATH
- dist.location == os.getcwd()
- )
- )
- if new_path:
- self.paths.append(dist.location)
- self.dirty = True
- Environment.add(self, dist)
-
- def remove(self, dist):
- """Remove `dist` from the distribution map"""
- while dist.location in self.paths:
- self.paths.remove(dist.location)
- self.dirty = True
- Environment.remove(self, dist)
-
- def make_relative(self, path):
- npath, last = os.path.split(normalize_path(path))
- baselen = len(self.basedir)
- parts = [last]
- sep = os.altsep == '/' and '/' or os.sep
- while len(npath) >= baselen:
- if npath == self.basedir:
- parts.append(os.curdir)
- parts.reverse()
- return sep.join(parts)
- npath, last = os.path.split(npath)
- parts.append(last)
- else:
- return path
-
-
-class RewritePthDistributions(PthDistributions):
- @classmethod
- def _wrap_lines(cls, lines):
- yield cls.prelude
- for line in lines:
- yield line
- yield cls.postlude
-
+ contents = contents.decode()
+ for pth in yield_lines(contents):
+ pth = pth.strip().replace('\\', '/')
+ if not pth.startswith('import'):
+ prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
+ finally:
+ z.close()
+ prefixes = [(x.lower(), y) for x, y in prefixes]
+ prefixes.sort()
+ prefixes.reverse()
+ return prefixes
+
+
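A sketch of how the resulting (prefix, replacement) pairs map archive member names onto egg-relative paths; the reverse sort above makes the most specific prefix win. Names below are illustrative:

    def translate(name, prefixes):
        # Illustrative only: apply the first matching (old, new) prefix pair,
        # comparing case-insensitively since get_exe_prefixes() lower-cases keys.
        lowered = name.lower()
        for old, new in prefixes:
            if lowered.startswith(old):
                return new + name[len(old):]
        return None  # member does not get installed

    prefixes = [('scripts/', 'EGG-INFO/scripts/'), ('purelib/', '')]
    print(translate('PURELIB/pkg/__init__.py', prefixes))  # pkg/__init__.py
    print(translate('SCRIPTS/tool.py', prefixes))          # EGG-INFO/scripts/tool.py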
+class PthDistributions(Environment):
+ """A .pth file with Distribution paths in it"""
+
+ dirty = False
+
+ def __init__(self, filename, sitedirs=()):
+ self.filename = filename
+ self.sitedirs = list(map(normalize_path, sitedirs))
+ self.basedir = normalize_path(os.path.dirname(self.filename))
+ self._load()
+ Environment.__init__(self, [], None, None)
+ for path in yield_lines(self.paths):
+ list(map(self.add, find_distributions(path, True)))
+
+ def _load(self):
+ self.paths = []
+ saw_import = False
+ seen = dict.fromkeys(self.sitedirs)
+ if os.path.isfile(self.filename):
+ f = open(self.filename, 'rt')
+ for line in f:
+ if line.startswith('import'):
+ saw_import = True
+ continue
+ path = line.rstrip()
+ self.paths.append(path)
+ if not path.strip() or path.strip().startswith('#'):
+ continue
+ # skip non-existent paths, in case somebody deleted a package
+ # manually, and duplicate paths as well
+ path = self.paths[-1] = normalize_path(
+ os.path.join(self.basedir, path)
+ )
+ if not os.path.exists(path) or path in seen:
+ self.paths.pop() # skip it
+ self.dirty = True # we cleaned up, so we're dirty now :)
+ continue
+ seen[path] = 1
+ f.close()
+
+ if self.paths and not saw_import:
+ self.dirty = True # ensure anything we touch has import wrappers
+ while self.paths and not self.paths[-1].strip():
+ self.paths.pop()
+
+ def save(self):
+ """Write changed .pth file back to disk"""
+ if not self.dirty:
+ return
+
+ rel_paths = list(map(self.make_relative, self.paths))
+ if rel_paths:
+ log.debug("Saving %s", self.filename)
+ lines = self._wrap_lines(rel_paths)
+ data = '\n'.join(lines) + '\n'
+
+ if os.path.islink(self.filename):
+ os.unlink(self.filename)
+ with open(self.filename, 'wt') as f:
+ f.write(data)
+
+ elif os.path.exists(self.filename):
+ log.debug("Deleting empty %s", self.filename)
+ os.unlink(self.filename)
+
+ self.dirty = False
+
+ @staticmethod
+ def _wrap_lines(lines):
+ return lines
+
+ def add(self, dist):
+ """Add `dist` to the distribution map"""
+ new_path = (
+ dist.location not in self.paths and (
+ dist.location not in self.sitedirs or
+ # account for '.' being in PYTHONPATH
+ dist.location == os.getcwd()
+ )
+ )
+ if new_path:
+ self.paths.append(dist.location)
+ self.dirty = True
+ Environment.add(self, dist)
+
+ def remove(self, dist):
+ """Remove `dist` from the distribution map"""
+ while dist.location in self.paths:
+ self.paths.remove(dist.location)
+ self.dirty = True
+ Environment.remove(self, dist)
+
+ def make_relative(self, path):
+ npath, last = os.path.split(normalize_path(path))
+ baselen = len(self.basedir)
+ parts = [last]
+ sep = os.altsep == '/' and '/' or os.sep
+ while len(npath) >= baselen:
+ if npath == self.basedir:
+ parts.append(os.curdir)
+ parts.reverse()
+ return sep.join(parts)
+ npath, last = os.path.split(npath)
+ parts.append(last)
+ else:
+ return path
+
+
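make_relative walks up from the target path until it reaches the .pth file's base directory, collecting components on the way; a standalone sketch assuming already-normalized POSIX paths:

    import os

    def make_relative(path, basedir):
        # Illustrative only: mirrors PthDistributions.make_relative for
        # normalized POSIX paths; returns `path` unchanged if not under basedir.
        npath, last = os.path.split(path)
        parts = [last]
        while len(npath) >= len(basedir):
            if npath == basedir:
                parts.append(os.curdir)
                parts.reverse()
                return '/'.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        return path

    print(make_relative('/srv/sp/foo/bar', '/srv/sp'))  # ./foo/bar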
+class RewritePthDistributions(PthDistributions):
+ @classmethod
+ def _wrap_lines(cls, lines):
+ yield cls.prelude
+ for line in lines:
+ yield line
+ yield cls.postlude
+
prelude = _one_liner("""
- import sys
- sys.__plen = len(sys.path)
- """)
+ import sys
+ sys.__plen = len(sys.path)
+ """)
postlude = _one_liner("""
- import sys
- new = sys.path[sys.__plen:]
- del sys.path[sys.__plen:]
- p = getattr(sys, '__egginsert', 0)
- sys.path[p:p] = new
- sys.__egginsert = p + len(new)
- """)
-
-
+ import sys
+ new = sys.path[sys.__plen:]
+ del sys.path[sys.__plen:]
+ p = getattr(sys, '__egginsert', 0)
+ sys.path[p:p] = new
+ sys.__egginsert = p + len(new)
+ """)
+
+
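What the two wrappers do at interpreter startup, shown on a plain list: the prelude records len(sys.path) before the .pth entries are appended, and the postlude moves exactly those entries to the __egginsert position:

    # Simulate the prelude/postlude splice using a plain list for sys.path.
    path = ['stdlib', 'site-packages']

    plen = len(path)            # prelude: sys.__plen = len(sys.path)
    path += ['egg-a', 'egg-b']  # the .pth lines appended between the wrappers

    new = path[plen:]           # postlude: cut the appended entries ...
    del path[plen:]
    p = 0                       # ... and splice them at sys.__egginsert (0 here)
    path[p:p] = new
    print(path)                 # ['egg-a', 'egg-b', 'stdlib', 'site-packages']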
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
- PthDistributions = RewritePthDistributions
-
-
-def _first_line_re():
- """
- Return a regular expression based on first_line_re suitable for matching
- strings.
- """
- if isinstance(first_line_re.pattern, str):
- return first_line_re
-
- # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
- return re.compile(first_line_re.pattern.decode())
-
-
-def auto_chmod(func, arg, exc):
+ PthDistributions = RewritePthDistributions
+
+
+def _first_line_re():
+ """
+ Return a regular expression based on first_line_re suitable for matching
+ strings.
+ """
+ if isinstance(first_line_re.pattern, str):
+ return first_line_re
+
+ # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
+ return re.compile(first_line_re.pattern.decode())
+
+
+def auto_chmod(func, arg, exc):
if func in [os.unlink, os.remove] and os.name == 'nt':
- chmod(arg, stat.S_IWRITE)
- return func(arg)
- et, ev, _ = sys.exc_info()
- six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
-
-
-def update_dist_caches(dist_path, fix_zipimporter_caches):
- """
- Fix any globally cached `dist_path` related data
-
- `dist_path` should be a path of a newly installed egg distribution (zipped
- or unzipped).
-
- sys.path_importer_cache contains finder objects that have been cached when
- importing data from the original distribution. Any such finders need to be
- cleared since the replacement distribution might be packaged differently,
- e.g. a zipped egg distribution might get replaced with an unzipped egg
- folder or vice versa. Having the old finders cached may then cause Python
- to attempt loading modules from the replacement distribution using an
- incorrect loader.
-
- zipimport.zipimporter objects are Python loaders charged with importing
- data packaged inside zip archives. If stale loaders referencing the
- original distribution are left behind, they can fail to load modules from
- the replacement distribution. E.g. if an old zipimport.zipimporter instance
- is used to load data from a new zipped egg archive, it may cause the
- operation to attempt to locate the requested data in the wrong location -
- one indicated by the original distribution's zip archive directory
- information. Such an operation may then fail outright, e.g. report having
- read a 'bad local file header', or even worse, it may fail silently &
- return invalid data.
-
- zipimport._zip_directory_cache contains cached zip archive directory
- information for all existing zipimport.zipimporter instances and all such
- instances connected to the same archive share the same cached directory
- information.
-
- If asked, and the underlying Python implementation allows it, we can fix
- all existing zipimport.zipimporter instances instead of having to track
- them down and remove them one by one, by updating their shared cached zip
- archive directory information. This, of course, assumes that the
- replacement distribution is packaged as a zipped egg.
-
- If not asked to fix existing zipimport.zipimporter instances, we still do
- our best to clear any remaining zipimport.zipimporter related cached data
- that might somehow later get used when attempting to load data from the new
- distribution and thus cause such load operations to fail. Note that when
- tracking down such remaining stale data, we cannot catch every conceivable
- usage from here, and we clear only those that we know of and have found to
- cause problems if left alive. Any remaining caches should be updated by
- whoever is in charge of maintaining them, i.e. they should be ready to
- handle us replacing their zip archives with new distributions at runtime.
-
- """
- # There are several other known sources of stale zipimport.zipimporter
- # instances that we do not clear here, but might if ever given a reason to
- # do so:
- # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
- # set') may contain distributions which may in turn contain their
- # zipimport.zipimporter loaders.
- # * Several zipimport.zipimporter loaders held by local variables further
- # up the function call stack when running the setuptools installation.
- # * Already loaded modules may have their __loader__ attribute set to the
- # exact loader instance used when importing them. Python 3.4 docs state
- # that this information is intended mostly for introspection and so is
- # not expected to cause us problems.
- normalized_path = normalize_path(dist_path)
- _uncache(normalized_path, sys.path_importer_cache)
- if fix_zipimporter_caches:
- _replace_zip_directory_cache_data(normalized_path)
- else:
- # Here, even though we do not want to fix existing and now stale
- # zipimporter cache information, we still want to remove it. Related to
- # Python's zip archive directory information cache, we clear each of
- # its stale entries in two phases:
- # 1. Clear the entry so attempting to access zip archive information
- # via any existing stale zipimport.zipimporter instances fails.
- # 2. Remove the entry from the cache so any newly constructed
- # zipimport.zipimporter instances do not end up using old stale
- # zip archive directory information.
- # This whole stale data removal step does not seem strictly necessary,
- # but has been left in because it was done before we started replacing
- # the zip archive directory information cache content if possible, and
- # there are no relevant unit tests that we can depend on to tell us if
- # this is really needed.
- _remove_and_clear_zip_directory_cache_data(normalized_path)
-
-
-def _collect_zipimporter_cache_entries(normalized_path, cache):
- """
- Return zipimporter cache entry keys related to a given normalized path.
-
- Alternative path spellings (e.g. those using different character case or
- those using alternative path separators) related to the same path are
- included. Any sub-path entries are included as well, i.e. those
- corresponding to zip archives embedded in other zip archives.
-
- """
- result = []
- prefix_len = len(normalized_path)
- for p in cache:
- np = normalize_path(p)
- if (np.startswith(normalized_path) and
- np[prefix_len:prefix_len + 1] in (os.sep, '')):
- result.append(p)
- return result
-
-
-def _update_zipimporter_cache(normalized_path, cache, updater=None):
- """
- Update zipimporter cache data for a given normalized path.
-
- Any sub-path entries are processed as well, i.e. those corresponding to zip
- archives embedded in other zip archives.
-
- Given updater is a callable taking a cache entry key and the original entry
- (after already removing the entry from the cache), and expected to update
- the entry and possibly return a new one to be inserted in its place.
- Returning None indicates that the entry should not be replaced with a new
- one. If no updater is given, the cache entries are simply removed without
- any additional processing, the same as if the updater simply returned None.
-
- """
- for p in _collect_zipimporter_cache_entries(normalized_path, cache):
- # N.B. pypy's custom zipimport._zip_directory_cache implementation does
- # not support the complete dict interface:
- # * Does not support item assignment, thus not allowing this function
- # to be used only for removing existing cache entries.
- # * Does not support the dict.pop() method, forcing us to use the
- # get/del patterns instead. For more detailed information see the
- # following links:
+ chmod(arg, stat.S_IWRITE)
+ return func(arg)
+ et, ev, _ = sys.exc_info()
+ six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
+
+
+def update_dist_caches(dist_path, fix_zipimporter_caches):
+ """
+ Fix any globally cached `dist_path` related data
+
+ `dist_path` should be a path of a newly installed egg distribution (zipped
+ or unzipped).
+
+ sys.path_importer_cache contains finder objects that have been cached when
+ importing data from the original distribution. Any such finders need to be
+ cleared since the replacement distribution might be packaged differently,
+ e.g. a zipped egg distribution might get replaced with an unzipped egg
+ folder or vice versa. Having the old finders cached may then cause Python
+ to attempt loading modules from the replacement distribution using an
+ incorrect loader.
+
+ zipimport.zipimporter objects are Python loaders charged with importing
+ data packaged inside zip archives. If stale loaders referencing the
+ original distribution are left behind, they can fail to load modules from
+ the replacement distribution. E.g. if an old zipimport.zipimporter instance
+ is used to load data from a new zipped egg archive, it may cause the
+ operation to attempt to locate the requested data in the wrong location -
+ one indicated by the original distribution's zip archive directory
+ information. Such an operation may then fail outright, e.g. report having
+ read a 'bad local file header', or even worse, it may fail silently &
+ return invalid data.
+
+ zipimport._zip_directory_cache contains cached zip archive directory
+ information for all existing zipimport.zipimporter instances and all such
+ instances connected to the same archive share the same cached directory
+ information.
+
+ If asked, and the underlying Python implementation allows it, we can fix
+ all existing zipimport.zipimporter instances instead of having to track
+ them down and remove them one by one, by updating their shared cached zip
+ archive directory information. This, of course, assumes that the
+ replacement distribution is packaged as a zipped egg.
+
+ If not asked to fix existing zipimport.zipimporter instances, we still do
+ our best to clear any remaining zipimport.zipimporter related cached data
+ that might somehow later get used when attempting to load data from the new
+ distribution and thus cause such load operations to fail. Note that when
+ tracking down such remaining stale data, we cannot catch every conceivable
+ usage from here, and we clear only those that we know of and have found to
+ cause problems if left alive. Any remaining caches should be updated by
+ whoever is in charge of maintaining them, i.e. they should be ready to
+ handle us replacing their zip archives with new distributions at runtime.
+
+ """
+ # There are several other known sources of stale zipimport.zipimporter
+ # instances that we do not clear here, but might if ever given a reason to
+ # do so:
+ # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
+ # set') may contain distributions which may in turn contain their
+ # zipimport.zipimporter loaders.
+ # * Several zipimport.zipimporter loaders held by local variables further
+ # up the function call stack when running the setuptools installation.
+ # * Already loaded modules may have their __loader__ attribute set to the
+ # exact loader instance used when importing them. Python 3.4 docs state
+ # that this information is intended mostly for introspection and so is
+ # not expected to cause us problems.
+ normalized_path = normalize_path(dist_path)
+ _uncache(normalized_path, sys.path_importer_cache)
+ if fix_zipimporter_caches:
+ _replace_zip_directory_cache_data(normalized_path)
+ else:
+ # Here, even though we do not want to fix existing and now stale
+ # zipimporter cache information, we still want to remove it. Related to
+ # Python's zip archive directory information cache, we clear each of
+ # its stale entries in two phases:
+ # 1. Clear the entry so attempting to access zip archive information
+ # via any existing stale zipimport.zipimporter instances fails.
+ # 2. Remove the entry from the cache so any newly constructed
+ # zipimport.zipimporter instances do not end up using old stale
+ # zip archive directory information.
+ # This whole stale data removal step does not seem strictly necessary,
+ # but has been left in because it was done before we started replacing
+ # the zip archive directory information cache content if possible, and
+ # there are no relevant unit tests that we can depend on to tell us if
+ # this is really needed.
+ _remove_and_clear_zip_directory_cache_data(normalized_path)
+
+
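The sys.path_importer_cache half of the cleanup in isolation: drop any cached finder whose key is the distribution path or lies under it. A sketch over a plain dict, assuming pre-normalized POSIX-style keys:

    import os

    def uncache(dist_path, cache):
        # Illustrative only: remove entries for dist_path and its sub-paths,
        # as _uncache()/_update_zipimporter_cache() do for the real caches.
        n = len(dist_path)
        stale = [p for p in cache
                 if p.startswith(dist_path) and p[n:n + 1] in (os.sep, '')]
        for p in stale:
            del cache[p]

    cache = {'/eggs/foo.egg': 'f1', '/eggs/foo.egg/sub': 'f2', '/eggs/bar.egg': 'f3'}
    uncache('/eggs/foo.egg', cache)
    print(sorted(cache))  # ['/eggs/bar.egg']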
+def _collect_zipimporter_cache_entries(normalized_path, cache):
+ """
+ Return zipimporter cache entry keys related to a given normalized path.
+
+ Alternative path spellings (e.g. those using different character case or
+ those using alternative path separators) related to the same path are
+ included. Any sub-path entries are included as well, i.e. those
+ corresponding to zip archives embedded in other zip archives.
+
+ """
+ result = []
+ prefix_len = len(normalized_path)
+ for p in cache:
+ np = normalize_path(p)
+ if (np.startswith(normalized_path) and
+ np[prefix_len:prefix_len + 1] in (os.sep, '')):
+ result.append(p)
+ return result
+
+
+def _update_zipimporter_cache(normalized_path, cache, updater=None):
+ """
+ Update zipimporter cache data for a given normalized path.
+
+ Any sub-path entries are processed as well, i.e. those corresponding to zip
+ archives embedded in other zip archives.
+
+ Given updater is a callable taking a cache entry key and the original entry
+ (after already removing the entry from the cache), and expected to update
+ the entry and possibly return a new one to be inserted in its place.
+ Returning None indicates that the entry should not be replaced with a new
+ one. If no updater is given, the cache entries are simply removed without
+ any additional processing, the same as if the updater simply returned None.
+
+ """
+ for p in _collect_zipimporter_cache_entries(normalized_path, cache):
+ # N.B. pypy's custom zipimport._zip_directory_cache implementation does
+ # not support the complete dict interface:
+ # * Does not support item assignment, thus not allowing this function
+ # to be used only for removing existing cache entries.
+ # * Does not support the dict.pop() method, forcing us to use the
+ # get/del patterns instead. For more detailed information see the
+ # following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
- old_entry = cache[p]
- del cache[p]
- new_entry = updater and updater(p, old_entry)
- if new_entry is not None:
- cache[p] = new_entry
-
-
-def _uncache(normalized_path, cache):
- _update_zipimporter_cache(normalized_path, cache)
-
-
-def _remove_and_clear_zip_directory_cache_data(normalized_path):
- def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
- old_entry.clear()
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=clear_and_remove_cached_zip_archive_directory_data)
-
-
-# PyPy Python implementation does not allow directly writing to the
-# zipimport._zip_directory_cache and so prevents us from attempting to correct
-# its content. The best we can do there is clear the problematic cache content
-# and have PyPy repopulate it as needed. The downside is that if there are any
-# stale zipimport.zipimporter instances lying around, attempting to use them
-# will fail due to not having their zip archive directory information available
-# instead of being automatically corrected to use the new correct zip archive
-# directory information.
-if '__pypy__' in sys.builtin_module_names:
- _replace_zip_directory_cache_data = \
- _remove_and_clear_zip_directory_cache_data
-else:
-
- def _replace_zip_directory_cache_data(normalized_path):
- def replace_cached_zip_archive_directory_data(path, old_entry):
- # N.B. In theory, we could load the zip directory information just
- # once for all updated path spellings, and then copy it locally and
- # update its contained path strings to contain the correct
- # spelling, but that seems like a way too invasive move (this cache
- # structure is not officially documented anywhere and could in
- # theory change with new Python releases) for no significant
- # benefit.
- old_entry.clear()
- zipimport.zipimporter(path)
- old_entry.update(zipimport._zip_directory_cache[path])
- return old_entry
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=replace_cached_zip_archive_directory_data)
-
-
-def is_python(text, filename='<string>'):
- "Is this string a valid Python script?"
- try:
- compile(text, filename, 'exec')
- except (SyntaxError, TypeError):
- return False
- else:
- return True
-
-
-def is_sh(executable):
- """Determine if the specified executable is a .sh (contains a #! line)"""
- try:
- with io.open(executable, encoding='latin-1') as fp:
- magic = fp.read(2)
- except (OSError, IOError):
- return executable
- return magic == '#!'
-
-
-def nt_quote_arg(arg):
- """Quote a command line argument according to Windows parsing rules"""
- return subprocess.list2cmdline([arg])
-
-
-def is_python_script(script_text, filename):
- """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
- """
- if filename.endswith('.py') or filename.endswith('.pyw'):
- return True # extension says it's Python
- if is_python(script_text, filename):
- return True # it's syntactically valid Python
- if script_text.startswith('#!'):
- # It begins with a '#!' line, so check if 'python' is in it somewhere
- return 'python' in script_text.splitlines()[0].lower()
-
- return False # Not any Python I can recognize
-
-
-try:
- from os import chmod as _chmod
-except ImportError:
- # Jython compatibility
- def _chmod(*args):
- pass
-
-
-def chmod(path, mode):
- log.debug("changing mode of %s to %o", path, mode)
- try:
- _chmod(path, mode)
- except os.error as e:
- log.debug("chmod failed: %s", e)
-
-
-class CommandSpec(list):
- """
- A command spec for a #! header, specified as a list of arguments akin to
- those passed to Popen.
- """
-
- options = []
- split_args = dict()
-
- @classmethod
- def best(cls):
- """
- Choose the best CommandSpec class based on environmental conditions.
- """
- return cls
-
- @classmethod
- def _sys_executable(cls):
- _default = os.path.normpath(sys.executable)
- return os.environ.get('__PYVENV_LAUNCHER__', _default)
-
- @classmethod
- def from_param(cls, param):
- """
- Construct a CommandSpec from a parameter to build_scripts, which may
- be None.
- """
- if isinstance(param, cls):
- return param
- if isinstance(param, list):
- return cls(param)
- if param is None:
- return cls.from_environment()
- # otherwise, assume it's a string.
- return cls.from_string(param)
-
- @classmethod
- def from_environment(cls):
- return cls([cls._sys_executable()])
-
- @classmethod
- def from_string(cls, string):
- """
- Construct a command spec from a simple string representing a command
- line parseable by shlex.split.
- """
- items = shlex.split(string, **cls.split_args)
- return cls(items)
-
- def install_options(self, script_text):
- self.options = shlex.split(self._extract_options(script_text))
- cmdline = subprocess.list2cmdline(self)
- if not isascii(cmdline):
- self.options[:0] = ['-x']
-
- @staticmethod
- def _extract_options(orig_script):
- """
- Extract any options from the first line of the script.
- """
- first = (orig_script + '\n').splitlines()[0]
- match = _first_line_re().match(first)
- options = match.group(1) or '' if match else ''
- return options.strip()
-
- def as_header(self):
- return self._render(self + list(self.options))
-
- @staticmethod
+ old_entry = cache[p]
+ del cache[p]
+ new_entry = updater and updater(p, old_entry)
+ if new_entry is not None:
+ cache[p] = new_entry
+
+
+def _uncache(normalized_path, cache):
+ _update_zipimporter_cache(normalized_path, cache)
+
+
+def _remove_and_clear_zip_directory_cache_data(normalized_path):
+ def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
+ old_entry.clear()
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=clear_and_remove_cached_zip_archive_directory_data)
+
+
+# PyPy Python implementation does not allow directly writing to the
+# zipimport._zip_directory_cache and so prevents us from attempting to correct
+# its content. The best we can do there is clear the problematic cache content
+# and have PyPy repopulate it as needed. The downside is that if there are any
+# stale zipimport.zipimporter instances lying around, attempting to use them
+# will fail due to not having their zip archive directory information available
+# instead of being automatically corrected to use the new correct zip archive
+# directory information.
+if '__pypy__' in sys.builtin_module_names:
+ _replace_zip_directory_cache_data = \
+ _remove_and_clear_zip_directory_cache_data
+else:
+
+ def _replace_zip_directory_cache_data(normalized_path):
+ def replace_cached_zip_archive_directory_data(path, old_entry):
+ # N.B. In theory, we could load the zip directory information just
+ # once for all updated path spellings, and then copy it locally and
+ # update its contained path strings to contain the correct
+ # spelling, but that seems like a way too invasive move (this cache
+ # structure is not officially documented anywhere and could in
+ # theory change with new Python releases) for no significant
+ # benefit.
+ old_entry.clear()
+ zipimport.zipimporter(path)
+ old_entry.update(zipimport._zip_directory_cache[path])
+ return old_entry
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=replace_cached_zip_archive_directory_data)
+
+
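The CPython branch above, paraphrased: clearing the shared directory dict invalidates every zipimporter bound to that archive, constructing a fresh zipimporter re-reads the zip directory, and the fresh data is copied back into the shared object. A hedged standalone sketch; the archive path in the usage comment is hypothetical:

    import zipimport

    def refresh_zip_directory(archive_path):
        # Illustrative only (CPython): refresh the cached directory data for
        # archive_path in place so stale zipimporter instances pick it up.
        old_entry = zipimport._zip_directory_cache.get(archive_path)
        if old_entry is None:
            return
        old_entry.clear()                    # step 1: invalidate shared data
        del zipimport._zip_directory_cache[archive_path]
        zipimport.zipimporter(archive_path)  # step 2: re-read the zip directory
        old_entry.update(zipimport._zip_directory_cache[archive_path])
        zipimport._zip_directory_cache[archive_path] = old_entry

    # refresh_zip_directory('/eggs/foo.egg')  # hypothetical archive path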
+def is_python(text, filename='<string>'):
+ "Is this string a valid Python script?"
+ try:
+ compile(text, filename, 'exec')
+ except (SyntaxError, TypeError):
+ return False
+ else:
+ return True
+
+
+def is_sh(executable):
+ """Determine if the specified executable is a .sh (contains a #! line)"""
+ try:
+ with io.open(executable, encoding='latin-1') as fp:
+ magic = fp.read(2)
+ except (OSError, IOError):
+ return executable
+ return magic == '#!'
+
+
+def nt_quote_arg(arg):
+ """Quote a command line argument according to Windows parsing rules"""
+ return subprocess.list2cmdline([arg])
+
+
+def is_python_script(script_text, filename):
+ """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
+ """
+ if filename.endswith('.py') or filename.endswith('.pyw'):
+ return True # extension says it's Python
+ if is_python(script_text, filename):
+ return True # it's syntactically valid Python
+ if script_text.startswith('#!'):
+ # It begins with a '#!' line, so check if 'python' is in it somewhere
+ return 'python' in script_text.splitlines()[0].lower()
+
+ return False # Not any Python I can recognize
+
+
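The three heuristics above, inlined into one self-contained function for illustration (extension, then compilability, then shebang):

    def looks_like_python(script_text, filename):
        # Illustrative only: the same checks as is_python_script()/is_python().
        if filename.endswith(('.py', '.pyw')):
            return True  # extension says it's Python
        try:
            compile(script_text, filename, 'exec')
            return True  # syntactically valid Python
        except (SyntaxError, TypeError):
            pass
        if script_text.startswith('#!'):
            return 'python' in script_text.splitlines()[0].lower()
        return False

    print(looks_like_python("#!/usr/bin/env python\nprint('hi')\n", 'tool'))  # True
    print(looks_like_python('case $1 in esac', 'tool'))                       # False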
+try:
+ from os import chmod as _chmod
+except ImportError:
+ # Jython compatibility
+ def _chmod(*args):
+ pass
+
+
+def chmod(path, mode):
+ log.debug("changing mode of %s to %o", path, mode)
+ try:
+ _chmod(path, mode)
+ except os.error as e:
+ log.debug("chmod failed: %s", e)
+
+
+class CommandSpec(list):
+ """
+ A command spec for a #! header, specified as a list of arguments akin to
+ those passed to Popen.
+ """
+
+ options = []
+ split_args = dict()
+
+ @classmethod
+ def best(cls):
+ """
+ Choose the best CommandSpec class based on environmental conditions.
+ """
+ return cls
+
+ @classmethod
+ def _sys_executable(cls):
+ _default = os.path.normpath(sys.executable)
+ return os.environ.get('__PYVENV_LAUNCHER__', _default)
+
+ @classmethod
+ def from_param(cls, param):
+ """
+ Construct a CommandSpec from a parameter to build_scripts, which may
+ be None.
+ """
+ if isinstance(param, cls):
+ return param
+ if isinstance(param, list):
+ return cls(param)
+ if param is None:
+ return cls.from_environment()
+ # otherwise, assume it's a string.
+ return cls.from_string(param)
+
+ @classmethod
+ def from_environment(cls):
+ return cls([cls._sys_executable()])
+
+ @classmethod
+ def from_string(cls, string):
+ """
+ Construct a command spec from a simple string representing a command
+ line parseable by shlex.split.
+ """
+ items = shlex.split(string, **cls.split_args)
+ return cls(items)
+
+ def install_options(self, script_text):
+ self.options = shlex.split(self._extract_options(script_text))
+ cmdline = subprocess.list2cmdline(self)
+ if not isascii(cmdline):
+ self.options[:0] = ['-x']
+
+ @staticmethod
+ def _extract_options(orig_script):
+ """
+ Extract any options from the first line of the script.
+ """
+ first = (orig_script + '\n').splitlines()[0]
+ match = _first_line_re().match(first)
+ options = match.group(1) or '' if match else ''
+ return options.strip()
+
+ def as_header(self):
+ return self._render(self + list(self.options))
+
+ @staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
@@ -2046,301 +2046,301 @@ class CommandSpec(list):
return item
@staticmethod
- def _render(items):
+ def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
- return '#!' + cmdline + '\n'
-
-
-# For pbr compat; will be removed in a future version.
-sys_executable = CommandSpec._sys_executable()
-
-
-class WindowsCommandSpec(CommandSpec):
- split_args = dict(posix=False)
-
-
+ return '#!' + cmdline + '\n'
+
+
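End-to-end usage of the class above, run in this module's namespace; the launcher command line and script text are illustrative:

    # Parse a launcher command, absorb the script's own #! options, render:
    cmd = CommandSpec.from_string('/usr/bin/env python')
    cmd.install_options('#!/usr/bin/python -O\nprint("hi")\n')
    print(cmd.as_header())  # expected: '#!/usr/bin/env python -O\n'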
+# For pbr compat; will be removed in a future version.
+sys_executable = CommandSpec._sys_executable()
+
+
+class WindowsCommandSpec(CommandSpec):
+ split_args = dict(posix=False)
+
+
class ScriptWriter:
- """
- Encapsulates behavior around writing entry point scripts for console and
- gui apps.
- """
-
+ """
+ Encapsulates behavior around writing entry point scripts for console and
+ gui apps.
+ """
+
template = textwrap.dedent(r"""
- # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
- __requires__ = %(spec)r
+ # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
+ __requires__ = %(spec)r
import re
- import sys
- from pkg_resources import load_entry_point
-
- if __name__ == '__main__':
+ import sys
+ from pkg_resources import load_entry_point
+
+ if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(
- load_entry_point(%(spec)r, %(group)r, %(name)r)()
- )
- """).lstrip()
-
- command_spec_class = CommandSpec
-
- @classmethod
- def get_script_args(cls, dist, executable=None, wininst=False):
- # for backward compatibility
+ sys.exit(
+ load_entry_point(%(spec)r, %(group)r, %(name)r)()
+ )
+ """).lstrip()
+
+ command_spec_class = CommandSpec
+
+ @classmethod
+ def get_script_args(cls, dist, executable=None, wininst=False):
+ # for backward compatibility
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
- writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
- header = cls.get_script_header("", executable, wininst)
- return writer.get_args(dist, header)
-
- @classmethod
- def get_script_header(cls, script_text, executable=None, wininst=False):
- # for backward compatibility
+ writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
+ header = cls.get_script_header("", executable, wininst)
+ return writer.get_args(dist, header)
+
+ @classmethod
+ def get_script_header(cls, script_text, executable=None, wininst=False):
+ # for backward compatibility
warnings.warn("Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
- if wininst:
- executable = "python.exe"
+ if wininst:
+ executable = "python.exe"
return cls.get_header(script_text, executable)
-
- @classmethod
- def get_args(cls, dist, header=None):
- """
- Yield write_script() argument tuples for a distribution's
- console_scripts and gui_scripts entry points.
- """
- if header is None:
- header = cls.get_header()
- spec = str(dist.as_requirement())
- for type_ in 'console', 'gui':
- group = type_ + '_scripts'
- for name, ep in dist.get_entry_map(group).items():
- cls._ensure_safe_name(name)
- script_text = cls.template % locals()
- args = cls._get_script_args(type_, name, header, script_text)
- for res in args:
- yield res
-
- @staticmethod
- def _ensure_safe_name(name):
- """
- Prevent paths in *_scripts entry point names.
- """
- has_path_sep = re.search(r'[\\/]', name)
- if has_path_sep:
- raise ValueError("Path separators not allowed in script names")
-
- @classmethod
- def get_writer(cls, force_windows):
- # for backward compatibility
+
+ @classmethod
+ def get_args(cls, dist, header=None):
+ """
+ Yield write_script() argument tuples for a distribution's
+ console_scripts and gui_scripts entry points.
+ """
+ if header is None:
+ header = cls.get_header()
+ spec = str(dist.as_requirement())
+ for type_ in 'console', 'gui':
+ group = type_ + '_scripts'
+ for name, ep in dist.get_entry_map(group).items():
+ cls._ensure_safe_name(name)
+ script_text = cls.template % locals()
+ args = cls._get_script_args(type_, name, header, script_text)
+ for res in args:
+ yield res
+
+ @staticmethod
+ def _ensure_safe_name(name):
+ """
+ Prevent paths in *_scripts entry point names.
+ """
+ has_path_sep = re.search(r'[\\/]', name)
+ if has_path_sep:
+ raise ValueError("Path separators not allowed in script names")
+
+ @classmethod
+ def get_writer(cls, force_windows):
+ # for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
- return WindowsScriptWriter.best() if force_windows else cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter for this environment.
- """
- if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
- return WindowsScriptWriter.best()
- else:
- return cls
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- # Simply write the stub with no extension.
- yield (name, header + script_text)
-
- @classmethod
- def get_header(cls, script_text="", executable=None):
- """Create a #! line, getting options (if any) from script_text"""
- cmd = cls.command_spec_class.best().from_param(executable)
- cmd.install_options(script_text)
- return cmd.as_header()
-
-
-class WindowsScriptWriter(ScriptWriter):
- command_spec_class = WindowsCommandSpec
-
- @classmethod
- def get_writer(cls):
- # for backward compatibility
+ return WindowsScriptWriter.best() if force_windows else cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter for this environment.
+ """
+ if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
+ return WindowsScriptWriter.best()
+ else:
+ return cls
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ # Simply write the stub with no extension.
+ yield (name, header + script_text)
+
+ @classmethod
+ def get_header(cls, script_text="", executable=None):
+ """Create a #! line, getting options (if any) from script_text"""
+ cmd = cls.command_spec_class.best().from_param(executable)
+ cmd.install_options(script_text)
+ return cmd.as_header()
+
+
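What a generated entry-point stub looks like once the template is filled; the spec/group/name values are illustrative, and this assumes the module's namespace:

    # Fill ScriptWriter.template by hand with illustrative values:
    values = dict(spec='mypkg==1.0', group='console_scripts', name='mytool')
    stub = ScriptWriter.template % values
    print(stub.splitlines()[0])
    # expected: # EASY-INSTALL-ENTRY-SCRIPT: 'mypkg==1.0','console_scripts','mytool'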
+class WindowsScriptWriter(ScriptWriter):
+ command_spec_class = WindowsCommandSpec
+
+ @classmethod
+ def get_writer(cls):
+ # for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
- return cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter suitable for Windows
- """
- writer_lookup = dict(
- executable=WindowsExecutableLauncherWriter,
- natural=cls,
- )
- # for compatibility, use the executable launcher by default
- launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
- return writer_lookup[launcher]
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- "For Windows, add a .py extension"
- ext = dict(console='.pya', gui='.pyw')[type_]
- if ext not in os.environ['PATHEXT'].lower().split(';'):
+ return cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter suitable for Windows
+ """
+ writer_lookup = dict(
+ executable=WindowsExecutableLauncherWriter,
+ natural=cls,
+ )
+ # for compatibility, use the executable launcher by default
+ launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
+ return writer_lookup[launcher]
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ "For Windows, add a .py extension"
+ ext = dict(console='.pya', gui='.pyw')[type_]
+ if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
- old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
- old.remove(ext)
- header = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield name + ext, header + script_text, 't', blockers
-
- @classmethod
- def _adjust_header(cls, type_, orig_header):
- """
- Make sure 'pythonw' is used for gui and 'python' is used for
- console (regardless of what sys.executable is).
- """
- pattern = 'pythonw.exe'
- repl = 'python.exe'
- if type_ == 'gui':
- pattern, repl = repl, pattern
- pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
- new_header = pattern_ob.sub(string=orig_header, repl=repl)
- return new_header if cls._use_header(new_header) else orig_header
-
- @staticmethod
- def _use_header(new_header):
- """
- Should _adjust_header use the replaced header?
-
- On non-windows systems, always use. On
- Windows systems, only use the replaced header if it resolves
- to an executable on the system.
- """
- clean_header = new_header[2:-1].strip('"')
- return sys.platform != 'win32' or find_executable(clean_header)
-
-
-class WindowsExecutableLauncherWriter(WindowsScriptWriter):
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- """
- For Windows, add a .py extension and an .exe launcher
- """
- if type_ == 'gui':
- launcher_type = 'gui'
- ext = '-script.pyw'
- old = ['.pyw']
- else:
- launcher_type = 'cli'
- ext = '-script.py'
- old = ['.py', '.pyc', '.pyo']
- hdr = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield (name + ext, hdr + script_text, 't', blockers)
- yield (
- name + '.exe', get_win_launcher(launcher_type),
- 'b' # write in binary mode
- )
- if not is_64bit():
- # install a manifest for the launcher to prevent Windows
- # from detecting it as an installer (which it will for
- # launchers like easy_install.exe). Consider only
- # adding a manifest for launchers detected as installers.
- # See Distribute #143 for details.
- m_name = name + '.exe.manifest'
- yield (m_name, load_launcher_manifest(name), 't')
-
-
-# for backward-compatibility
-get_script_args = ScriptWriter.get_script_args
-get_script_header = ScriptWriter.get_script_header
-
-
-def get_win_launcher(type):
- """
- Load the Windows launcher (executable) suitable for launching a script.
-
- `type` should be either 'cli' or 'gui'
-
- Returns the executable as a byte string.
- """
- launcher_fn = '%s.exe' % type
- if is_64bit():
- launcher_fn = launcher_fn.replace(".", "-64.")
- else:
- launcher_fn = launcher_fn.replace(".", "-32.")
- return resource_string('setuptools', launcher_fn)
-
-
-def load_launcher_manifest(name):
- manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
- if six.PY2:
- return manifest % vars()
- else:
- return manifest.decode('utf-8') % vars()
-
-
-def rmtree(path, ignore_errors=False, onerror=auto_chmod):
+ old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
+ old.remove(ext)
+ header = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield name + ext, header + script_text, 't', blockers
+
+ @classmethod
+ def _adjust_header(cls, type_, orig_header):
+ """
+ Make sure 'pythonw' is used for gui and 'python' is used for
+ console (regardless of what sys.executable is).
+ """
+ pattern = 'pythonw.exe'
+ repl = 'python.exe'
+ if type_ == 'gui':
+ pattern, repl = repl, pattern
+ pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
+ new_header = pattern_ob.sub(string=orig_header, repl=repl)
+ return new_header if cls._use_header(new_header) else orig_header
+
+ @staticmethod
+ def _use_header(new_header):
+ """
+ Should _adjust_header use the replaced header?
+
+ On non-windows systems, always use. On
+ Windows systems, only use the replaced header if it resolves
+ to an executable on the system.
+ """
+ clean_header = new_header[2:-1].strip('"')
+ return sys.platform != 'win32' or find_executable(clean_header)
+
+
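The case-insensitive executable swap in _adjust_header, shown standalone: console scripts get python.exe, gui scripts get pythonw.exe:

    import re

    def adjust_header(type_, header):
        # Illustrative only: the same swap _adjust_header performs.
        pattern, repl = 'pythonw.exe', 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        return re.compile(re.escape(pattern), re.IGNORECASE).sub(repl, header)

    print(adjust_header('console', '#!C:\\Python\\PythonW.exe\n'))  # python.exe
    print(adjust_header('gui', '#!C:\\Python\\python.exe\n'))       # pythonw.exe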
+class WindowsExecutableLauncherWriter(WindowsScriptWriter):
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ """
+ For Windows, add a .py extension and an .exe launcher
+ """
+ if type_ == 'gui':
+ launcher_type = 'gui'
+ ext = '-script.pyw'
+ old = ['.pyw']
+ else:
+ launcher_type = 'cli'
+ ext = '-script.py'
+ old = ['.py', '.pyc', '.pyo']
+ hdr = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield (name + ext, hdr + script_text, 't', blockers)
+ yield (
+ name + '.exe', get_win_launcher(launcher_type),
+ 'b' # write in binary mode
+ )
+ if not is_64bit():
+ # install a manifest for the launcher to prevent Windows
+ # from detecting it as an installer (which it will for
+ # launchers like easy_install.exe). Consider only
+ # adding a manifest for launchers detected as installers.
+ # See Distribute #143 for details.
+ m_name = name + '.exe.manifest'
+ yield (m_name, load_launcher_manifest(name), 't')
+
+
+# for backward-compatibility
+get_script_args = ScriptWriter.get_script_args
+get_script_header = ScriptWriter.get_script_header
+
+
+def get_win_launcher(type):
+ """
+ Load the Windows launcher (executable) suitable for launching a script.
+
+ `type` should be either 'cli' or 'gui'
+
+ Returns the executable as a byte string.
+ """
+ launcher_fn = '%s.exe' % type
+ if is_64bit():
+ launcher_fn = launcher_fn.replace(".", "-64.")
+ else:
+ launcher_fn = launcher_fn.replace(".", "-32.")
+ return resource_string('setuptools', launcher_fn)
+
+
+def load_launcher_manifest(name):
+ manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
+ if six.PY2:
+ return manifest % vars()
+ else:
+ return manifest.decode('utf-8') % vars()
+
+
+def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
-
-
-def current_umask():
- tmp = os.umask(0o022)
- os.umask(tmp)
- return tmp
-
-
-def bootstrap():
- # This function is called when setuptools*.egg is run using /bin/sh
- import setuptools
-
- argv0 = os.path.dirname(setuptools.__path__[0])
- sys.argv[0] = argv0
- sys.argv.append(argv0)
- main()
-
-
-def main(argv=None, **kw):
- from setuptools import setup
- from setuptools.dist import Distribution
-
- class DistributionWithoutHelpCommands(Distribution):
- common_usage = ""
-
- def _show_help(self, *args, **kw):
- with _patch_usage():
- Distribution._show_help(self, *args, **kw)
-
- if argv is None:
- argv = sys.argv[1:]
-
- with _patch_usage():
- setup(
- script_args=['-q', 'easy_install', '-v'] + argv,
- script_name=sys.argv[0] or 'easy_install',
+
+
+def current_umask():
+ tmp = os.umask(0o022)
+ os.umask(tmp)
+ return tmp
+
+
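current_umask works because os.umask both installs a new mask and returns the previous one, so a set-then-restore round trip reads the mask without changing it:

    import os

    old = os.umask(0o022)  # install a throwaway mask; get the real one back
    os.umask(old)          # restore immediately
    print(oct(old))        # the process umask, unchanged by the round trip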
+def bootstrap():
+ # This function is called when setuptools*.egg is run using /bin/sh
+ import setuptools
+
+ argv0 = os.path.dirname(setuptools.__path__[0])
+ sys.argv[0] = argv0
+ sys.argv.append(argv0)
+ main()
+
+
+def main(argv=None, **kw):
+ from setuptools import setup
+ from setuptools.dist import Distribution
+
+ class DistributionWithoutHelpCommands(Distribution):
+ common_usage = ""
+
+ def _show_help(self, *args, **kw):
+ with _patch_usage():
+ Distribution._show_help(self, *args, **kw)
+
+ if argv is None:
+ argv = sys.argv[1:]
+
+ with _patch_usage():
+ setup(
+ script_args=['-q', 'easy_install', '-v'] + argv,
+ script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
- )
-
-
-@contextlib.contextmanager
-def _patch_usage():
- import distutils.core
- USAGE = textwrap.dedent("""
- usage: %(script)s [options] requirement_or_url ...
- or: %(script)s --help
- """).lstrip()
-
- def gen_usage(script_name):
- return USAGE % dict(
- script=os.path.basename(script_name),
- )
-
- saved = distutils.core.gen_usage
- distutils.core.gen_usage = gen_usage
- try:
- yield
- finally:
- distutils.core.gen_usage = saved
+ )
+
+
+@contextlib.contextmanager
+def _patch_usage():
+ import distutils.core
+ USAGE = textwrap.dedent("""
+ usage: %(script)s [options] requirement_or_url ...
+ or: %(script)s --help
+ """).lstrip()
+
+ def gen_usage(script_name):
+ return USAGE % dict(
+ script=os.path.basename(script_name),
+ )
+
+ saved = distutils.core.gen_usage
+ distutils.core.gen_usage = gen_usage
+ try:
+ yield
+ finally:
+ distutils.core.gen_usage = saved
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
"""Class for warning about deprecations in EasyInstall in SetupTools. Not ignored by default, unlike DeprecationWarning."""
diff --git a/contrib/python/setuptools/py2/setuptools/command/egg_info.py b/contrib/python/setuptools/py2/setuptools/command/egg_info.py
index 9760018aed..a5c5a2fc19 100644
--- a/contrib/python/setuptools/py2/setuptools/command/egg_info.py
+++ b/contrib/python/setuptools/py2/setuptools/command/egg_info.py
@@ -1,38 +1,38 @@
-"""setuptools.command.egg_info
-
-Create a distribution's .egg-info directory and contents"""
-
-from distutils.filelist import FileList as _FileList
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
-from distutils.util import convert_path
-from distutils import log
-import distutils.errors
-import distutils.filelist
-import os
-import re
-import sys
-import io
-import warnings
-import time
+from distutils.util import convert_path
+from distutils import log
+import distutils.errors
+import distutils.filelist
+import os
+import re
+import sys
+import io
+import warnings
+import time
import collections
-
+
from setuptools.extern import six
from setuptools.extern.six.moves import map
-
-from setuptools import Command
-from setuptools.command.sdist import sdist
-from setuptools.command.sdist import walk_revctrl
-from setuptools.command.setopt import edit_config
-from setuptools.command import bdist_egg
-from pkg_resources import (
- parse_requirements, safe_name, parse_version,
- safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
-import setuptools.unicode_utils as unicode_utils
+
+from setuptools import Command
+from setuptools.command.sdist import sdist
+from setuptools.command.sdist import walk_revctrl
+from setuptools.command.setopt import edit_config
+from setuptools.command import bdist_egg
+from pkg_resources import (
+ parse_requirements, safe_name, parse_version,
+ safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
+import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
-
+
from setuptools.extern import packaging
from setuptools import SetuptoolsDeprecationWarning
-
+
def translate_pattern(glob):
"""
Translate a file path glob like '*.txt' into a regular expression.
@@ -41,7 +41,7 @@ def translate_pattern(glob):
directories.
"""
pat = ''
-
+
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
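translate_pattern builds a regex from a path glob, splitting on the separator so that wildcards cannot cross directory boundaries. A simplified sketch of that idea, written from scratch here; unlike the real function it does not handle '**' or character classes:

    import re

    def simple_translate(glob_pat):
        # '*' and '?' match within a single path component only.
        out = []
        for ch in glob_pat:
            if ch == '*':
                out.append('[^/]*')
            elif ch == '?':
                out.append('[^/]')
            else:
                out.append(re.escape(ch))
        return re.compile(''.join(out) + r'\Z')

    assert simple_translate('*.txt').match('notes.txt')
    assert not simple_translate('*.txt').match('docs/notes.txt')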
@@ -143,28 +143,28 @@ class InfoCommon:
class egg_info(InfoCommon, Command):
- description = "create a distribution's .egg-info directory"
-
- user_options = [
- ('egg-base=', 'e', "directory containing .egg-info directories"
- " (default: top of the source tree)"),
- ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
- ('tag-build=', 'b', "Specify explicit tag to add to version number"),
- ('no-date', 'D', "Don't include date stamp [default]"),
- ]
-
+ description = "create a distribution's .egg-info directory"
+
+ user_options = [
+ ('egg-base=', 'e', "directory containing .egg-info directories"
+ " (default: top of the source tree)"),
+ ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+ ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+ ('no-date', 'D', "Don't include date stamp [default]"),
+ ]
+
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
-
- def initialize_options(self):
+
+ def initialize_options(self):
self.egg_base = None
- self.egg_name = None
+ self.egg_name = None
self.egg_info = None
- self.egg_version = None
- self.broken_egg_info = False
-
+ self.egg_version = None
+ self.broken_egg_info = False
+
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@@ -177,7 +177,7 @@ class egg_info(InfoCommon, Command):
pass
####################################
- def save_version_info(self, filename):
+ def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
@@ -189,139 +189,139 @@ class egg_info(InfoCommon, Command):
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
-
- def finalize_options(self):
+
+ def finalize_options(self):
# Note: we need to capture the current value returned
# by `self.tagged_version()`, so we can later update
# `self.distribution.metadata.version` without
# repercussions.
self.egg_name = self.name
- self.egg_version = self.tagged_version()
- parsed_version = parse_version(self.egg_version)
-
- try:
- is_version = isinstance(parsed_version, packaging.version.Version)
- spec = (
- "%s==%s" if is_version else "%s===%s"
- )
- list(
- parse_requirements(spec % (self.egg_name, self.egg_version))
- )
- except ValueError:
- raise distutils.errors.DistutilsOptionError(
- "Invalid distribution name or version syntax: %s-%s" %
- (self.egg_name, self.egg_version)
- )
-
- if self.egg_base is None:
- dirs = self.distribution.package_dir
- self.egg_base = (dirs or {}).get('', os.curdir)
-
- self.ensure_dirname('egg_base')
- self.egg_info = to_filename(self.egg_name) + '.egg-info'
- if self.egg_base != os.curdir:
- self.egg_info = os.path.join(self.egg_base, self.egg_info)
- if '-' in self.egg_name:
- self.check_broken_egg_info()
-
- # Set package version for the benefit of dumber commands
- # (e.g. sdist, bdist_wininst, etc.)
- #
- self.distribution.metadata.version = self.egg_version
-
- # If we bootstrapped around the lack of a PKG-INFO, as might be the
- # case in a fresh checkout, make sure that any special tags get added
- # to the version info
- #
- pd = self.distribution._patched_dist
- if pd is not None and pd.key == self.egg_name.lower():
- pd._version = self.egg_version
- pd._parsed_version = parse_version(self.egg_version)
- self.distribution._patched_dist = None
-
- def write_or_delete_file(self, what, filename, data, force=False):
- """Write `data` to `filename` or delete if empty
-
- If `data` is non-empty, this routine is the same as ``write_file()``.
- If `data` is empty but not ``None``, this is the same as calling
- ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
- unless `filename` exists, in which case a warning is issued about the
- orphaned file (if `force` is false), or deleted (if `force` is true).
- """
- if data:
- self.write_file(what, filename, data)
- elif os.path.exists(filename):
- if data is None and not force:
- log.warn(
- "%s not set in setup(), but %s exists", what, filename
- )
- return
- else:
- self.delete_file(filename)
-
- def write_file(self, what, filename, data):
- """Write `data` to `filename` (if not a dry run) after announcing it
-
- `what` is used in a log message to identify what is being written
- to the file.
- """
- log.info("writing %s to %s", what, filename)
+ self.egg_version = self.tagged_version()
+ parsed_version = parse_version(self.egg_version)
+
+ try:
+ is_version = isinstance(parsed_version, packaging.version.Version)
+ spec = (
+ "%s==%s" if is_version else "%s===%s"
+ )
+ list(
+ parse_requirements(spec % (self.egg_name, self.egg_version))
+ )
+ except ValueError:
+ raise distutils.errors.DistutilsOptionError(
+ "Invalid distribution name or version syntax: %s-%s" %
+ (self.egg_name, self.egg_version)
+ )
+
+ if self.egg_base is None:
+ dirs = self.distribution.package_dir
+ self.egg_base = (dirs or {}).get('', os.curdir)
+
+ self.ensure_dirname('egg_base')
+ self.egg_info = to_filename(self.egg_name) + '.egg-info'
+ if self.egg_base != os.curdir:
+ self.egg_info = os.path.join(self.egg_base, self.egg_info)
+ if '-' in self.egg_name:
+ self.check_broken_egg_info()
+
+ # Set package version for the benefit of dumber commands
+ # (e.g. sdist, bdist_wininst, etc.)
+ #
+ self.distribution.metadata.version = self.egg_version
+
+ # If we bootstrapped around the lack of a PKG-INFO, as might be the
+ # case in a fresh checkout, make sure that any special tags get added
+ # to the version info
+ #
+ pd = self.distribution._patched_dist
+ if pd is not None and pd.key == self.egg_name.lower():
+ pd._version = self.egg_version
+ pd._parsed_version = parse_version(self.egg_version)
+ self.distribution._patched_dist = None
+
+ def write_or_delete_file(self, what, filename, data, force=False):
+ """Write `data` to `filename` or delete if empty
+
+ If `data` is non-empty, this routine is the same as ``write_file()``.
+ If `data` is empty but not ``None``, this is the same as calling
+ ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
+ unless `filename` exists, in which case a warning is issued about the
+ orphaned file (if `force` is false), or deleted (if `force` is true).
+ """
+ if data:
+ self.write_file(what, filename, data)
+ elif os.path.exists(filename):
+ if data is None and not force:
+ log.warn(
+ "%s not set in setup(), but %s exists", what, filename
+ )
+ return
+ else:
+ self.delete_file(filename)
+
+ def write_file(self, what, filename, data):
+ """Write `data` to `filename` (if not a dry run) after announcing it
+
+ `what` is used in a log message to identify what is being written
+ to the file.
+ """
+ log.info("writing %s to %s", what, filename)
if not six.PY2:
- data = data.encode("utf-8")
- if not self.dry_run:
- f = open(filename, 'wb')
- f.write(data)
- f.close()
-
- def delete_file(self, filename):
- """Delete `filename` (if not a dry run) after announcing it"""
- log.info("deleting %s", filename)
- if not self.dry_run:
- os.unlink(filename)
-
- def run(self):
- self.mkpath(self.egg_info)
+ data = data.encode("utf-8")
+ if not self.dry_run:
+ f = open(filename, 'wb')
+ f.write(data)
+ f.close()
+
+ def delete_file(self, filename):
+ """Delete `filename` (if not a dry run) after announcing it"""
+ log.info("deleting %s", filename)
+ if not self.dry_run:
+ os.unlink(filename)
+
+ def run(self):
+ self.mkpath(self.egg_info)
os.utime(self.egg_info, None)
- installer = self.distribution.fetch_build_egg
- for ep in iter_entry_points('egg_info.writers'):
- ep.require(installer=installer)
- writer = ep.resolve()
- writer(self, ep.name, os.path.join(self.egg_info, ep.name))
-
- # Get rid of native_libs.txt if it was put there by older bdist_egg
- nl = os.path.join(self.egg_info, "native_libs.txt")
- if os.path.exists(nl):
- self.delete_file(nl)
-
- self.find_sources()
-
- def find_sources(self):
- """Generate SOURCES.txt manifest file"""
- manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
- mm = manifest_maker(self.distribution)
- mm.manifest = manifest_filename
- mm.run()
- self.filelist = mm.filelist
-
- def check_broken_egg_info(self):
- bei = self.egg_name + '.egg-info'
- if self.egg_base != os.curdir:
- bei = os.path.join(self.egg_base, bei)
- if os.path.exists(bei):
- log.warn(
- "-" * 78 + '\n'
- "Note: Your current .egg-info directory has a '-' in its name;"
- '\nthis will not work correctly with "setup.py develop".\n\n'
- 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
- bei, self.egg_info
- )
- self.broken_egg_info = self.egg_info
- self.egg_info = bei # make it work for now
-
-
-class FileList(_FileList):
+ installer = self.distribution.fetch_build_egg
+ for ep in iter_entry_points('egg_info.writers'):
+ ep.require(installer=installer)
+ writer = ep.resolve()
+ writer(self, ep.name, os.path.join(self.egg_info, ep.name))
+
+ # Get rid of native_libs.txt if it was put there by older bdist_egg
+ nl = os.path.join(self.egg_info, "native_libs.txt")
+ if os.path.exists(nl):
+ self.delete_file(nl)
+
+ self.find_sources()
+
+ def find_sources(self):
+ """Generate SOURCES.txt manifest file"""
+ manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
+ mm = manifest_maker(self.distribution)
+ mm.manifest = manifest_filename
+ mm.run()
+ self.filelist = mm.filelist
+
+ def check_broken_egg_info(self):
+ bei = self.egg_name + '.egg-info'
+ if self.egg_base != os.curdir:
+ bei = os.path.join(self.egg_base, bei)
+ if os.path.exists(bei):
+ log.warn(
+ "-" * 78 + '\n'
+ "Note: Your current .egg-info directory has a '-' in its name;"
+ '\nthis will not work correctly with "setup.py develop".\n\n'
+ 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
+ bei, self.egg_info
+ )
+ self.broken_egg_info = self.egg_info
+ self.egg_info = bei # make it work for now
+
+
+class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
-
+
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
@@ -470,95 +470,95 @@ class FileList(_FileList):
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
- def append(self, item):
- if item.endswith('\r'): # Fix older sdists built on Windows
- item = item[:-1]
- path = convert_path(item)
-
- if self._safe_path(path):
- self.files.append(path)
-
- def extend(self, paths):
- self.files.extend(filter(self._safe_path, paths))
-
- def _repair(self):
- """
- Replace self.files with only safe paths
-
- Because some owners of FileList manipulate the underlying
- ``files`` attribute directly, this method must be called to
- repair those paths.
- """
- self.files = list(filter(self._safe_path, self.files))
-
- def _safe_path(self, path):
- enc_warn = "'%s' not %s encodable -- skipping"
-
- # To avoid accidental transcoding errors, decode to unicode first
- u_path = unicode_utils.filesys_decode(path)
- if u_path is None:
- log.warn("'%s' in unexpected encoding -- skipping" % path)
- return False
-
- # Must ensure utf-8 encodability
- utf8_path = unicode_utils.try_encode(u_path, "utf-8")
- if utf8_path is None:
- log.warn(enc_warn, path, 'utf-8')
- return False
-
- try:
- # accept if either way checks out
- if os.path.exists(u_path) or os.path.exists(utf8_path):
- return True
- # this will catch any encode errors from checking u_path
- except UnicodeEncodeError:
- log.warn(enc_warn, path, sys.getfilesystemencoding())
-
-
-class manifest_maker(sdist):
- template = "MANIFEST.in"
-
- def initialize_options(self):
- self.use_defaults = 1
- self.prune = 1
- self.manifest_only = 1
- self.force_manifest = 1
-
- def finalize_options(self):
- pass
-
- def run(self):
- self.filelist = FileList()
- if not os.path.exists(self.manifest):
- self.write_manifest() # it must exist so it'll get in the list
- self.add_defaults()
- if os.path.exists(self.template):
- self.read_template()
- self.prune_file_list()
- self.filelist.sort()
- self.filelist.remove_duplicates()
- self.write_manifest()
-
- def _manifest_normalize(self, path):
- path = unicode_utils.filesys_decode(path)
- return path.replace(os.sep, '/')
-
- def write_manifest(self):
- """
- Write the file list in 'self.filelist' to the manifest file
- named by 'self.manifest'.
- """
- self.filelist._repair()
-
- # Now _repair() has ensured encodability, though not necessarily unicode
- files = [self._manifest_normalize(f) for f in self.filelist.files]
- msg = "writing manifest file '%s'" % self.manifest
- self.execute(write_file, (self.manifest, files), msg)
-
+ def append(self, item):
+ if item.endswith('\r'): # Fix older sdists built on Windows
+ item = item[:-1]
+ path = convert_path(item)
+
+ if self._safe_path(path):
+ self.files.append(path)
+
+ def extend(self, paths):
+ self.files.extend(filter(self._safe_path, paths))
+
+ def _repair(self):
+ """
+ Replace self.files with only safe paths
+
+ Because some owners of FileList manipulate the underlying
+ ``files`` attribute directly, this method must be called to
+ repair those paths.
+ """
+ self.files = list(filter(self._safe_path, self.files))
+
+ def _safe_path(self, path):
+ enc_warn = "'%s' not %s encodable -- skipping"
+
+ # To avoid accidental transcoding errors, decode to unicode first
+ u_path = unicode_utils.filesys_decode(path)
+ if u_path is None:
+ log.warn("'%s' in unexpected encoding -- skipping" % path)
+ return False
+
+ # Must ensure utf-8 encodability
+ utf8_path = unicode_utils.try_encode(u_path, "utf-8")
+ if utf8_path is None:
+ log.warn(enc_warn, path, 'utf-8')
+ return False
+
+ try:
+ # accept if either way checks out
+ if os.path.exists(u_path) or os.path.exists(utf8_path):
+ return True
+ # this will catch any encode errors from checking u_path
+ except UnicodeEncodeError:
+ log.warn(enc_warn, path, sys.getfilesystemencoding())
+
+
+class manifest_maker(sdist):
+ template = "MANIFEST.in"
+
+ def initialize_options(self):
+ self.use_defaults = 1
+ self.prune = 1
+ self.manifest_only = 1
+ self.force_manifest = 1
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ self.filelist = FileList()
+ if not os.path.exists(self.manifest):
+ self.write_manifest() # it must exist so it'll get in the list
+ self.add_defaults()
+ if os.path.exists(self.template):
+ self.read_template()
+ self.prune_file_list()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def _manifest_normalize(self, path):
+ path = unicode_utils.filesys_decode(path)
+ return path.replace(os.sep, '/')
+
+ def write_manifest(self):
+ """
+ Write the file list in 'self.filelist' to the manifest file
+ named by 'self.manifest'.
+ """
+ self.filelist._repair()
+
+ # Now _repair() has ensured encodability, though not necessarily unicode
+ files = [self._manifest_normalize(f) for f in self.filelist.files]
+ msg = "writing manifest file '%s'" % self.manifest
+ self.execute(write_file, (self.manifest, files), msg)
+
def warn(self, msg):
if not self._should_suppress_warning(msg):
- sdist.warn(self, msg)
-
+ sdist.warn(self, msg)
+
@staticmethod
def _should_suppress_warning(msg):
"""
@@ -566,151 +566,151 @@ class manifest_maker(sdist):
"""
return re.match(r"standard file .*not found", msg)
- def add_defaults(self):
- sdist.add_defaults(self)
+ def add_defaults(self):
+ sdist.add_defaults(self)
self.check_license()
- self.filelist.append(self.template)
- self.filelist.append(self.manifest)
- rcfiles = list(walk_revctrl())
- if rcfiles:
- self.filelist.extend(rcfiles)
- elif os.path.exists(self.manifest):
- self.read_manifest()
+ self.filelist.append(self.template)
+ self.filelist.append(self.manifest)
+ rcfiles = list(walk_revctrl())
+ if rcfiles:
+ self.filelist.extend(rcfiles)
+ elif os.path.exists(self.manifest):
+ self.read_manifest()
if os.path.exists("setup.py"):
# setup.py should be included by default, even if it's not
# the script called to create the sdist
self.filelist.append("setup.py")
- ei_cmd = self.get_finalized_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
-
- def prune_file_list(self):
- build = self.get_finalized_command('build')
- base_dir = self.distribution.get_fullname()
+
+ def prune_file_list(self):
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
- sep = re.escape(os.sep)
- self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
- is_regex=1)
-
-
-def write_file(filename, contents):
- """Create a file with the specified name and write 'contents' (a
- sequence of strings without line terminators) to it.
- """
- contents = "\n".join(contents)
-
- # assuming the contents have been vetted for utf-8 encoding
- contents = contents.encode("utf-8")
-
- with open(filename, "wb") as f: # always write POSIX-style manifest
- f.write(contents)
-
-
-def write_pkg_info(cmd, basename, filename):
- log.info("writing %s", filename)
- if not cmd.dry_run:
- metadata = cmd.distribution.metadata
- metadata.version, oldver = cmd.egg_version, metadata.version
- metadata.name, oldname = cmd.egg_name, metadata.name
-
- try:
- # write unescaped data to PKG-INFO, so older pkg_resources
- # can still parse it
- metadata.write_pkg_info(cmd.egg_info)
- finally:
- metadata.name, metadata.version = oldname, oldver
-
- safe = getattr(cmd.distribution, 'zip_safe', None)
-
- bdist_egg.write_safety_flag(cmd.egg_info, safe)
-
-
-def warn_depends_obsolete(cmd, basename, filename):
- if os.path.exists(filename):
- log.warn(
- "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
-
-def _write_requirements(stream, reqs):
- lines = yield_lines(reqs or ())
- append_cr = lambda line: line + '\n'
- lines = map(append_cr, lines)
- stream.writelines(lines)
-
-
-def write_requirements(cmd, basename, filename):
- dist = cmd.distribution
- data = six.StringIO()
- _write_requirements(data, dist.install_requires)
- extras_require = dist.extras_require or {}
- for extra in sorted(extras_require):
- data.write('\n[{extra}]\n'.format(**vars()))
- _write_requirements(data, extras_require[extra])
- cmd.write_or_delete_file("requirements", filename, data.getvalue())
-
-
-def write_setup_requirements(cmd, basename, filename):
+ sep = re.escape(os.sep)
+ self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
+ is_regex=1)
+
+
+def write_file(filename, contents):
+ """Create a file with the specified name and write 'contents' (a
+ sequence of strings without line terminators) to it.
+ """
+ contents = "\n".join(contents)
+
+ # assuming the contents have been vetted for utf-8 encoding
+ contents = contents.encode("utf-8")
+
+ with open(filename, "wb") as f: # always write POSIX-style manifest
+ f.write(contents)
+
+
+def write_pkg_info(cmd, basename, filename):
+ log.info("writing %s", filename)
+ if not cmd.dry_run:
+ metadata = cmd.distribution.metadata
+ metadata.version, oldver = cmd.egg_version, metadata.version
+ metadata.name, oldname = cmd.egg_name, metadata.name
+
+ try:
+ # write unescaped data to PKG-INFO, so older pkg_resources
+ # can still parse it
+ metadata.write_pkg_info(cmd.egg_info)
+ finally:
+ metadata.name, metadata.version = oldname, oldver
+
+ safe = getattr(cmd.distribution, 'zip_safe', None)
+
+ bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+
+def warn_depends_obsolete(cmd, basename, filename):
+ if os.path.exists(filename):
+ log.warn(
+ "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+
+def _write_requirements(stream, reqs):
+ lines = yield_lines(reqs or ())
+ append_cr = lambda line: line + '\n'
+ lines = map(append_cr, lines)
+ stream.writelines(lines)
+
+
+def write_requirements(cmd, basename, filename):
+ dist = cmd.distribution
+ data = six.StringIO()
+ _write_requirements(data, dist.install_requires)
+ extras_require = dist.extras_require or {}
+ for extra in sorted(extras_require):
+ data.write('\n[{extra}]\n'.format(**vars()))
+ _write_requirements(data, extras_require[extra])
+ cmd.write_or_delete_file("requirements", filename, data.getvalue())
+
+
+def write_setup_requirements(cmd, basename, filename):
data = io.StringIO()
- _write_requirements(data, cmd.distribution.setup_requires)
- cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
-
-
-def write_toplevel_names(cmd, basename, filename):
- pkgs = dict.fromkeys(
- [
- k.split('.', 1)[0]
- for k in cmd.distribution.iter_distribution_names()
- ]
- )
- cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
-
-
-def overwrite_arg(cmd, basename, filename):
- write_arg(cmd, basename, filename, True)
-
-
-def write_arg(cmd, basename, filename, force=False):
- argname = os.path.splitext(basename)[0]
- value = getattr(cmd.distribution, argname, None)
- if value is not None:
- value = '\n'.join(value) + '\n'
- cmd.write_or_delete_file(argname, filename, value, force)
-
-
-def write_entries(cmd, basename, filename):
- ep = cmd.distribution.entry_points
-
- if isinstance(ep, six.string_types) or ep is None:
- data = ep
- elif ep is not None:
- data = []
- for section, contents in sorted(ep.items()):
- if not isinstance(contents, six.string_types):
- contents = EntryPoint.parse_group(section, contents)
- contents = '\n'.join(sorted(map(str, contents.values())))
- data.append('[%s]\n%s\n\n' % (section, contents))
- data = ''.join(data)
-
- cmd.write_or_delete_file('entry points', filename, data, True)
-
-
-def get_pkg_info_revision():
- """
- Get a -r### off of PKG-INFO Version in case this is an sdist of
- a subversion revision.
- """
+ _write_requirements(data, cmd.distribution.setup_requires)
+ cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
+
+
+def write_toplevel_names(cmd, basename, filename):
+ pkgs = dict.fromkeys(
+ [
+ k.split('.', 1)[0]
+ for k in cmd.distribution.iter_distribution_names()
+ ]
+ )
+ cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
+
+
+def overwrite_arg(cmd, basename, filename):
+ write_arg(cmd, basename, filename, True)
+
+
+def write_arg(cmd, basename, filename, force=False):
+ argname = os.path.splitext(basename)[0]
+ value = getattr(cmd.distribution, argname, None)
+ if value is not None:
+ value = '\n'.join(value) + '\n'
+ cmd.write_or_delete_file(argname, filename, value, force)
+
+
+def write_entries(cmd, basename, filename):
+ ep = cmd.distribution.entry_points
+
+ if isinstance(ep, six.string_types) or ep is None:
+ data = ep
+ elif ep is not None:
+ data = []
+ for section, contents in sorted(ep.items()):
+ if not isinstance(contents, six.string_types):
+ contents = EntryPoint.parse_group(section, contents)
+ contents = '\n'.join(sorted(map(str, contents.values())))
+ data.append('[%s]\n%s\n\n' % (section, contents))
+ data = ''.join(data)
+
+ cmd.write_or_delete_file('entry points', filename, data, True)
+
+
+def get_pkg_info_revision():
+ """
+ Get a -r### off of PKG-INFO Version in case this is an sdist of
+ a subversion revision.
+ """
warnings.warn("get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
- if os.path.exists('PKG-INFO'):
- with io.open('PKG-INFO') as f:
- for line in f:
- match = re.match(r"Version:.*-r(\d+)\s*$", line)
- if match:
- return int(match.group(1))
- return 0
+ if os.path.exists('PKG-INFO'):
+ with io.open('PKG-INFO') as f:
+ for line in f:
+ match = re.match(r"Version:.*-r(\d+)\s*$", line)
+ if match:
+ return int(match.group(1))
+ return 0
class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
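run() above treats each metadata file as a plugin: every entry point in the 'egg_info.writers' group resolves to a callable taking (cmd, basename, filename), and write_or_delete_file keeps stale files from surviving when the data disappears. A hypothetical writer following that contract; 'dummy.txt', 'dummy_text', and 'mypkg.writers' are illustrative names, not part of setuptools:

    def write_dummy(cmd, basename, filename):
        # Invoked once per egg_info run; empty or None data deletes
        # a previously written file instead of leaving it behind.
        data = getattr(cmd.distribution, 'dummy_text', None)
        cmd.write_or_delete_file('dummy text', filename, data)

    # Registered by a distribution through its entry points, e.g.:
    #   entry_points={
    #       'egg_info.writers': ['dummy.txt = mypkg.writers:write_dummy'],
    #   }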
diff --git a/contrib/python/setuptools/py2/setuptools/command/install.py b/contrib/python/setuptools/py2/setuptools/command/install.py
index 236862a647..72b9a3e424 100644
--- a/contrib/python/setuptools/py2/setuptools/command/install.py
+++ b/contrib/python/setuptools/py2/setuptools/command/install.py
@@ -1,125 +1,125 @@
-from distutils.errors import DistutilsArgError
-import inspect
-import glob
-import warnings
-import platform
-import distutils.command.install as orig
-
-import setuptools
-
-# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
+from distutils.errors import DistutilsArgError
+import inspect
+import glob
+import warnings
+import platform
+import distutils.command.install as orig
+
+import setuptools
+
+# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
-_install = orig.install
-
-
-class install(orig.install):
- """Use easy_install to install the package, w/dependencies"""
-
- user_options = orig.install.user_options + [
- ('old-and-unmanageable', None, "Try not to use this!"),
- ('single-version-externally-managed', None,
- "used by system package builders to create 'flat' eggs"),
- ]
- boolean_options = orig.install.boolean_options + [
- 'old-and-unmanageable', 'single-version-externally-managed',
- ]
- new_commands = [
- ('install_egg_info', lambda self: True),
- ('install_scripts', lambda self: True),
- ]
- _nc = dict(new_commands)
-
- def initialize_options(self):
- orig.install.initialize_options(self)
- self.old_and_unmanageable = None
- self.single_version_externally_managed = None
-
- def finalize_options(self):
- orig.install.finalize_options(self)
- if self.root:
- self.single_version_externally_managed = True
- elif self.single_version_externally_managed:
- if not self.root and not self.record:
- raise DistutilsArgError(
- "You must specify --record or --root when building system"
- " packages"
- )
-
- def handle_extra_path(self):
- if self.root or self.single_version_externally_managed:
- # explicit backward-compatibility mode, allow extra_path to work
- return orig.install.handle_extra_path(self)
-
- # Ignore extra_path when installing an egg (or being run by another
- # command without --root or --single-version-externally-managed)
- self.path_file = None
- self.extra_dirs = ''
-
- def run(self):
- # Explicit request for old-style install? Just do it
- if self.old_and_unmanageable or self.single_version_externally_managed:
- return orig.install.run(self)
-
- if not self._called_from_setup(inspect.currentframe()):
- # Run in backward-compatibility mode to support bdist_* commands.
- orig.install.run(self)
- else:
- self.do_egg_install()
-
- @staticmethod
- def _called_from_setup(run_frame):
- """
- Attempt to detect whether run() was called from setup() or by another
- command. If called by setup(), the parent caller will be the
- 'run_command' method in 'distutils.dist', and *its* caller will be
- the 'run_commands' method. If called any other way, the
- immediate caller *might* be 'run_command', but it won't have been
- called by 'run_commands'. Return True in that case or if a call stack
- is unavailable. Return False otherwise.
- """
- if run_frame is None:
- msg = "Call stack not available. bdist_* commands may fail."
- warnings.warn(msg)
- if platform.python_implementation() == 'IronPython':
- msg = "For best results, pass -X:Frames to enable call stack."
- warnings.warn(msg)
- return True
- res = inspect.getouterframes(run_frame)[2]
- caller, = res[:1]
- info = inspect.getframeinfo(caller)
- caller_module = caller.f_globals.get('__name__', '')
- return (
- caller_module == 'distutils.dist'
- and info.function == 'run_commands'
- )
-
- def do_egg_install(self):
-
- easy_install = self.distribution.get_command_class('easy_install')
-
- cmd = easy_install(
- self.distribution, args="x", root=self.root, record=self.record,
- )
- cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
- cmd.always_copy_from = '.' # make sure local-dir eggs get installed
-
- # pick up setup-dir .egg files only: no .egg-info
- cmd.package_index.scan(glob.glob('*.egg'))
-
- self.run_command('bdist_egg')
- args = [self.distribution.get_command_obj('bdist_egg').egg_output]
-
- if setuptools.bootstrap_install_from:
- # Bootstrap self-installation of setuptools
- args.insert(0, setuptools.bootstrap_install_from)
-
- cmd.args = args
+_install = orig.install
+
+
+class install(orig.install):
+ """Use easy_install to install the package, w/dependencies"""
+
+ user_options = orig.install.user_options + [
+ ('old-and-unmanageable', None, "Try not to use this!"),
+ ('single-version-externally-managed', None,
+ "used by system package builders to create 'flat' eggs"),
+ ]
+ boolean_options = orig.install.boolean_options + [
+ 'old-and-unmanageable', 'single-version-externally-managed',
+ ]
+ new_commands = [
+ ('install_egg_info', lambda self: True),
+ ('install_scripts', lambda self: True),
+ ]
+ _nc = dict(new_commands)
+
+ def initialize_options(self):
+ orig.install.initialize_options(self)
+ self.old_and_unmanageable = None
+ self.single_version_externally_managed = None
+
+ def finalize_options(self):
+ orig.install.finalize_options(self)
+ if self.root:
+ self.single_version_externally_managed = True
+ elif self.single_version_externally_managed:
+ if not self.root and not self.record:
+ raise DistutilsArgError(
+ "You must specify --record or --root when building system"
+ " packages"
+ )
+
+ def handle_extra_path(self):
+ if self.root or self.single_version_externally_managed:
+ # explicit backward-compatibility mode, allow extra_path to work
+ return orig.install.handle_extra_path(self)
+
+ # Ignore extra_path when installing an egg (or being run by another
+ # command without --root or --single-version-externally-managed)
+ self.path_file = None
+ self.extra_dirs = ''
+
+ def run(self):
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return orig.install.run(self)
+
+ if not self._called_from_setup(inspect.currentframe()):
+ # Run in backward-compatibility mode to support bdist_* commands.
+ orig.install.run(self)
+ else:
+ self.do_egg_install()
+
+ @staticmethod
+ def _called_from_setup(run_frame):
+ """
+ Attempt to detect whether run() was called from setup() or by another
+ command. If called by setup(), the parent caller will be the
+ 'run_command' method in 'distutils.dist', and *its* caller will be
+ the 'run_commands' method. If called any other way, the
+ immediate caller *might* be 'run_command', but it won't have been
+ called by 'run_commands'. Return True in that case or if a call stack
+ is unavailable. Return False otherwise.
+ """
+ if run_frame is None:
+ msg = "Call stack not available. bdist_* commands may fail."
+ warnings.warn(msg)
+ if platform.python_implementation() == 'IronPython':
+ msg = "For best results, pass -X:Frames to enable call stack."
+ warnings.warn(msg)
+ return True
+ res = inspect.getouterframes(run_frame)[2]
+ caller, = res[:1]
+ info = inspect.getframeinfo(caller)
+ caller_module = caller.f_globals.get('__name__', '')
+ return (
+ caller_module == 'distutils.dist'
+ and info.function == 'run_commands'
+ )
+
+ def do_egg_install(self):
+
+ easy_install = self.distribution.get_command_class('easy_install')
+
+ cmd = easy_install(
+ self.distribution, args="x", root=self.root, record=self.record,
+ )
+ cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
+ cmd.always_copy_from = '.' # make sure local-dir eggs get installed
+
+ # pick up setup-dir .egg files only: no .egg-info
+ cmd.package_index.scan(glob.glob('*.egg'))
+
+ self.run_command('bdist_egg')
+ args = [self.distribution.get_command_obj('bdist_egg').egg_output]
+
+ if setuptools.bootstrap_install_from:
+ # Bootstrap self-installation of setuptools
+ args.insert(0, setuptools.bootstrap_install_from)
+
+ cmd.args = args
cmd.run(show_deprecation=False)
- setuptools.bootstrap_install_from = None
-
-
-# XXX Python 3.1 doesn't see _nc if this is inside the class
-install.sub_commands = (
- [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
- install.new_commands
-)
+ setuptools.bootstrap_install_from = None
+
+
+# XXX Python 3.1 doesn't see _nc if this is inside the class
+install.sub_commands = (
+ [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
+ install.new_commands
+)
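_called_from_setup distinguishes callers by stack inspection: two frames above run(), a call routed through distutils lands in run_commands() of distutils.dist. A standalone sketch of that lookup, assuming CPython frames are available:

    import inspect

    def caller_two_up(frame):
        # Report the module and function two frames above 'frame',
        # the same position install._called_from_setup examines.
        caller = inspect.getouterframes(frame)[2][0]
        info = inspect.getframeinfo(caller)
        return caller.f_globals.get('__name__', ''), info.function

    def inner():
        return caller_two_up(inspect.currentframe())

    def outer():
        return inner()

    print(outer())  # -> ('__main__', '<module>') when run as a script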
diff --git a/contrib/python/setuptools/py2/setuptools/command/install_egg_info.py b/contrib/python/setuptools/py2/setuptools/command/install_egg_info.py
index 10bdb832a9..edc4718b68 100644
--- a/contrib/python/setuptools/py2/setuptools/command/install_egg_info.py
+++ b/contrib/python/setuptools/py2/setuptools/command/install_egg_info.py
@@ -1,62 +1,62 @@
-from distutils import log, dir_util
-import os
-
-from setuptools import Command
+from distutils import log, dir_util
+import os
+
+from setuptools import Command
from setuptools import namespaces
-from setuptools.archive_util import unpack_archive
-import pkg_resources
-
-
+from setuptools.archive_util import unpack_archive
+import pkg_resources
+
+
class install_egg_info(namespaces.Installer, Command):
- """Install an .egg-info directory for the package"""
-
- description = "Install an .egg-info directory for the package"
-
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ]
-
- def initialize_options(self):
- self.install_dir = None
-
- def finalize_options(self):
- self.set_undefined_options('install_lib',
- ('install_dir', 'install_dir'))
- ei_cmd = self.get_finalized_command("egg_info")
- basename = pkg_resources.Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version
- ).egg_name() + '.egg-info'
- self.source = ei_cmd.egg_info
- self.target = os.path.join(self.install_dir, basename)
+ """Install an .egg-info directory for the package"""
+
+ description = "Install an .egg-info directory for the package"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',
+ ('install_dir', 'install_dir'))
+ ei_cmd = self.get_finalized_command("egg_info")
+ basename = pkg_resources.Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version
+ ).egg_name() + '.egg-info'
+ self.source = ei_cmd.egg_info
+ self.target = os.path.join(self.install_dir, basename)
self.outputs = []
-
- def run(self):
- self.run_command('egg_info')
- if os.path.isdir(self.target) and not os.path.islink(self.target):
- dir_util.remove_tree(self.target, dry_run=self.dry_run)
- elif os.path.exists(self.target):
- self.execute(os.unlink, (self.target,), "Removing " + self.target)
- if not self.dry_run:
- pkg_resources.ensure_directory(self.target)
- self.execute(
- self.copytree, (), "Copying %s to %s" % (self.source, self.target)
- )
- self.install_namespaces()
-
- def get_outputs(self):
- return self.outputs
-
- def copytree(self):
- # Copy the .egg-info tree to site-packages
- def skimmer(src, dst):
- # filter out source-control directories; note that 'src' is always
- # a '/'-separated path, regardless of platform. 'dst' is a
- # platform-specific path.
- for skip in '.svn/', 'CVS/':
- if src.startswith(skip) or '/' + skip in src:
- return None
- self.outputs.append(dst)
- log.debug("Copying %s to %s", src, dst)
- return dst
-
- unpack_archive(self.source, self.target, skimmer)
+
+ def run(self):
+ self.run_command('egg_info')
+ if os.path.isdir(self.target) and not os.path.islink(self.target):
+ dir_util.remove_tree(self.target, dry_run=self.dry_run)
+ elif os.path.exists(self.target):
+ self.execute(os.unlink, (self.target,), "Removing " + self.target)
+ if not self.dry_run:
+ pkg_resources.ensure_directory(self.target)
+ self.execute(
+ self.copytree, (), "Copying %s to %s" % (self.source, self.target)
+ )
+ self.install_namespaces()
+
+ def get_outputs(self):
+ return self.outputs
+
+ def copytree(self):
+ # Copy the .egg-info tree to site-packages
+ def skimmer(src, dst):
+ # filter out source-control directories; note that 'src' is always
+ # a '/'-separated path, regardless of platform. 'dst' is a
+ # platform-specific path.
+ for skip in '.svn/', 'CVS/':
+ if src.startswith(skip) or '/' + skip in src:
+ return None
+ self.outputs.append(dst)
+ log.debug("Copying %s to %s", src, dst)
+ return dst
+
+ unpack_archive(self.source, self.target, skimmer)
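The skimmer callback is the filter contract unpack_archive expects: return the destination to copy a file, or None to skip it, with 'src' always '/'-separated. The same VCS-filtering test, extracted into a minimal sketch:

    def keep(src, dst, outputs, skip_dirs=('.svn/', 'CVS/')):
        # None means "skip"; anything else is the copy destination.
        for skip in skip_dirs:
            if src.startswith(skip) or '/' + skip in src:
                return None
        outputs.append(dst)
        return dst

    outputs = []
    assert keep('pkg/__init__.py', '/tmp/t/pkg/__init__.py', outputs)
    assert keep('.svn/entries', '/tmp/t/.svn/entries', outputs) is None
    assert outputs == ['/tmp/t/pkg/__init__.py']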
diff --git a/contrib/python/setuptools/py2/setuptools/command/install_lib.py b/contrib/python/setuptools/py2/setuptools/command/install_lib.py
index 96f37ab050..07d6593309 100644
--- a/contrib/python/setuptools/py2/setuptools/command/install_lib.py
+++ b/contrib/python/setuptools/py2/setuptools/command/install_lib.py
@@ -1,121 +1,121 @@
-import os
+import os
import sys
-from itertools import product, starmap
-import distutils.command.install_lib as orig
-
-
-class install_lib(orig.install_lib):
- """Don't add compiled flags to filenames of non-Python files"""
-
- def run(self):
- self.build()
- outfiles = self.install()
- if outfiles is not None:
- # always compile, in case we have any extension stubs to deal with
- self.byte_compile(outfiles)
-
- def get_exclusions(self):
- """
- Return a collections.Sized collections.Container of paths to be
- excluded for single_version_externally_managed installations.
- """
- all_packages = (
- pkg
- for ns_pkg in self._get_SVEM_NSPs()
- for pkg in self._all_packages(ns_pkg)
- )
-
- excl_specs = product(all_packages, self._gen_exclusion_paths())
- return set(starmap(self._exclude_pkg_path, excl_specs))
-
- def _exclude_pkg_path(self, pkg, exclusion_path):
- """
- Given a package name and exclusion path within that package,
- compute the full exclusion path.
- """
- parts = pkg.split('.') + [exclusion_path]
- return os.path.join(self.install_dir, *parts)
-
- @staticmethod
- def _all_packages(pkg_name):
- """
- >>> list(install_lib._all_packages('foo.bar.baz'))
- ['foo.bar.baz', 'foo.bar', 'foo']
- """
- while pkg_name:
- yield pkg_name
- pkg_name, sep, child = pkg_name.rpartition('.')
-
- def _get_SVEM_NSPs(self):
- """
- Get namespace packages (list) but only for
- single_version_externally_managed installations and empty otherwise.
- """
- # TODO: is it necessary to short-circuit here? i.e. what's the cost
- # if get_finalized_command is called even when namespace_packages is
- # False?
- if not self.distribution.namespace_packages:
- return []
-
- install_cmd = self.get_finalized_command('install')
- svem = install_cmd.single_version_externally_managed
-
- return self.distribution.namespace_packages if svem else []
-
- @staticmethod
- def _gen_exclusion_paths():
- """
- Generate file paths to be excluded for namespace packages (bytecode
- cache files).
- """
- # always exclude the package module itself
- yield '__init__.py'
-
- yield '__init__.pyc'
- yield '__init__.pyo'
-
+from itertools import product, starmap
+import distutils.command.install_lib as orig
+
+
+class install_lib(orig.install_lib):
+ """Don't add compiled flags to filenames of non-Python files"""
+
+ def run(self):
+ self.build()
+ outfiles = self.install()
+ if outfiles is not None:
+ # always compile, in case we have any extension stubs to deal with
+ self.byte_compile(outfiles)
+
+ def get_exclusions(self):
+ """
+ Return a collections.Sized collections.Container of paths to be
+ excluded for single_version_externally_managed installations.
+ """
+ all_packages = (
+ pkg
+ for ns_pkg in self._get_SVEM_NSPs()
+ for pkg in self._all_packages(ns_pkg)
+ )
+
+ excl_specs = product(all_packages, self._gen_exclusion_paths())
+ return set(starmap(self._exclude_pkg_path, excl_specs))
+
+ def _exclude_pkg_path(self, pkg, exclusion_path):
+ """
+ Given a package name and exclusion path within that package,
+ compute the full exclusion path.
+ """
+ parts = pkg.split('.') + [exclusion_path]
+ return os.path.join(self.install_dir, *parts)
+
+ @staticmethod
+ def _all_packages(pkg_name):
+ """
+ >>> list(install_lib._all_packages('foo.bar.baz'))
+ ['foo.bar.baz', 'foo.bar', 'foo']
+ """
+ while pkg_name:
+ yield pkg_name
+ pkg_name, sep, child = pkg_name.rpartition('.')
+
+ def _get_SVEM_NSPs(self):
+ """
+ Get namespace packages (list) but only for
+ single_version_externally_managed installations and empty otherwise.
+ """
+ # TODO: is it necessary to short-circuit here? i.e. what's the cost
+ # if get_finalized_command is called even when namespace_packages is
+ # False?
+ if not self.distribution.namespace_packages:
+ return []
+
+ install_cmd = self.get_finalized_command('install')
+ svem = install_cmd.single_version_externally_managed
+
+ return self.distribution.namespace_packages if svem else []
+
+ @staticmethod
+ def _gen_exclusion_paths():
+ """
+ Generate file paths to be excluded for namespace packages (bytecode
+ cache files).
+ """
+ # always exclude the package module itself
+ yield '__init__.py'
+
+ yield '__init__.pyc'
+ yield '__init__.pyo'
+
if not hasattr(sys, 'implementation'):
- return
-
+ return
+
base = os.path.join('__pycache__', '__init__.' + sys.implementation.cache_tag)
- yield base + '.pyc'
- yield base + '.pyo'
- yield base + '.opt-1.pyc'
- yield base + '.opt-2.pyc'
-
- def copy_tree(
- self, infile, outfile,
- preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
- ):
- assert preserve_mode and preserve_times and not preserve_symlinks
- exclude = self.get_exclusions()
-
- if not exclude:
- return orig.install_lib.copy_tree(self, infile, outfile)
-
- # Exclude namespace package __init__.py* files from the output
-
- from setuptools.archive_util import unpack_directory
- from distutils import log
-
- outfiles = []
-
- def pf(src, dst):
- if dst in exclude:
- log.warn("Skipping installation of %s (namespace package)",
- dst)
- return False
-
- log.info("copying %s -> %s", src, os.path.dirname(dst))
- outfiles.append(dst)
- return dst
-
- unpack_directory(infile, outfile, pf)
- return outfiles
-
- def get_outputs(self):
- outputs = orig.install_lib.get_outputs(self)
- exclude = self.get_exclusions()
- if exclude:
- return [f for f in outputs if f not in exclude]
- return outputs
+ yield base + '.pyc'
+ yield base + '.pyo'
+ yield base + '.opt-1.pyc'
+ yield base + '.opt-2.pyc'
+
+ def copy_tree(
+ self, infile, outfile,
+ preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+ ):
+ assert preserve_mode and preserve_times and not preserve_symlinks
+ exclude = self.get_exclusions()
+
+ if not exclude:
+ return orig.install_lib.copy_tree(self, infile, outfile)
+
+ # Exclude namespace package __init__.py* files from the output
+
+ from setuptools.archive_util import unpack_directory
+ from distutils import log
+
+ outfiles = []
+
+ def pf(src, dst):
+ if dst in exclude:
+ log.warn("Skipping installation of %s (namespace package)",
+ dst)
+ return False
+
+ log.info("copying %s -> %s", src, os.path.dirname(dst))
+ outfiles.append(dst)
+ return dst
+
+ unpack_directory(infile, outfile, pf)
+ return outfiles
+
+ def get_outputs(self):
+ outputs = orig.install_lib.get_outputs(self)
+ exclude = self.get_exclusions()
+ if exclude:
+ return [f for f in outputs if f not in exclude]
+ return outputs
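get_exclusions() composes two generators: every parent package of each namespace package, crossed with every generated exclusion filename. A compact sketch of that product/starmap composition, with illustrative paths:

    import os
    from itertools import product, starmap

    def all_packages(pkg_name):
        # 'foo.bar.baz' -> foo.bar.baz, foo.bar, foo
        while pkg_name:
            yield pkg_name
            pkg_name = pkg_name.rpartition('.')[0]

    def exclusions(install_dir, ns_pkgs, filenames):
        pkgs = (p for ns in ns_pkgs for p in all_packages(ns))
        join = lambda pkg, fn: os.path.join(install_dir, *(pkg.split('.') + [fn]))
        return set(starmap(join, product(pkgs, filenames)))

    print(sorted(exclusions('/site', ['a.b'], ['__init__.py'])))
    # -> ['/site/a/__init__.py', '/site/a/b/__init__.py']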
diff --git a/contrib/python/setuptools/py2/setuptools/command/install_scripts.py b/contrib/python/setuptools/py2/setuptools/command/install_scripts.py
index 4902f2c621..16234273a2 100644
--- a/contrib/python/setuptools/py2/setuptools/command/install_scripts.py
+++ b/contrib/python/setuptools/py2/setuptools/command/install_scripts.py
@@ -1,65 +1,65 @@
-from distutils import log
-import distutils.command.install_scripts as orig
-import os
+from distutils import log
+import distutils.command.install_scripts as orig
+import os
import sys
-
-from pkg_resources import Distribution, PathMetadata, ensure_directory
-
-
-class install_scripts(orig.install_scripts):
- """Do normal script install, plus any egg_info wrapper scripts"""
-
- def initialize_options(self):
- orig.install_scripts.initialize_options(self)
- self.no_ep = False
-
- def run(self):
- import setuptools.command.easy_install as ei
-
- self.run_command("egg_info")
- if self.distribution.scripts:
- orig.install_scripts.run(self) # run first to set up self.outfiles
- else:
- self.outfiles = []
- if self.no_ep:
- # don't install entry point scripts into .egg file!
- return
-
- ei_cmd = self.get_finalized_command("egg_info")
- dist = Distribution(
- ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
- ei_cmd.egg_name, ei_cmd.egg_version,
- )
- bs_cmd = self.get_finalized_command('build_scripts')
- exec_param = getattr(bs_cmd, 'executable', None)
- bw_cmd = self.get_finalized_command("bdist_wininst")
- is_wininst = getattr(bw_cmd, '_is_running', False)
- writer = ei.ScriptWriter
- if is_wininst:
- exec_param = "python.exe"
- writer = ei.WindowsScriptWriter
+
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+
+
+class install_scripts(orig.install_scripts):
+ """Do normal script install, plus any egg_info wrapper scripts"""
+
+ def initialize_options(self):
+ orig.install_scripts.initialize_options(self)
+ self.no_ep = False
+
+ def run(self):
+ import setuptools.command.easy_install as ei
+
+ self.run_command("egg_info")
+ if self.distribution.scripts:
+ orig.install_scripts.run(self) # run first to set up self.outfiles
+ else:
+ self.outfiles = []
+ if self.no_ep:
+ # don't install entry point scripts into .egg file!
+ return
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ dist = Distribution(
+ ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+ ei_cmd.egg_name, ei_cmd.egg_version,
+ )
+ bs_cmd = self.get_finalized_command('build_scripts')
+ exec_param = getattr(bs_cmd, 'executable', None)
+ bw_cmd = self.get_finalized_command("bdist_wininst")
+ is_wininst = getattr(bw_cmd, '_is_running', False)
+ writer = ei.ScriptWriter
+ if is_wininst:
+ exec_param = "python.exe"
+ writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
- # resolve the writer to the environment
- writer = writer.best()
- cmd = writer.command_spec_class.best().from_param(exec_param)
- for args in writer.get_args(dist, cmd.as_header()):
- self.write_script(*args)
-
- def write_script(self, script_name, contents, mode="t", *ignored):
- """Write an executable file to the scripts directory"""
- from setuptools.command.easy_install import chmod, current_umask
-
- log.info("Installing %s script to %s", script_name, self.install_dir)
- target = os.path.join(self.install_dir, script_name)
- self.outfiles.append(target)
-
- mask = current_umask()
- if not self.dry_run:
- ensure_directory(target)
- f = open(target, "w" + mode)
- f.write(contents)
- f.close()
- chmod(target, 0o777 - mask)
+ # resolve the writer to the environment
+ writer = writer.best()
+ cmd = writer.command_spec_class.best().from_param(exec_param)
+ for args in writer.get_args(dist, cmd.as_header()):
+ self.write_script(*args)
+
+ def write_script(self, script_name, contents, mode="t", *ignored):
+ """Write an executable file to the scripts directory"""
+ from setuptools.command.easy_install import chmod, current_umask
+
+ log.info("Installing %s script to %s", script_name, self.install_dir)
+ target = os.path.join(self.install_dir, script_name)
+ self.outfiles.append(target)
+
+ mask = current_umask()
+ if not self.dry_run:
+ ensure_directory(target)
+ f = open(target, "w" + mode)
+ f.write(contents)
+ f.close()
+ chmod(target, 0o777 - mask)
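write_script derives script permissions from the process umask, read non-destructively by setting a throwaway value and immediately restoring the original, then subtracting the mask from 0o777 exactly as the code above does:

    import os

    def current_umask():
        # os.umask both sets and returns the previous mask.
        tmp = os.umask(0o022)
        os.umask(tmp)
        return tmp

    mode = 0o777 - current_umask()
    print(oct(mode))  # e.g. 0o755 under the common 0o022 umask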
diff --git a/contrib/python/setuptools/py2/setuptools/command/register.py b/contrib/python/setuptools/py2/setuptools/command/register.py
index dbe5e619f7..b8266b9a60 100644
--- a/contrib/python/setuptools/py2/setuptools/command/register.py
+++ b/contrib/python/setuptools/py2/setuptools/command/register.py
@@ -1,13 +1,13 @@
from distutils import log
-import distutils.command.register as orig
-
+import distutils.command.register as orig
+
from setuptools.errors import RemovedCommandError
-
-class register(orig.register):
+
+class register(orig.register):
"""Formerly used to register packages on PyPI."""
-
- def run(self):
+
+ def run(self):
msg = (
"The register command has been removed, use twine to upload "
+ "instead (https://pypi.org/p/twine)"
diff --git a/contrib/python/setuptools/py2/setuptools/command/rotate.py b/contrib/python/setuptools/py2/setuptools/command/rotate.py
index 3e873a97df..b89353f529 100644
--- a/contrib/python/setuptools/py2/setuptools/command/rotate.py
+++ b/contrib/python/setuptools/py2/setuptools/command/rotate.py
@@ -1,65 +1,65 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
import shutil
-
+
from setuptools.extern import six
-
-from setuptools import Command
-
-
-class rotate(Command):
- """Delete older distributions"""
-
- description = "delete older distributions, keeping N newest files"
- user_options = [
- ('match=', 'm', "patterns to match (required)"),
- ('dist-dir=', 'd', "directory where the distributions are"),
- ('keep=', 'k', "number of matching distributions to keep"),
- ]
-
- boolean_options = []
-
- def initialize_options(self):
- self.match = None
- self.dist_dir = None
- self.keep = None
-
- def finalize_options(self):
- if self.match is None:
- raise DistutilsOptionError(
- "Must specify one or more (comma-separated) match patterns "
- "(e.g. '.zip' or '.egg')"
- )
- if self.keep is None:
- raise DistutilsOptionError("Must specify number of files to keep")
- try:
- self.keep = int(self.keep)
- except ValueError:
- raise DistutilsOptionError("--keep must be an integer")
- if isinstance(self.match, six.string_types):
- self.match = [
- convert_path(p.strip()) for p in self.match.split(',')
- ]
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- def run(self):
- self.run_command("egg_info")
- from glob import glob
-
- for pattern in self.match:
- pattern = self.distribution.get_name() + '*' + pattern
- files = glob(os.path.join(self.dist_dir, pattern))
- files = [(os.path.getmtime(f), f) for f in files]
- files.sort()
- files.reverse()
-
- log.info("%d file(s) matching %s", len(files), pattern)
- files = files[self.keep:]
- for (t, f) in files:
- log.info("Deleting %s", f)
- if not self.dry_run:
+
+from setuptools import Command
+
+
+class rotate(Command):
+ """Delete older distributions"""
+
+ description = "delete older distributions, keeping N newest files"
+ user_options = [
+ ('match=', 'm', "patterns to match (required)"),
+ ('dist-dir=', 'd', "directory where the distributions are"),
+ ('keep=', 'k', "number of matching distributions to keep"),
+ ]
+
+ boolean_options = []
+
+ def initialize_options(self):
+ self.match = None
+ self.dist_dir = None
+ self.keep = None
+
+ def finalize_options(self):
+ if self.match is None:
+ raise DistutilsOptionError(
+ "Must specify one or more (comma-separated) match patterns "
+ "(e.g. '.zip' or '.egg')"
+ )
+ if self.keep is None:
+ raise DistutilsOptionError("Must specify number of files to keep")
+ try:
+ self.keep = int(self.keep)
+ except ValueError:
+ raise DistutilsOptionError("--keep must be an integer")
+ if isinstance(self.match, six.string_types):
+ self.match = [
+ convert_path(p.strip()) for p in self.match.split(',')
+ ]
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ def run(self):
+ self.run_command("egg_info")
+ from glob import glob
+
+ for pattern in self.match:
+ pattern = self.distribution.get_name() + '*' + pattern
+ files = glob(os.path.join(self.dist_dir, pattern))
+ files = [(os.path.getmtime(f), f) for f in files]
+ files.sort()
+ files.reverse()
+
+ log.info("%d file(s) matching %s", len(files), pattern)
+ files = files[self.keep:]
+ for (t, f) in files:
+ log.info("Deleting %s", f)
+ if not self.dry_run:
if os.path.isdir(f):
shutil.rmtree(f)
else:
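rotate.run() selects deletion candidates by decorating each match with its mtime, sorting newest-first, and slicing past the first 'keep' entries. The same selection as a small sketch; directory and pattern are illustrative:

    import glob
    import os

    def to_delete(dist_dir, pattern, keep):
        # Decorate with mtime, sort newest first, keep the head of the list.
        files = sorted(
            ((os.path.getmtime(f), f)
             for f in glob.glob(os.path.join(dist_dir, pattern))),
            reverse=True)
        return [f for _, f in files[keep:]]

    # to_delete('dist', 'mypkg*.egg', keep=2) returns every matching file
    # except the two most recently modified ones.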
diff --git a/contrib/python/setuptools/py2/setuptools/command/saveopts.py b/contrib/python/setuptools/py2/setuptools/command/saveopts.py
index 3ce03df53c..611cec5528 100644
--- a/contrib/python/setuptools/py2/setuptools/command/saveopts.py
+++ b/contrib/python/setuptools/py2/setuptools/command/saveopts.py
@@ -1,22 +1,22 @@
-from setuptools.command.setopt import edit_config, option_base
-
-
-class saveopts(option_base):
- """Save command-line options to a file"""
-
- description = "save supplied options to setup.cfg or other config file"
-
- def run(self):
- dist = self.distribution
- settings = {}
-
- for cmd in dist.command_options:
-
- if cmd == 'saveopts':
- continue # don't save our own options!
-
- for opt, (src, val) in dist.get_option_dict(cmd).items():
- if src == "command line":
- settings.setdefault(cmd, {})[opt] = val
-
- edit_config(self.filename, settings, self.dry_run)
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+ """Save command-line options to a file"""
+
+ description = "save supplied options to setup.cfg or other config file"
+
+ def run(self):
+ dist = self.distribution
+ settings = {}
+
+ for cmd in dist.command_options:
+
+ if cmd == 'saveopts':
+ continue # don't save our own options!
+
+ for opt, (src, val) in dist.get_option_dict(cmd).items():
+ if src == "command line":
+ settings.setdefault(cmd, {})[opt] = val
+
+ edit_config(self.filename, settings, self.dry_run)
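saveopts filters the distribution's option table by provenance: each option records a (source, value) pair, and only 'command line' entries are persisted. A toy version of that filtering over a plain dict:

    def commandline_settings(command_options):
        # command_options maps command -> {option: (source, value)}.
        settings = {}
        for cmd, opts in command_options.items():
            if cmd == 'saveopts':
                continue  # never persist saveopts' own options
            for opt, (src, val) in opts.items():
                if src == 'command line':
                    settings.setdefault(cmd, {})[opt] = val
        return settings

    opts = {'bdist': {'dist_dir': ('command line', 'out'),
                      'formats': ('setup.cfg', 'gztar')}}
    assert commandline_settings(opts) == {'bdist': {'dist_dir': 'out'}}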
diff --git a/contrib/python/setuptools/py2/setuptools/command/sdist.py b/contrib/python/setuptools/py2/setuptools/command/sdist.py
index 6afdfa4e0b..8c3438eaa6 100644
--- a/contrib/python/setuptools/py2/setuptools/command/sdist.py
+++ b/contrib/python/setuptools/py2/setuptools/command/sdist.py
@@ -1,64 +1,64 @@
-from distutils import log
-import distutils.command.sdist as orig
-import os
-import sys
-import io
+from distutils import log
+import distutils.command.sdist as orig
+import os
+import sys
+import io
import contextlib
-
+
from setuptools.extern import six, ordered_set
-
+
from .py36compat import sdist_add_defaults
-
-import pkg_resources
-
-_default_revctrl = list
-
-
-def walk_revctrl(dirname=''):
- """Find all files under revision control"""
- for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
- for item in ep.load()(dirname):
- yield item
-
-
+
+import pkg_resources
+
+_default_revctrl = list
+
+
+def walk_revctrl(dirname=''):
+ """Find all files under revision control"""
+ for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+ for item in ep.load()(dirname):
+ yield item
+
+
class sdist(sdist_add_defaults, orig.sdist):
- """Smart sdist that finds anything supported by revision control"""
-
- user_options = [
- ('formats=', None,
- "formats for source distribution (comma-separated list)"),
- ('keep-temp', 'k',
- "keep the distribution tree around after creating " +
- "archive file(s)"),
- ('dist-dir=', 'd',
- "directory to put the source distribution archive(s) in "
- "[default: dist]"),
- ]
-
- negative_opt = {}
-
+ """Smart sdist that finds anything supported by revision control"""
+
+ user_options = [
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
+ ]
+
+ negative_opt = {}
+
README_EXTENSIONS = ['', '.rst', '.txt', '.md']
READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
- def run(self):
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- self.filelist = ei_cmd.filelist
- self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
- self.check_readme()
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- self.make_distribution()
-
- dist_files = getattr(self.distribution, 'dist_files', [])
- for file in self.archive_files:
- data = ('sdist', '', file)
- if data not in dist_files:
- dist_files.append(data)
-
+ def run(self):
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist = ei_cmd.filelist
+ self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
+ self.check_readme()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ self.make_distribution()
+
+ dist_files = getattr(self.distribution, 'dist_files', [])
+ for file in self.archive_files:
+ data = ('sdist', '', file)
+ if data not in dist_files:
+ dist_files.append(data)
+
def initialize_options(self):
orig.sdist.initialize_options(self)
@@ -98,29 +98,29 @@ class sdist(sdist_add_defaults, orig.sdist):
if orig_val is not NoValue:
setattr(os, 'link', orig_val)
- def __read_template_hack(self):
- # This grody hack closes the template file (MANIFEST.in) if an
- # exception occurs during read_template.
- # Doing so prevents an error when easy_install attempts to delete the
- # file.
- try:
- orig.sdist.read_template(self)
+ def __read_template_hack(self):
+ # This grody hack closes the template file (MANIFEST.in) if an
+ # exception occurs during read_template.
+ # Doing so prevents an error when easy_install attempts to delete the
+ # file.
+ try:
+ orig.sdist.read_template(self)
except Exception:
- _, _, tb = sys.exc_info()
- tb.tb_next.tb_frame.f_locals['template'].close()
- raise
-
- # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
- # has been fixed, so only override the method if we're using an earlier
- # Python.
- has_leaky_handle = (
- sys.version_info < (2, 7, 2)
- or (3, 0) <= sys.version_info < (3, 1, 4)
- or (3, 2) <= sys.version_info < (3, 2, 1)
- )
- if has_leaky_handle:
- read_template = __read_template_hack
-
+ _, _, tb = sys.exc_info()
+ tb.tb_next.tb_frame.f_locals['template'].close()
+ raise
+
+ # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
+ # has been fixed, so only override the method if we're using an earlier
+ # Python.
+ has_leaky_handle = (
+ sys.version_info < (2, 7, 2)
+ or (3, 0) <= sys.version_info < (3, 1, 4)
+ or (3, 2) <= sys.version_info < (3, 2, 1)
+ )
+ if has_leaky_handle:
+ read_template = __read_template_hack
+
def _add_defaults_optional(self):
if six.PY2:
sdist_add_defaults._add_defaults_optional(self)
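
The `has_leaky_handle` block above is a version-gated override: the class body decides once, at class-creation time, whether the running interpreter leaks the template file handle, and only then aliases `read_template` to the workaround. A generic sketch of that idiom; the class names and the version cutoff are illustrative:

    import sys

    class Base:
        def template(self):
            return "default behaviour"

    class Patched(Base):
        def _template_workaround(self):
            return "workaround behaviour"

        # Evaluated once while the class body executes, so the override
        # exists only on interpreters that need it.
        if sys.version_info < (3, 0):
            template = _template_workaround
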
@@ -131,11 +131,11 @@ class sdist(sdist_add_defaults, orig.sdist):
def _add_defaults_python(self):
"""getting python files"""
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- self.filelist.extend(build_py.get_source_files())
+ if self.distribution.has_pure_modules():
+ build_py = self.get_finalized_command('build_py')
+ self.filelist.extend(build_py.get_source_files())
self._add_data_files(self._safe_data_files(build_py))
-
+
def _safe_data_files(self, build_py):
"""
Extracting data_files from build_py is known to cause
@@ -164,61 +164,61 @@ class sdist(sdist_add_defaults, orig.sdist):
super()._add_defaults_data_files()
except TypeError:
log.warn("data_files contains unexpected objects")
-
- def check_readme(self):
+
+ def check_readme(self):
for f in self.READMES:
- if os.path.exists(f):
- return
- else:
- self.warn(
- "standard file not found: should have one of " +
+ if os.path.exists(f):
+ return
+ else:
+ self.warn(
+ "standard file not found: should have one of " +
', '.join(self.READMES)
- )
-
- def make_release_tree(self, base_dir, files):
- orig.sdist.make_release_tree(self, base_dir, files)
-
- # Save any egg_info command line options used to create this sdist
- dest = os.path.join(base_dir, 'setup.cfg')
- if hasattr(os, 'link') and os.path.exists(dest):
- # unlink and re-copy, since it might be hard-linked, and
- # we don't want to change the source version
- os.unlink(dest)
- self.copy_file('setup.cfg', dest)
-
- self.get_finalized_command('egg_info').save_version_info(dest)
-
- def _manifest_is_not_generated(self):
- # check for special comment used in 2.7.1 and higher
- if not os.path.isfile(self.manifest):
- return False
-
- with io.open(self.manifest, 'rb') as fp:
- first_line = fp.readline()
- return (first_line !=
- '# file GENERATED by distutils, do NOT edit\n'.encode())
-
- def read_manifest(self):
- """Read the manifest file (named by 'self.manifest') and use it to
- fill in 'self.filelist', the list of files to include in the source
- distribution.
- """
- log.info("reading manifest file '%s'", self.manifest)
+ )
+
+ def make_release_tree(self, base_dir, files):
+ orig.sdist.make_release_tree(self, base_dir, files)
+
+ # Save any egg_info command line options used to create this sdist
+ dest = os.path.join(base_dir, 'setup.cfg')
+ if hasattr(os, 'link') and os.path.exists(dest):
+ # unlink and re-copy, since it might be hard-linked, and
+ # we don't want to change the source version
+ os.unlink(dest)
+ self.copy_file('setup.cfg', dest)
+
+ self.get_finalized_command('egg_info').save_version_info(dest)
+
+ def _manifest_is_not_generated(self):
+ # check for special comment used in 2.7.1 and higher
+ if not os.path.isfile(self.manifest):
+ return False
+
+ with io.open(self.manifest, 'rb') as fp:
+ first_line = fp.readline()
+ return (first_line !=
+ '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+ def read_manifest(self):
+ """Read the manifest file (named by 'self.manifest') and use it to
+ fill in 'self.filelist', the list of files to include in the source
+ distribution.
+ """
+ log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rb')
- for line in manifest:
- # The manifest must contain UTF-8. See #303.
+ for line in manifest:
+ # The manifest must contain UTF-8. See #303.
if not six.PY2:
- try:
- line = line.decode('UTF-8')
- except UnicodeDecodeError:
- log.warn("%r not UTF-8 decodable -- skipping" % line)
- continue
- # ignore comments and blank lines
- line = line.strip()
- if line.startswith('#') or not line:
- continue
- self.filelist.append(line)
- manifest.close()
+ try:
+ line = line.decode('UTF-8')
+ except UnicodeDecodeError:
+ log.warn("%r not UTF-8 decodable -- skipping" % line)
+ continue
+ # ignore comments and blank lines
+ line = line.strip()
+ if line.startswith('#') or not line:
+ continue
+ self.filelist.append(line)
+ manifest.close()
def check_license(self):
"""Checks if license_file' or 'license_files' is configured and adds any
diff --git a/contrib/python/setuptools/py2/setuptools/command/setopt.py b/contrib/python/setuptools/py2/setuptools/command/setopt.py
index 9677a92797..7e57cc0262 100644
--- a/contrib/python/setuptools/py2/setuptools/command/setopt.py
+++ b/contrib/python/setuptools/py2/setuptools/command/setopt.py
@@ -1,149 +1,149 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import distutils
-import os
-
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import distutils
+import os
+
from setuptools.extern.six.moves import configparser
-
-from setuptools import Command
-
-__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
-
-
-def config_file(kind="local"):
- """Get the filename of the distutils, local, global, or per-user config
-
- `kind` must be one of "local", "global", or "user"
- """
- if kind == 'local':
- return 'setup.cfg'
- if kind == 'global':
- return os.path.join(
- os.path.dirname(distutils.__file__), 'distutils.cfg'
- )
- if kind == 'user':
- dot = os.name == 'posix' and '.' or ''
- return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
- raise ValueError(
- "config_file() type must be 'local', 'global', or 'user'", kind
- )
-
-
-def edit_config(filename, settings, dry_run=False):
- """Edit a configuration file to include `settings`
-
- `settings` is a dictionary of dictionaries or ``None`` values, keyed by
- command/section name. A ``None`` value means to delete the entire section,
- while a dictionary lists settings to be changed or deleted in that section.
- A setting of ``None`` means to delete that setting.
- """
- log.debug("Reading configuration from %s", filename)
- opts = configparser.RawConfigParser()
- opts.read([filename])
- for section, options in settings.items():
- if options is None:
- log.info("Deleting section [%s] from %s", section, filename)
- opts.remove_section(section)
- else:
- if not opts.has_section(section):
- log.debug("Adding new section [%s] to %s", section, filename)
- opts.add_section(section)
- for option, value in options.items():
- if value is None:
- log.debug(
- "Deleting %s.%s from %s",
- section, option, filename
- )
- opts.remove_option(section, option)
- if not opts.options(section):
- log.info("Deleting empty [%s] section from %s",
- section, filename)
- opts.remove_section(section)
- else:
- log.debug(
- "Setting %s.%s to %r in %s",
- section, option, value, filename
- )
- opts.set(section, option, value)
-
- log.info("Writing %s", filename)
- if not dry_run:
- with open(filename, 'w') as f:
- opts.write(f)
-
-
-class option_base(Command):
- """Abstract base class for commands that mess with config files"""
-
- user_options = [
- ('global-config', 'g',
- "save options to the site-wide distutils.cfg file"),
- ('user-config', 'u',
- "save options to the current user's pydistutils.cfg file"),
- ('filename=', 'f',
- "configuration file to use (default=setup.cfg)"),
- ]
-
- boolean_options = [
- 'global-config', 'user-config',
- ]
-
- def initialize_options(self):
- self.global_config = None
- self.user_config = None
- self.filename = None
-
- def finalize_options(self):
- filenames = []
- if self.global_config:
- filenames.append(config_file('global'))
- if self.user_config:
- filenames.append(config_file('user'))
- if self.filename is not None:
- filenames.append(self.filename)
- if not filenames:
- filenames.append(config_file('local'))
- if len(filenames) > 1:
- raise DistutilsOptionError(
- "Must specify only one configuration file option",
- filenames
- )
- self.filename, = filenames
-
-
-class setopt(option_base):
- """Save command-line options to a file"""
-
- description = "set an option in setup.cfg or another config file"
-
- user_options = [
- ('command=', 'c', 'command to set an option for'),
- ('option=', 'o', 'option to set'),
- ('set-value=', 's', 'value of the option'),
- ('remove', 'r', 'remove (unset) the value'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.command = None
- self.option = None
- self.set_value = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.command is None or self.option is None:
- raise DistutilsOptionError("Must specify --command *and* --option")
- if self.set_value is None and not self.remove:
- raise DistutilsOptionError("Must specify --set-value or --remove")
-
- def run(self):
- edit_config(
- self.filename, {
- self.command: {self.option.replace('-', '_'): self.set_value}
- },
- self.dry_run
- )
+
+from setuptools import Command
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+ """Get the filename of the distutils, local, global, or per-user config
+
+ `kind` must be one of "local", "global", or "user"
+ """
+ if kind == 'local':
+ return 'setup.cfg'
+ if kind == 'global':
+ return os.path.join(
+ os.path.dirname(distutils.__file__), 'distutils.cfg'
+ )
+ if kind == 'user':
+ dot = os.name == 'posix' and '.' or ''
+ return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+ raise ValueError(
+ "config_file() type must be 'local', 'global', or 'user'", kind
+ )
+
+
+def edit_config(filename, settings, dry_run=False):
+ """Edit a configuration file to include `settings`
+
+ `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+ command/section name. A ``None`` value means to delete the entire section,
+ while a dictionary lists settings to be changed or deleted in that section.
+ A setting of ``None`` means to delete that setting.
+ """
+ log.debug("Reading configuration from %s", filename)
+ opts = configparser.RawConfigParser()
+ opts.read([filename])
+ for section, options in settings.items():
+ if options is None:
+ log.info("Deleting section [%s] from %s", section, filename)
+ opts.remove_section(section)
+ else:
+ if not opts.has_section(section):
+ log.debug("Adding new section [%s] to %s", section, filename)
+ opts.add_section(section)
+ for option, value in options.items():
+ if value is None:
+ log.debug(
+ "Deleting %s.%s from %s",
+ section, option, filename
+ )
+ opts.remove_option(section, option)
+ if not opts.options(section):
+ log.info("Deleting empty [%s] section from %s",
+ section, filename)
+ opts.remove_section(section)
+ else:
+ log.debug(
+ "Setting %s.%s to %r in %s",
+ section, option, value, filename
+ )
+ opts.set(section, option, value)
+
+ log.info("Writing %s", filename)
+ if not dry_run:
+ with open(filename, 'w') as f:
+ opts.write(f)
+
+
+class option_base(Command):
+ """Abstract base class for commands that mess with config files"""
+
+ user_options = [
+ ('global-config', 'g',
+ "save options to the site-wide distutils.cfg file"),
+ ('user-config', 'u',
+ "save options to the current user's pydistutils.cfg file"),
+ ('filename=', 'f',
+ "configuration file to use (default=setup.cfg)"),
+ ]
+
+ boolean_options = [
+ 'global-config', 'user-config',
+ ]
+
+ def initialize_options(self):
+ self.global_config = None
+ self.user_config = None
+ self.filename = None
+
+ def finalize_options(self):
+ filenames = []
+ if self.global_config:
+ filenames.append(config_file('global'))
+ if self.user_config:
+ filenames.append(config_file('user'))
+ if self.filename is not None:
+ filenames.append(self.filename)
+ if not filenames:
+ filenames.append(config_file('local'))
+ if len(filenames) > 1:
+ raise DistutilsOptionError(
+ "Must specify only one configuration file option",
+ filenames
+ )
+ self.filename, = filenames
+
+
+class setopt(option_base):
+ """Save command-line options to a file"""
+
+ description = "set an option in setup.cfg or another config file"
+
+ user_options = [
+ ('command=', 'c', 'command to set an option for'),
+ ('option=', 'o', 'option to set'),
+ ('set-value=', 's', 'value of the option'),
+ ('remove', 'r', 'remove (unset) the value'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.command = None
+ self.option = None
+ self.set_value = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.command is None or self.option is None:
+ raise DistutilsOptionError("Must specify --command *and* --option")
+ if self.set_value is None and not self.remove:
+ raise DistutilsOptionError("Must specify --set-value or --remove")
+
+ def run(self):
+ edit_config(
+ self.filename, {
+ self.command: {self.option.replace('-', '_'): self.set_value}
+ },
+ self.dry_run
+ )
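
`edit_config()` above accepts `{section: {option: value}}` settings in which a `None` option deletes that option (pruning the section if it empties) and a `None` section deletes the whole section. A usage sketch against a throwaway `demo.cfg`:

    from setuptools.command.setopt import edit_config

    # Set an option (creates demo.cfg and the [metadata] section).
    edit_config('demo.cfg', {'metadata': {'description': 'demo package'}})

    # None deletes the option; the now-empty section is pruned too.
    edit_config('demo.cfg', {'metadata': {'description': None}})

    # None for a section drops the section outright (a no-op here).
    edit_config('demo.cfg', {'metadata': None})
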
diff --git a/contrib/python/setuptools/py2/setuptools/command/test.py b/contrib/python/setuptools/py2/setuptools/command/test.py
index dfccc90a8d..f6470e9c34 100644
--- a/contrib/python/setuptools/py2/setuptools/command/test.py
+++ b/contrib/python/setuptools/py2/setuptools/command/test.py
@@ -6,121 +6,121 @@ import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
-from unittest import TestLoader
-
+from unittest import TestLoader
+
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
-
-from pkg_resources import (resource_listdir, resource_exists, normalize_path,
+
+from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages, evaluate_marker,
- add_activation_listener, require, EntryPoint)
-from setuptools import Command
+ add_activation_listener, require, EntryPoint)
+from setuptools import Command
from .build_py import _unique_everseen
-
+
__metaclass__ = type
-
-class ScanningLoader(TestLoader):
+
+class ScanningLoader(TestLoader):
def __init__(self):
TestLoader.__init__(self)
self._visited = set()
- def loadTestsFromModule(self, module, pattern=None):
- """Return a suite of all tests cases contained in the given module
-
- If the module is a package, load tests from all the modules in it.
- If the module has an ``additional_tests`` function, call it and add
- the return value to the tests.
- """
+ def loadTestsFromModule(self, module, pattern=None):
+ """Return a suite of all tests cases contained in the given module
+
+ If the module is a package, load tests from all the modules in it.
+ If the module has an ``additional_tests`` function, call it and add
+ the return value to the tests.
+ """
if module in self._visited:
return None
self._visited.add(module)
- tests = []
- tests.append(TestLoader.loadTestsFromModule(self, module))
-
- if hasattr(module, "additional_tests"):
- tests.append(module.additional_tests())
-
- if hasattr(module, '__path__'):
- for file in resource_listdir(module.__name__, ''):
- if file.endswith('.py') and file != '__init__.py':
- submodule = module.__name__ + '.' + file[:-3]
- else:
- if resource_exists(module.__name__, file + '/__init__.py'):
- submodule = module.__name__ + '.' + file
- else:
- continue
- tests.append(self.loadTestsFromName(submodule))
-
- if len(tests) != 1:
- return self.suiteClass(tests)
- else:
- return tests[0] # don't create a nested suite for only one return
-
-
-# adapted from jaraco.classes.properties:NonDataProperty
+ tests = []
+ tests.append(TestLoader.loadTestsFromModule(self, module))
+
+ if hasattr(module, "additional_tests"):
+ tests.append(module.additional_tests())
+
+ if hasattr(module, '__path__'):
+ for file in resource_listdir(module.__name__, ''):
+ if file.endswith('.py') and file != '__init__.py':
+ submodule = module.__name__ + '.' + file[:-3]
+ else:
+ if resource_exists(module.__name__, file + '/__init__.py'):
+ submodule = module.__name__ + '.' + file
+ else:
+ continue
+ tests.append(self.loadTestsFromName(submodule))
+
+ if len(tests) != 1:
+ return self.suiteClass(tests)
+ else:
+ return tests[0] # don't create a nested suite for only one return
+
+
+# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
- def __init__(self, fget):
- self.fget = fget
-
- def __get__(self, obj, objtype=None):
- if obj is None:
- return self
- return self.fget(obj)
-
-
-class test(Command):
- """Command to run unit tests after in-place build"""
-
+ def __init__(self, fget):
+ self.fget = fget
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+ return self.fget(obj)
+
+
+class test(Command):
+ """Command to run unit tests after in-place build"""
+
description = "run unit tests after in-place build (deprecated)"
-
- user_options = [
- ('test-module=', 'm', "Run 'test_suite' in specified module"),
- ('test-suite=', 's',
+
+ user_options = [
+ ('test-module=', 'm', "Run 'test_suite' in specified module"),
+ ('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
- ('test-runner=', 'r', "Test runner to use"),
- ]
-
- def initialize_options(self):
- self.test_suite = None
- self.test_module = None
- self.test_loader = None
- self.test_runner = None
-
- def finalize_options(self):
-
- if self.test_suite and self.test_module:
- msg = "You may specify a module or a suite, but not both"
- raise DistutilsOptionError(msg)
-
- if self.test_suite is None:
- if self.test_module is None:
- self.test_suite = self.distribution.test_suite
- else:
- self.test_suite = self.test_module + ".test_suite"
-
- if self.test_loader is None:
- self.test_loader = getattr(self.distribution, 'test_loader', None)
- if self.test_loader is None:
- self.test_loader = "setuptools.command.test:ScanningLoader"
- if self.test_runner is None:
- self.test_runner = getattr(self.distribution, 'test_runner', None)
-
- @NonDataProperty
- def test_args(self):
- return list(self._test_args())
-
- def _test_args(self):
+ ('test-runner=', 'r', "Test runner to use"),
+ ]
+
+ def initialize_options(self):
+ self.test_suite = None
+ self.test_module = None
+ self.test_loader = None
+ self.test_runner = None
+
+ def finalize_options(self):
+
+ if self.test_suite and self.test_module:
+ msg = "You may specify a module or a suite, but not both"
+ raise DistutilsOptionError(msg)
+
+ if self.test_suite is None:
+ if self.test_module is None:
+ self.test_suite = self.distribution.test_suite
+ else:
+ self.test_suite = self.test_module + ".test_suite"
+
+ if self.test_loader is None:
+ self.test_loader = getattr(self.distribution, 'test_loader', None)
+ if self.test_loader is None:
+ self.test_loader = "setuptools.command.test:ScanningLoader"
+ if self.test_runner is None:
+ self.test_runner = getattr(self.distribution, 'test_runner', None)
+
+ @NonDataProperty
+ def test_args(self):
+ return list(self._test_args())
+
+ def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
- if self.verbose:
- yield '--verbose'
- if self.test_suite:
- yield self.test_suite
-
- def with_project_on_sys_path(self, func):
+ if self.verbose:
+ yield '--verbose'
+ if self.test_suite:
+ yield self.test_suite
+
+ def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
@@ -130,49 +130,49 @@ class test(Command):
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = not six.PY2 and getattr(self.distribution, 'use_2to3', False)
-
- if with_2to3:
- # If we run 2to3 we can not do this inplace:
-
- # Ensure metadata is up-to-date
- self.reinitialize_command('build_py', inplace=0)
- self.run_command('build_py')
- bpy_cmd = self.get_finalized_command("build_py")
- build_path = normalize_path(bpy_cmd.build_lib)
-
- # Build extensions
- self.reinitialize_command('egg_info', egg_base=build_path)
- self.run_command('egg_info')
-
- self.reinitialize_command('build_ext', inplace=0)
- self.run_command('build_ext')
- else:
- # Without 2to3 inplace works fine:
- self.run_command('egg_info')
-
- # Build extensions in-place
- self.reinitialize_command('build_ext', inplace=1)
- self.run_command('build_ext')
-
- ei_cmd = self.get_finalized_command("egg_info")
-
- old_path = sys.path[:]
- old_modules = sys.modules.copy()
-
- try:
+
+ if with_2to3:
+ # If we run 2to3 we can not do this inplace:
+
+ # Ensure metadata is up-to-date
+ self.reinitialize_command('build_py', inplace=0)
+ self.run_command('build_py')
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = normalize_path(bpy_cmd.build_lib)
+
+ # Build extensions
+ self.reinitialize_command('egg_info', egg_base=build_path)
+ self.run_command('egg_info')
+
+ self.reinitialize_command('build_ext', inplace=0)
+ self.run_command('build_ext')
+ else:
+ # Without 2to3 inplace works fine:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ ei_cmd = self.get_finalized_command("egg_info")
+
+ old_path = sys.path[:]
+ old_modules = sys.modules.copy()
+
+ try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
- working_set.__init__()
- add_activation_listener(lambda dist: dist.activate())
- require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
+ working_set.__init__()
+ add_activation_listener(lambda dist: dist.activate())
+ require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
- finally:
- sys.path[:] = old_path
- sys.modules.clear()
- sys.modules.update(old_modules)
- working_set.__init__()
-
+ finally:
+ sys.path[:] = old_path
+ sys.modules.clear()
+ sys.modules.update(old_modules)
+ working_set.__init__()
+
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
@@ -213,7 +213,7 @@ class test(Command):
)
return itertools.chain(ir_d, tr_d, er_d)
- def run(self):
+ def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
@@ -223,12 +223,12 @@ class test(Command):
)
installed_dists = self.install_dists(self.distribution)
-
- cmd = ' '.join(self._argv)
- if self.dry_run:
- self.announce('skipping "%s" (dry run)' % cmd)
+
+ cmd = ' '.join(self._argv)
+ if self.dry_run:
+ self.announce('skipping "%s" (dry run)' % cmd)
return
-
+
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
@@ -236,44 +236,44 @@ class test(Command):
with self.project_on_sys_path():
self.run_tests()
- def run_tests(self):
- # Purge modules under test from sys.modules. The test loader will
- # re-import them from the build location. Required when 2to3 is used
- # with namespace packages.
+ def run_tests(self):
+ # Purge modules under test from sys.modules. The test loader will
+ # re-import them from the build location. Required when 2to3 is used
+ # with namespace packages.
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
- module = self.test_suite.split('.')[0]
- if module in _namespace_packages:
- del_modules = []
- if module in sys.modules:
- del_modules.append(module)
- module += '.'
- for name in sys.modules:
- if name.startswith(module):
- del_modules.append(name)
- list(map(sys.modules.__delitem__, del_modules))
-
+ module = self.test_suite.split('.')[0]
+ if module in _namespace_packages:
+ del_modules = []
+ if module in sys.modules:
+ del_modules.append(module)
+ module += '.'
+ for name in sys.modules:
+ if name.startswith(module):
+ del_modules.append(name)
+ list(map(sys.modules.__delitem__, del_modules))
+
test = unittest.main(
- None, None, self._argv,
- testLoader=self._resolve_as_ep(self.test_loader),
- testRunner=self._resolve_as_ep(self.test_runner),
+ None, None, self._argv,
+ testLoader=self._resolve_as_ep(self.test_loader),
+ testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
- )
+ )
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
-
- @property
- def _argv(self):
- return ['unittest'] + self.test_args
-
- @staticmethod
- def _resolve_as_ep(val):
- """
-        Load the indicated attribute value, called, as if it were
- specified as an entry point.
- """
- if val is None:
- return
- parsed = EntryPoint.parse("x=" + val)
- return parsed.resolve()()
+
+ @property
+ def _argv(self):
+ return ['unittest'] + self.test_args
+
+ @staticmethod
+ def _resolve_as_ep(val):
+ """
+        Load the indicated attribute value, called, as if it were
+ specified as an entry point.
+ """
+ if val is None:
+ return
+ parsed = EntryPoint.parse("x=" + val)
+ return parsed.resolve()()
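
`_resolve_as_ep()` above turns a `test_loader`/`test_runner` string such as `'setuptools.command.test:ScanningLoader'` into a live object by parsing it as a synthetic entry point, resolving the attribute, and calling the result. The same idiom, shown with the standard library's `unittest:TestLoader`:

    from pkg_resources import EntryPoint

    val = 'unittest:TestLoader'            # same 'module:attr' format
    parsed = EntryPoint.parse('x=' + val)  # 'x' is a throwaway name
    loader = parsed.resolve()()            # resolve the attr, then call it
    print(type(loader).__name__)           # -> TestLoader
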
diff --git a/contrib/python/setuptools/py2/setuptools/command/upload_docs.py b/contrib/python/setuptools/py2/setuptools/command/upload_docs.py
index ca84dfc322..130a0cb6c9 100644
--- a/contrib/python/setuptools/py2/setuptools/command/upload_docs.py
+++ b/contrib/python/setuptools/py2/setuptools/command/upload_docs.py
@@ -1,107 +1,107 @@
-# -*- coding: utf-8 -*-
-"""upload_docs
-
-Implements a Distutils 'upload_docs' subcommand (upload documentation to
-PyPI's pythonhosted.org).
-"""
-
-from base64 import standard_b64encode
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
-import socket
-import zipfile
-import tempfile
-import shutil
+# -*- coding: utf-8 -*-
+"""upload_docs
+
+Implements a Distutils 'upload_docs' subcommand (upload documentation to
+PyPI's pythonhosted.org).
+"""
+
+from base64 import standard_b64encode
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import socket
+import zipfile
+import tempfile
+import shutil
import itertools
import functools
-
+
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
-
-from pkg_resources import iter_entry_points
+
+from pkg_resources import iter_entry_points
from .upload import upload
-
-
+
+
def _encode(s):
errors = 'strict' if six.PY2 else 'surrogateescape'
return s.encode('utf-8', errors)
-
-
+
+
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
-
- description = 'Upload documentation to PyPI'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server'),
- ('upload-dir=', None, 'directory to upload'),
- ]
- boolean_options = upload.boolean_options
-
- def has_sphinx(self):
- if self.upload_dir is None:
- for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
- return True
-
- sub_commands = [('build_sphinx', has_sphinx)]
-
- def initialize_options(self):
- upload.initialize_options(self)
- self.upload_dir = None
- self.target_dir = None
-
- def finalize_options(self):
- upload.finalize_options(self)
- if self.upload_dir is None:
- if self.has_sphinx():
- build_sphinx = self.get_finalized_command('build_sphinx')
- self.target_dir = build_sphinx.builder_target_dir
- else:
- build = self.get_finalized_command('build')
- self.target_dir = os.path.join(build.build_base, 'docs')
- else:
- self.ensure_dirname('upload_dir')
- self.target_dir = self.upload_dir
+
+ description = 'Upload documentation to PyPI'
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server'),
+ ('upload-dir=', None, 'directory to upload'),
+ ]
+ boolean_options = upload.boolean_options
+
+ def has_sphinx(self):
+ if self.upload_dir is None:
+ for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
+ return True
+
+ sub_commands = [('build_sphinx', has_sphinx)]
+
+ def initialize_options(self):
+ upload.initialize_options(self)
+ self.upload_dir = None
+ self.target_dir = None
+
+ def finalize_options(self):
+ upload.finalize_options(self)
+ if self.upload_dir is None:
+ if self.has_sphinx():
+ build_sphinx = self.get_finalized_command('build_sphinx')
+ self.target_dir = build_sphinx.builder_target_dir
+ else:
+ build = self.get_finalized_command('build')
+ self.target_dir = os.path.join(build.build_base, 'docs')
+ else:
+ self.ensure_dirname('upload_dir')
+ self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
- self.announce('Using upload directory %s' % self.target_dir)
-
- def create_zipfile(self, filename):
- zip_file = zipfile.ZipFile(filename, "w")
- try:
- self.mkpath(self.target_dir) # just in case
- for root, dirs, files in os.walk(self.target_dir):
- if root == self.target_dir and not files:
+ self.announce('Using upload directory %s' % self.target_dir)
+
+ def create_zipfile(self, filename):
+ zip_file = zipfile.ZipFile(filename, "w")
+ try:
+ self.mkpath(self.target_dir) # just in case
+ for root, dirs, files in os.walk(self.target_dir):
+ if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
- for name in files:
- full = os.path.join(root, name)
- relative = root[len(self.target_dir):].lstrip(os.path.sep)
- dest = os.path.join(relative, name)
- zip_file.write(full, dest)
- finally:
- zip_file.close()
-
- def run(self):
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- tmp_dir = tempfile.mkdtemp()
- name = self.distribution.metadata.get_name()
- zip_file = os.path.join(tmp_dir, "%s.zip" % name)
- try:
- self.create_zipfile(zip_file)
- self.upload_file(zip_file)
- finally:
- shutil.rmtree(tmp_dir)
-
+ for name in files:
+ full = os.path.join(root, name)
+ relative = root[len(self.target_dir):].lstrip(os.path.sep)
+ dest = os.path.join(relative, name)
+ zip_file.write(full, dest)
+ finally:
+ zip_file.close()
+
+ def run(self):
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ tmp_dir = tempfile.mkdtemp()
+ name = self.distribution.metadata.get_name()
+ zip_file = os.path.join(tmp_dir, "%s.zip" % name)
+ try:
+ self.create_zipfile(zip_file)
+ self.upload_file(zip_file)
+ finally:
+ shutil.rmtree(tmp_dir)
+
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
@@ -141,66 +141,66 @@ class upload_docs(upload):
content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
return b''.join(body_items), content_type
- def upload_file(self, filename):
+ def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
- meta = self.distribution.metadata
- data = {
- ':action': 'doc_upload',
- 'name': meta.get_name(),
- 'content': (os.path.basename(filename), content),
- }
- # set up the authentication
+ meta = self.distribution.metadata
+ data = {
+ ':action': 'doc_upload',
+ 'name': meta.get_name(),
+ 'content': (os.path.basename(filename), content),
+ }
+ # set up the authentication
credentials = _encode(self.username + ':' + self.password)
- credentials = standard_b64encode(credentials)
+ credentials = standard_b64encode(credentials)
if not six.PY2:
- credentials = credentials.decode('ascii')
- auth = "Basic " + credentials
-
+ credentials = credentials.decode('ascii')
+ auth = "Basic " + credentials
+
body, ct = self._build_multipart(data)
-
+
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
-
- # build the Request
- # We can't use urllib2 since we need to send the Basic
- # auth right with the first request
- schema, netloc, url, params, query, fragments = \
- urllib.parse.urlparse(self.repository)
- assert not params and not query and not fragments
- if schema == 'http':
- conn = http_client.HTTPConnection(netloc)
- elif schema == 'https':
- conn = http_client.HTTPSConnection(netloc)
- else:
- raise AssertionError("unsupported schema " + schema)
-
- data = ''
- try:
- conn.connect()
- conn.putrequest("POST", url)
+
+ # build the Request
+ # We can't use urllib2 since we need to send the Basic
+ # auth right with the first request
+ schema, netloc, url, params, query, fragments = \
+ urllib.parse.urlparse(self.repository)
+ assert not params and not query and not fragments
+ if schema == 'http':
+ conn = http_client.HTTPConnection(netloc)
+ elif schema == 'https':
+ conn = http_client.HTTPSConnection(netloc)
+ else:
+ raise AssertionError("unsupported schema " + schema)
+
+ data = ''
+ try:
+ conn.connect()
+ conn.putrequest("POST", url)
content_type = ct
- conn.putheader('Content-type', content_type)
- conn.putheader('Content-length', str(len(body)))
- conn.putheader('Authorization', auth)
- conn.endheaders()
- conn.send(body)
- except socket.error as e:
- self.announce(str(e), log.ERROR)
- return
-
- r = conn.getresponse()
- if r.status == 200:
+ conn.putheader('Content-type', content_type)
+ conn.putheader('Content-length', str(len(body)))
+ conn.putheader('Authorization', auth)
+ conn.endheaders()
+ conn.send(body)
+ except socket.error as e:
+ self.announce(str(e), log.ERROR)
+ return
+
+ r = conn.getresponse()
+ if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
- elif r.status == 301:
- location = r.getheader('Location')
- if location is None:
- location = 'https://pythonhosted.org/%s/' % meta.get_name()
+ elif r.status == 301:
+ location = r.getheader('Location')
+ if location is None:
+ location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
- else:
+ else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
- if self.show_response:
- print('-' * 75, r.read(), '-' * 75)
+ if self.show_response:
+ print('-' * 75, r.read(), '-' * 75)
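
In `upload_file()` above, the Basic-auth header is built by base64-encoding `username:password` and, on Python 3, decoding the digest back to ASCII before prepending "Basic ". A sketch with dummy credentials:

    from base64 import standard_b64encode

    username, password = 'alice', 'secret'  # dummy values
    credentials = ('%s:%s' % (username, password)).encode('utf-8')
    credentials = standard_b64encode(credentials)
    auth = 'Basic ' + credentials.decode('ascii')  # py3: bytes -> str
    print(auth)  # Basic YWxpY2U6c2VjcmV0
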
diff --git a/contrib/python/setuptools/py2/setuptools/depends.py b/contrib/python/setuptools/py2/setuptools/depends.py
index bc2b767ff1..a37675cbd9 100644
--- a/contrib/python/setuptools/py2/setuptools/depends.py
+++ b/contrib/python/setuptools/py2/setuptools/depends.py
@@ -1,87 +1,87 @@
-import sys
-import marshal
+import sys
+import marshal
import contextlib
from distutils.version import StrictVersion
-
+
from .py33compat import Bytecode
-
+
from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import py27compat
-__all__ = [
-    'Require', 'find_module', 'get_module_constant', 'extract_constant'
-]
-
-class Require:
-    """A prerequisite to building or installing a distribution"""
-
+__all__ = [
+    'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
+class Require:
+    """A prerequisite to building or installing a distribution"""
def __init__(
self, name, requested_version, module, homepage='',
- attribute=None, format=None):
-
- if format is None and requested_version is not None:
- format = StrictVersion
-
- if format is not None:
- requested_version = format(requested_version)
- if attribute is None:
- attribute = '__version__'
-
- self.__dict__.update(locals())
- del self.self
-
- def full_name(self):
- """Return full package/distribution name, w/version"""
- if self.requested_version is not None:
+ attribute=None, format=None):
+
+ if format is None and requested_version is not None:
+ format = StrictVersion
+
+ if format is not None:
+ requested_version = format(requested_version)
+ if attribute is None:
+ attribute = '__version__'
+
+ self.__dict__.update(locals())
+ del self.self
+
+ def full_name(self):
+ """Return full package/distribution name, w/version"""
+ if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
- return self.name
-
- def version_ok(self, version):
- """Is 'version' sufficiently up-to-date?"""
- return self.attribute is None or self.format is None or \
- str(version) != "unknown" and version >= self.requested_version
-
- def get_version(self, paths=None, default="unknown"):
- """Get version number of installed module, 'None', or 'default'
-
- Search 'paths' for module. If not found, return 'None'. If found,
- return the extracted version attribute, or 'default' if no version
- attribute was specified, or the value cannot be determined without
- importing the module. The version is formatted according to the
- requirement's version format (if any), unless it is 'None' or the
- supplied 'default'.
- """
-
- if self.attribute is None:
- try:
+ return self.name
+
+ def version_ok(self, version):
+ """Is 'version' sufficiently up-to-date?"""
+ return self.attribute is None or self.format is None or \
+ str(version) != "unknown" and version >= self.requested_version
+
+ def get_version(self, paths=None, default="unknown"):
+ """Get version number of installed module, 'None', or 'default'
+
+ Search 'paths' for module. If not found, return 'None'. If found,
+ return the extracted version attribute, or 'default' if no version
+ attribute was specified, or the value cannot be determined without
+ importing the module. The version is formatted according to the
+ requirement's version format (if any), unless it is 'None' or the
+ supplied 'default'.
+ """
+
+ if self.attribute is None:
+ try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
- return default
- except ImportError:
- return None
-
- v = get_module_constant(self.module, self.attribute, default, paths)
-
- if v is not None and v is not default and self.format is not None:
- return self.format(v)
-
- return v
-
- def is_present(self, paths=None):
- """Return true if dependency is present on 'paths'"""
- return self.get_version(paths) is not None
-
- def is_current(self, paths=None):
- """Return true if dependency is present and up-to-date on 'paths'"""
- version = self.get_version(paths)
- if version is None:
- return False
- return self.version_ok(version)
-
-
+ return default
+ except ImportError:
+ return None
+
+ v = get_module_constant(self.module, self.attribute, default, paths)
+
+ if v is not None and v is not default and self.format is not None:
+ return self.format(v)
+
+ return v
+
+ def is_present(self, paths=None):
+ """Return true if dependency is present on 'paths'"""
+ return self.get_version(paths) is not None
+
+ def is_current(self, paths=None):
+ """Return true if dependency is present and up-to-date on 'paths'"""
+ version = self.get_version(paths)
+ if version is None:
+ return False
+ return self.version_ok(version)
+
+
def maybe_close(f):
@contextlib.contextmanager
def empty():
@@ -89,88 +89,88 @@ def maybe_close(f):
return
if not f:
return empty()
-
+
return contextlib.closing(f)
-
-
-def get_module_constant(module, symbol, default=-1, paths=None):
- """Find 'module' by searching 'paths', and extract 'symbol'
-
- Return 'None' if 'module' does not exist on 'paths', or it does not define
- 'symbol'. If the module defines 'symbol' as a constant, return the
- constant. Otherwise, return 'default'."""
-
- try:
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+ """Find 'module' by searching 'paths', and extract 'symbol'
+
+ Return 'None' if 'module' does not exist on 'paths', or it does not define
+ 'symbol'. If the module defines 'symbol' as a constant, return the
+ constant. Otherwise, return 'default'."""
+
+ try:
f, path, (suffix, mode, kind) = info = find_module(module, paths)
- except ImportError:
- # Module doesn't exist
- return None
-
+ except ImportError:
+ # Module doesn't exist
+ return None
+
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
- code = marshal.load(f)
+ code = marshal.load(f)
elif kind == PY_FROZEN:
code = py27compat.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
- code = compile(f.read(), path, 'exec')
- else:
- # Not something we can parse; we'll have to import it. :(
+ code = compile(f.read(), path, 'exec')
+ else:
+ # Not something we can parse; we'll have to import it. :(
imported = py27compat.get_module(module, paths, info)
return getattr(imported, symbol, None)
-
- return extract_constant(code, symbol, default)
-
-
-def extract_constant(code, symbol, default=-1):
- """Extract the constant value of 'symbol' from 'code'
-
- If the name 'symbol' is bound to a constant value by the Python code
- object 'code', return that value. If 'symbol' is bound to an expression,
- return 'default'. Otherwise, return 'None'.
-
- Return value is based on the first assignment to 'symbol'. 'symbol' must
- be a global, or at least a non-"fast" local in the code block. That is,
- only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
- must be present in 'code.co_names'.
- """
- if symbol not in code.co_names:
+
+ return extract_constant(code, symbol, default)
+
+
+def extract_constant(code, symbol, default=-1):
+ """Extract the constant value of 'symbol' from 'code'
+
+ If the name 'symbol' is bound to a constant value by the Python code
+ object 'code', return that value. If 'symbol' is bound to an expression,
+ return 'default'. Otherwise, return 'None'.
+
+ Return value is based on the first assignment to 'symbol'. 'symbol' must
+ be a global, or at least a non-"fast" local in the code block. That is,
+ only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+ must be present in 'code.co_names'.
+ """
+ if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
- return None
-
- name_idx = list(code.co_names).index(symbol)
-
- STORE_NAME = 90
- STORE_GLOBAL = 97
- LOAD_CONST = 100
-
- const = default
-
+ return None
+
+ name_idx = list(code.co_names).index(symbol)
+
+ STORE_NAME = 90
+ STORE_GLOBAL = 97
+ LOAD_CONST = 100
+
+ const = default
+
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
-
+
if op == LOAD_CONST:
- const = code.co_consts[arg]
+ const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
- return const
- else:
- const = default
-
-
-def _update_globals():
- """
- Patch the globals to remove the objects not available on some platforms.
-
- XXX it'd be better to test assertions about bytecode instead.
- """
-
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- return
- incompatible = 'extract_constant', 'get_module_constant'
- for name in incompatible:
- del globals()[name]
- __all__.remove(name)
-
-
-_update_globals()
+ return const
+ else:
+ const = default
+
+
+def _update_globals():
+ """
+ Patch the globals to remove the objects not available on some platforms.
+
+ XXX it'd be better to test assertions about bytecode instead.
+ """
+
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ return
+ incompatible = 'extract_constant', 'get_module_constant'
+ for name in incompatible:
+ del globals()[name]
+ __all__.remove(name)
+
+
+_update_globals()
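
`extract_constant()` above scans a code object for the first `STORE_NAME`/`STORE_GLOBAL` of the symbol and reports the value loaded by the preceding `LOAD_CONST`, so a constant such as `__version__` can be read without importing the module. A usage sketch, assuming a CPython whose opcode numbers match the constants hard-coded above:

    from setuptools.depends import extract_constant

    code = compile("__version__ = '1.2.3'\nx = 1\n", '<demo>', 'exec')
    print(extract_constant(code, '__version__'))    # -> '1.2.3'
    print(extract_constant(code, 'missing'))        # -> None (never assigned)
    print(extract_constant(code, 'x', default=-1))  # -> 1
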
diff --git a/contrib/python/setuptools/py2/setuptools/dist.py b/contrib/python/setuptools/py2/setuptools/dist.py
index c381b4ac54..f6453a0878 100644
--- a/contrib/python/setuptools/py2/setuptools/dist.py
+++ b/contrib/python/setuptools/py2/setuptools/dist.py
@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
-__all__ = ['Distribution']
-
+__all__ = ['Distribution']
+
import io
import sys
-import re
-import os
-import warnings
-import numbers
-import distutils.log
-import distutils.core
-import distutils.cmd
-import distutils.dist
+import re
+import os
+import warnings
+import numbers
+import distutils.log
+import distutils.core
+import distutils.cmd
+import distutils.dist
from distutils.util import strtobool
from distutils.debug import DEBUG
from distutils.fancy_getopt import translate_longopt
@@ -24,28 +24,28 @@ from distutils.errors import (
)
from distutils.util import rfc822_escape
from distutils.version import StrictVersion
-
+
from setuptools.extern import six
from setuptools.extern import packaging
from setuptools.extern import ordered_set
from setuptools.extern.six.moves import map, filter, filterfalse
-
+
from . import SetuptoolsDeprecationWarning
-from setuptools.depends import Require
-from setuptools import windows_support
+from setuptools.depends import Require
+from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
-import pkg_resources
-
+import pkg_resources
+
__import__('setuptools.extern.packaging.specifiers')
__import__('setuptools.extern.packaging.version')
-
-def _get_unpatched(cls):
+
+def _get_unpatched(cls):
warnings.warn("Do not call this function", DistDeprecationWarning)
return get_unpatched(cls)
-
+
def get_metadata_version(self):
mv = getattr(self, 'metadata_version', None)
@@ -125,9 +125,9 @@ def read_pkg_file(self, file):
# Based on Python 3.5 version
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
- """
+ """
version = self.get_metadata_version()
-
+
if six.PY2:
def write_field(key, value):
file.write("%s: %s\n" % (key, self._encode_field(value)))
@@ -163,7 +163,7 @@ def write_pkg_file(self, file):
write_field('Download-URL', self.download_url)
for project_url in self.project_urls.items():
write_field('Project-URL', '%s, %s' % project_url)
-
+
long_desc = rfc822_escape(self.get_long_description())
write_field('Description', long_desc)
@@ -198,64 +198,64 @@ def write_pkg_file(self, file):
for extra in self.provides_extras:
write_field('Provides-Extra', extra)
-
-sequence = tuple, list
-
-def check_importable(dist, attr, value):
- try:
+sequence = tuple, list
+
+
+def check_importable(dist, attr, value):
+ try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
- assert not ep.extras
+ assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError):
- raise DistutilsSetupError(
- "%r must be importable 'module:attrs' string (got %r)"
+ raise DistutilsSetupError(
+ "%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
- )
-
-
-def assert_string_list(dist, attr, value):
+ )
+
+
+def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
- try:
+ try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
- )
+ )
-def check_nsp(dist, attr, value):
- """Verify that namespace packages are valid"""
+def check_nsp(dist, attr, value):
+ """Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
- if not dist.has_contents_for(nsp):
- raise DistutilsSetupError(
- "Distribution contains no modules or packages for " +
- "namespace package %r" % nsp
- )
+ if not dist.has_contents_for(nsp):
+ raise DistutilsSetupError(
+ "Distribution contains no modules or packages for " +
+ "namespace package %r" % nsp
+ )
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
-
-def check_extras(dist, attr, value):
- """Verify that extras_require mapping is valid"""
- try:
+
+def check_extras(dist, attr, value):
+ """Verify that extras_require mapping is valid"""
+ try:
list(itertools.starmap(_check_extra, value.items()))
except (TypeError, ValueError, AttributeError):
- raise DistutilsSetupError(
- "'extras_require' must be a dictionary whose values are "
- "strings or lists of strings containing valid project/version "
- "requirement specifiers."
- )
-
+ raise DistutilsSetupError(
+ "'extras_require' must be a dictionary whose values are "
+ "strings or lists of strings containing valid project/version "
+ "requirement specifiers."
+ )
+
def _check_extra(extra, reqs):
name, sep, marker = extra.partition(':')
@@ -264,26 +264,26 @@ def _check_extra(extra, reqs):
list(pkg_resources.parse_requirements(reqs))
-def assert_bool(dist, attr, value):
- """Verify that value is True, False, 0, or 1"""
- if bool(value) != value:
- tmpl = "{attr!r} must be a boolean value (got {value!r})"
- raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
-
-
-def check_requirements(dist, attr, value):
- """Verify that install_requires is a valid requirements list"""
- try:
- list(pkg_resources.parse_requirements(value))
+def assert_bool(dist, attr, value):
+ """Verify that value is True, False, 0, or 1"""
+ if bool(value) != value:
+ tmpl = "{attr!r} must be a boolean value (got {value!r})"
+ raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
+def check_requirements(dist, attr, value):
+ """Verify that install_requires is a valid requirements list"""
+ try:
+ list(pkg_resources.parse_requirements(value))
if isinstance(value, (dict, set)):
raise TypeError("Unordered types are not allowed")
- except (TypeError, ValueError) as error:
- tmpl = (
- "{attr!r} must be a string or list of strings "
- "containing valid project/version requirement specifiers; {error}"
- )
- raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
-
+ except (TypeError, ValueError) as error:
+ tmpl = (
+ "{attr!r} must be a string or list of strings "
+ "containing valid project/version requirement specifiers; {error}"
+ )
+ raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
+
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
@@ -297,21 +297,21 @@ def check_specifier(dist, attr, value):
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
-def check_entry_points(dist, attr, value):
- """Verify that entry_points map is parseable"""
- try:
- pkg_resources.EntryPoint.parse_map(value)
- except ValueError as e:
- raise DistutilsSetupError(e)
-
+def check_entry_points(dist, attr, value):
+ """Verify that entry_points map is parseable"""
+ try:
+ pkg_resources.EntryPoint.parse_map(value)
+ except ValueError as e:
+ raise DistutilsSetupError(e)
-def check_test_suite(dist, attr, value):
- if not isinstance(value, six.string_types):
- raise DistutilsSetupError("test_suite must be a string")
-
-def check_package_data(dist, attr, value):
- """Verify that value is a dictionary of package names to glob lists"""
+def check_test_suite(dist, attr, value):
+ if not isinstance(value, six.string_types):
+ raise DistutilsSetupError("test_suite must be a string")
+
+
+def check_package_data(dist, attr, value):
+ """Verify that value is a dictionary of package names to glob lists"""
if not isinstance(value, dict):
raise DistutilsSetupError(
"{!r} must be a dictionary mapping package names to lists of "
@@ -323,88 +323,88 @@ def check_package_data(dist, attr, value):
.format(attr, k)
)
assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
-
-
-def check_packages(dist, attr, value):
- for pkgname in value:
- if not re.match(r'\w+(\.\w+)*', pkgname):
- distutils.log.warn(
- "WARNING: %r not a valid package name; please use only "
- ".-separated package names in setup.py", pkgname
- )
-
-
+
+
+def check_packages(dist, attr, value):
+ for pkgname in value:
+ if not re.match(r'\w+(\.\w+)*', pkgname):
+ distutils.log.warn(
+ "WARNING: %r not a valid package name; please use only "
+ ".-separated package names in setup.py", pkgname
+ )
+
+
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
- """Distribution with support for features, tests, and package data
-
- This is an enhanced version of 'distutils.dist.Distribution' that
- effectively adds the following new optional keyword arguments to 'setup()':
-
- 'install_requires' -- a string or sequence of strings specifying project
- versions that the distribution requires when installed, in the format
- used by 'pkg_resources.require()'. They will be installed
- automatically when the package is installed. If you wish to use
- packages that are not available in PyPI, or want to give your users an
- alternate download location, you can add a 'find_links' option to the
- '[easy_install]' section of your project's 'setup.cfg' file, and then
- setuptools will scan the listed web pages for links that satisfy the
- requirements.
-
- 'extras_require' -- a dictionary mapping names of optional "extras" to the
- additional requirement(s) that using those extras incurs. For example,
- this::
-
- extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
-
- indicates that the distribution can optionally provide an extra
- capability called "reST", but it can only be used if docutils and
- reSTedit are installed. If the user installs your package using
- EasyInstall and requests one of your extras, the corresponding
- additional requirements will be installed if needed.
-
- 'features' **deprecated** -- a dictionary mapping option names to
- 'setuptools.Feature'
- objects. Features are a portion of the distribution that can be
- included or excluded based on user options, inter-feature dependencies,
- and availability on the current system. Excluded features are omitted
- from all setup commands, including source and binary distributions, so
- you can create multiple distributions from the same source tree.
- Feature names should be valid Python identifiers, except that they may
- contain the '-' (minus) sign. Features can be included or excluded
- via the command line options '--with-X' and '--without-X', where 'X' is
- the name of the feature. Whether a feature is included by default, and
- whether you are allowed to control this from the command line, is
- determined by the Feature object. See the 'Feature' class for more
- information.
-
- 'test_suite' -- the name of a test suite to run for the 'test' command.
- If the user runs 'python setup.py test', the package will be installed,
- and the named test suite will be run. The format is the same as
- would be used on a 'unittest.py' command line. That is, it is the
- dotted name of an object to import and call to generate a test suite.
-
- 'package_data' -- a dictionary mapping package names to lists of filenames
- or globs to use to find data files contained in the named packages.
- If the dictionary has filenames or globs listed under '""' (the empty
- string), those names will be searched for in every package, in addition
- to any names for the specific package. Data files found using these
- names/globs will be installed along with the package, in the same
- location as the package. Note that globs are allowed to reference
- the contents of non-package subdirectories, as long as you use '/' as
- a path separator. (Globs are automatically converted to
- platform-specific paths at runtime.)
-
- In addition to these new keywords, this class also has several new methods
- for manipulating the distribution's contents. For example, the 'include()'
- and 'exclude()' methods can be thought of as in-place add and subtract
- commands that add or remove packages, modules, extensions, and so on from
- the distribution. They are used by the feature subsystem to configure the
- distribution for the included and excluded features.
- """
-
+ """Distribution with support for features, tests, and package data
+
+ This is an enhanced version of 'distutils.dist.Distribution' that
+ effectively adds the following new optional keyword arguments to 'setup()':
+
+ 'install_requires' -- a string or sequence of strings specifying project
+ versions that the distribution requires when installed, in the format
+ used by 'pkg_resources.require()'. They will be installed
+ automatically when the package is installed. If you wish to use
+ packages that are not available in PyPI, or want to give your users an
+ alternate download location, you can add a 'find_links' option to the
+ '[easy_install]' section of your project's 'setup.cfg' file, and then
+ setuptools will scan the listed web pages for links that satisfy the
+ requirements.
+
+ 'extras_require' -- a dictionary mapping names of optional "extras" to the
+ additional requirement(s) that using those extras incurs. For example,
+ this::
+
+ extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+ indicates that the distribution can optionally provide an extra
+ capability called "reST", but it can only be used if docutils and
+ reSTedit are installed. If the user installs your package using
+ EasyInstall and requests one of your extras, the corresponding
+ additional requirements will be installed if needed.
+
+ 'features' **deprecated** -- a dictionary mapping option names to
+ 'setuptools.Feature' objects.
+ Features are a portion of the distribution that can be
+ included or excluded based on user options, inter-feature dependencies,
+ and availability on the current system. Excluded features are omitted
+ from all setup commands, including source and binary distributions, so
+ you can create multiple distributions from the same source tree.
+ Feature names should be valid Python identifiers, except that they may
+ contain the '-' (minus) sign. Features can be included or excluded
+ via the command line options '--with-X' and '--without-X', where 'X' is
+ the name of the feature. Whether a feature is included by default, and
+ whether you are allowed to control this from the command line, is
+ determined by the Feature object. See the 'Feature' class for more
+ information.
+
+ 'test_suite' -- the name of a test suite to run for the 'test' command.
+ If the user runs 'python setup.py test', the package will be installed,
+ and the named test suite will be run. The format is the same as
+ would be used on a 'unittest.py' command line. That is, it is the
+ dotted name of an object to import and call to generate a test suite.
+
+ 'package_data' -- a dictionary mapping package names to lists of filenames
+ or globs to use to find data files contained in the named packages.
+ If the dictionary has filenames or globs listed under '""' (the empty
+ string), those names will be searched for in every package, in addition
+ to any names for the specific package. Data files found using these
+ names/globs will be installed along with the package, in the same
+ location as the package. Note that globs are allowed to reference
+ the contents of non-package subdirectories, as long as you use '/' as
+ a path separator. (Globs are automatically converted to
+ platform-specific paths at runtime.)
+
+ In addition to these new keywords, this class also has several new methods
+ for manipulating the distribution's contents. For example, the 'include()'
+ and 'exclude()' methods can be thought of as in-place add and subtract
+ commands that add or remove packages, modules, extensions, and so on from
+ the distribution. They are used by the feature subsystem to configure the
+ distribution for the included and excluded features.
+ """
+
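A minimal setup.py sketch exercising the keywords documented above; the project name, requirements, and paths are illustrative:

    from setuptools import setup

    setup(
        name='example',
        version='1.0',
        packages=['example'],
        install_requires=['docutils>=0.3'],
        extras_require={'reST': ['docutils>=0.3', 'reSTedit']},
        test_suite='example.tests.suite',
        package_data={'': ['*.txt'], 'example': ['data/*.dat']},
    )
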
_DISTUTILS_UNSUPPORTED_METADATA = {
'long_description_content_type': None,
'project_urls': dict,
@@ -412,38 +412,38 @@ class Distribution(_Distribution):
'license_files': ordered_set.OrderedSet,
}
- _patched_dist = None
-
- def patch_missing_pkg_info(self, attrs):
- # Fake up a replacement for the data that would normally come from
- # PKG-INFO, but which might not yet be built if this is a fresh
- # checkout.
- #
- if not attrs or 'name' not in attrs or 'version' not in attrs:
- return
- key = pkg_resources.safe_name(str(attrs['name'])).lower()
- dist = pkg_resources.working_set.by_key.get(key)
- if dist is not None and not dist.has_metadata('PKG-INFO'):
- dist._version = pkg_resources.safe_version(str(attrs['version']))
- self._patched_dist = dist
-
- def __init__(self, attrs=None):
- have_package_data = hasattr(self, "package_data")
- if not have_package_data:
- self.package_data = {}
+ _patched_dist = None
+
+ def patch_missing_pkg_info(self, attrs):
+ # Fake up a replacement for the data that would normally come from
+ # PKG-INFO, but which might not yet be built if this is a fresh
+ # checkout.
+ #
+ if not attrs or 'name' not in attrs or 'version' not in attrs:
+ return
+ key = pkg_resources.safe_name(str(attrs['name'])).lower()
+ dist = pkg_resources.working_set.by_key.get(key)
+ if dist is not None and not dist.has_metadata('PKG-INFO'):
+ dist._version = pkg_resources.safe_version(str(attrs['version']))
+ self._patched_dist = dist
+
+ def __init__(self, attrs=None):
+ have_package_data = hasattr(self, "package_data")
+ if not have_package_data:
+ self.package_data = {}
attrs = attrs or {}
if 'features' in attrs or 'require_features' in attrs:
- Feature.warn_deprecated()
- self.require_features = []
- self.features = {}
- self.dist_files = []
+ Feature.warn_deprecated()
+ self.require_features = []
+ self.features = {}
+ self.dist_files = []
# Filter-out setuptools' specific options.
self.src_root = attrs.pop("src_root", None)
- self.patch_missing_pkg_info(attrs)
+ self.patch_missing_pkg_info(attrs)
self.dependency_links = attrs.pop('dependency_links', [])
self.setup_requires = attrs.pop('setup_requires', [])
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
- vars(self).setdefault(ep.name, None)
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, {
k: v for k, v in attrs.items()
if k not in self._DISTUTILS_UNSUPPORTED_METADATA
@@ -461,29 +461,29 @@ class Distribution(_Distribution):
value = default() if default else None
setattr(self.metadata, option, value)
- if isinstance(self.metadata.version, numbers.Number):
- # Some people apparently take "version number" too literally :)
- self.metadata.version = str(self.metadata.version)
-
- if self.metadata.version is not None:
- try:
- ver = packaging.version.Version(self.metadata.version)
- normalized_version = str(ver)
- if self.metadata.version != normalized_version:
- warnings.warn(
- "Normalizing '%s' to '%s'" % (
- self.metadata.version,
- normalized_version,
- )
- )
- self.metadata.version = normalized_version
- except (packaging.version.InvalidVersion, TypeError):
- warnings.warn(
- "The version specified (%r) is an invalid version, this "
- "may not work as expected with newer versions of "
- "setuptools, pip, and PyPI. Please see PEP 440 for more "
- "details." % self.metadata.version
- )
+ if isinstance(self.metadata.version, numbers.Number):
+ # Some people apparently take "version number" too literally :)
+ self.metadata.version = str(self.metadata.version)
+
+ if self.metadata.version is not None:
+ try:
+ ver = packaging.version.Version(self.metadata.version)
+ normalized_version = str(ver)
+ if self.metadata.version != normalized_version:
+ warnings.warn(
+ "Normalizing '%s' to '%s'" % (
+ self.metadata.version,
+ normalized_version,
+ )
+ )
+ self.metadata.version = normalized_version
+ except (packaging.version.InvalidVersion, TypeError):
+ warnings.warn(
+ "The version specified (%r) is an invalid version, this "
+ "may not work as expected with newer versions of "
+ "setuptools, pip, and PyPI. Please see PEP 440 for more "
+ "details." % self.metadata.version
+ )
self._finalize_requires()
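The hunk above stringifies numeric versions and warns when PEP 440 normalization changes the declared version. A standalone sketch of that normalization, assuming the third-party 'packaging' module is importable:

    from packaging.version import InvalidVersion, Version

    def normalized(version):
        # return the PEP 440 normal form, or None for invalid versions
        try:
            return str(Version(version))
        except InvalidVersion:
            return None

    print(normalized('2.0.0-beta.1'))   # -> '2.0.0b1' (would trigger the warning)
    print(normalized('not a version'))  # -> None
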
def _finalize_requires(self):
@@ -493,7 +493,7 @@ class Distribution(_Distribution):
"""
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
-
+
if getattr(self, 'extras_require', None):
for extra in self.extras_require.keys():
# Since this gets called multiple times at points where the
@@ -702,29 +702,29 @@ class Distribution(_Distribution):
ignore_option_errors=ignore_option_errors)
self._finalize_requires()
- def parse_command_line(self):
- """Process features after parsing command line options"""
- result = _Distribution.parse_command_line(self)
- if self.features:
- self._finalize_features()
- return result
-
+ def parse_command_line(self):
+ """Process features after parsing command line options"""
+ result = _Distribution.parse_command_line(self)
+ if self.features:
+ self._finalize_features()
+ return result
+
def _feature_attrname(self, name):
- """Convert feature name to corresponding option attribute name"""
+ """Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
-
- def fetch_build_eggs(self, requires):
- """Resolve pre-setup requirements"""
- resolved_dists = pkg_resources.working_set.resolve(
- pkg_resources.parse_requirements(requires),
- installer=self.fetch_build_egg,
- replace_conflicting=True,
- )
- for dist in resolved_dists:
- pkg_resources.working_set.add(dist, replace=True)
+
+ def fetch_build_eggs(self, requires):
+ """Resolve pre-setup requirements"""
+ resolved_dists = pkg_resources.working_set.resolve(
+ pkg_resources.parse_requirements(requires),
+ installer=self.fetch_build_egg,
+ replace_conflicting=True,
+ )
+ for dist in resolved_dists:
+ pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
-
- def finalize_options(self):
+
+ def finalize_options(self):
"""
Allow plugins to apply arbitrary operations to the
        distribution. Each hook may optionally define an 'order'
@@ -732,7 +732,7 @@ class Distribution(_Distribution):
go first and the default is 0.
"""
group = 'setuptools.finalize_distribution_options'
-
+
def by_order(hook):
return getattr(hook, 'order', 0)
eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))
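A hedged sketch of a hook consumed by the loop above; the function is hypothetical, and a real plugin would register it under the 'setuptools.finalize_distribution_options' entry-point group in its own metadata:

    def my_finalizer(dist):
        # runs once per Distribution after command-line parsing (hypothetical)
        dist.metadata.keywords = (dist.metadata.keywords or []) + ['plugin']

    my_finalizer.order = 10  # hooks with lower 'order' run first; default is 0
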
@@ -740,533 +740,533 @@ class Distribution(_Distribution):
ep(self)
def _finalize_setup_keywords(self):
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
- if value is not None:
- ep.require(installer=self.fetch_build_egg)
- ep.load()(self, ep.name, value)
+ if value is not None:
+ ep.require(installer=self.fetch_build_egg)
+ ep.load()(self, ep.name, value)
def _finalize_2to3_doctests(self):
- if getattr(self, 'convert_2to3_doctests', None):
- # XXX may convert to set here when we can rely on set being builtin
+ if getattr(self, 'convert_2to3_doctests', None):
+ # XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [
os.path.abspath(p)
for p in self.convert_2to3_doctests
]
- else:
- self.convert_2to3_doctests = []
-
- def get_egg_cache_dir(self):
- egg_cache_dir = os.path.join(os.curdir, '.eggs')
- if not os.path.exists(egg_cache_dir):
- os.mkdir(egg_cache_dir)
- windows_support.hide_file(egg_cache_dir)
- readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
- with open(readme_txt_filename, 'w') as f:
- f.write('This directory contains eggs that were downloaded '
- 'by setuptools to build, test, and run plug-ins.\n\n')
- f.write('This directory caches those eggs to prevent '
- 'repeated downloads.\n\n')
- f.write('However, it is safe to delete this directory.\n\n')
-
- return egg_cache_dir
-
- def fetch_build_egg(self, req):
- """Fetch an egg needed for building"""
+ else:
+ self.convert_2to3_doctests = []
+
+ def get_egg_cache_dir(self):
+ egg_cache_dir = os.path.join(os.curdir, '.eggs')
+ if not os.path.exists(egg_cache_dir):
+ os.mkdir(egg_cache_dir)
+ windows_support.hide_file(egg_cache_dir)
+ readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+ with open(readme_txt_filename, 'w') as f:
+ f.write('This directory contains eggs that were downloaded '
+ 'by setuptools to build, test, and run plug-ins.\n\n')
+ f.write('This directory caches those eggs to prevent '
+ 'repeated downloads.\n\n')
+ f.write('However, it is safe to delete this directory.\n\n')
+
+ return egg_cache_dir
+
+ def fetch_build_egg(self, req):
+ """Fetch an egg needed for building"""
from setuptools.installer import fetch_build_egg
return fetch_build_egg(self, req)
-
+
def _finalize_feature_opts(self):
- """Add --with-X/--without-X options based on optional features"""
-
+ """Add --with-X/--without-X options based on optional features"""
+
if not self.features:
return
- go = []
- no = self.negative_opt.copy()
-
+ go = []
+ no = self.negative_opt.copy()
+
for name, feature in self.features.items():
self._set_feature(name, None)
- feature.validate(self)
-
- if feature.optional:
- descr = feature.description
- incdef = ' (default)'
+ feature.validate(self)
+
+ if feature.optional:
+ descr = feature.description
+ incdef = ' (default)'
excdef = ''
- if not feature.include_by_default():
- excdef, incdef = incdef, excdef
-
+ if not feature.include_by_default():
+ excdef, incdef = incdef, excdef
+
new = (
('with-' + name, None, 'include ' + descr + incdef),
('without-' + name, None, 'exclude ' + descr + excdef),
)
go.extend(new)
no['without-' + name] = 'with-' + name
-
- self.global_options = self.feature_options = go + self.global_options
- self.negative_opt = self.feature_negopt = no
-
- def _finalize_features(self):
- """Add/remove features and resolve dependencies between them"""
-
- # First, flag all the enabled items (and thus their dependencies)
+
+ self.global_options = self.feature_options = go + self.global_options
+ self.negative_opt = self.feature_negopt = no
+
+ def _finalize_features(self):
+ """Add/remove features and resolve dependencies between them"""
+
+ # First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
- enabled = self.feature_is_included(name)
- if enabled or (enabled is None and feature.include_by_default()):
- feature.include_in(self)
+ enabled = self.feature_is_included(name)
+ if enabled or (enabled is None and feature.include_by_default()):
+ feature.include_in(self)
self._set_feature(name, 1)
-
- # Then disable the rest, so that off-by-default features don't
- # get flagged as errors when they're required by an enabled feature
+
+ # Then disable the rest, so that off-by-default features don't
+ # get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
- if not self.feature_is_included(name):
- feature.exclude_from(self)
+ if not self.feature_is_included(name):
+ feature.exclude_from(self)
self._set_feature(name, 0)
-
- def get_command_class(self, command):
- """Pluggable version of get_command_class()"""
- if command in self.cmdclass:
- return self.cmdclass[command]
-
+
+ def get_command_class(self, command):
+ """Pluggable version of get_command_class()"""
+ if command in self.cmdclass:
+ return self.cmdclass[command]
+
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
- ep.require(installer=self.fetch_build_egg)
- self.cmdclass[command] = cmdclass = ep.load()
- return cmdclass
- else:
- return _Distribution.get_command_class(self, command)
-
- def print_commands(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.print_commands(self)
-
- def get_command_list(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.get_command_list(self)
-
+ ep.require(installer=self.fetch_build_egg)
+ self.cmdclass[command] = cmdclass = ep.load()
+ return cmdclass
+ else:
+ return _Distribution.get_command_class(self, command)
+
+ def print_commands(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.print_commands(self)
+
+ def get_command_list(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.get_command_list(self)
+
def _set_feature(self, name, status):
- """Set feature's inclusion status"""
+ """Set feature's inclusion status"""
setattr(self, self._feature_attrname(name), status)
-
+
def feature_is_included(self, name):
- """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
+ """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self, self._feature_attrname(name))
-
+
def include_feature(self, name):
- """Request inclusion of feature named 'name'"""
-
+ """Request inclusion of feature named 'name'"""
+
if self.feature_is_included(name) == 0:
- descr = self.features[name].description
- raise DistutilsOptionError(
- descr + " is required, but was excluded or is not available"
- )
- self.features[name].include_in(self)
+ descr = self.features[name].description
+ raise DistutilsOptionError(
+ descr + " is required, but was excluded or is not available"
+ )
+ self.features[name].include_in(self)
self._set_feature(name, 1)
-
+
def include(self, **attrs):
- """Add items to distribution that are named in keyword arguments
-
+ """Add items to distribution that are named in keyword arguments
+
For example, 'dist.include(py_modules=["x"])' would add 'x' to
- the distribution's 'py_modules' attribute, if it was not already
- there.
-
- Currently, this method only supports inclusion for attributes that are
- lists or tuples. If you need to add support for adding to other
- attributes in this or a subclass, you can add an '_include_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
- will try to call 'dist._include_foo({"bar":"baz"})', which can then
- handle whatever special inclusion logic is needed.
- """
+ the distribution's 'py_modules' attribute, if it was not already
+ there.
+
+ Currently, this method only supports inclusion for attributes that are
+ lists or tuples. If you need to add support for adding to other
+ attributes in this or a subclass, you can add an '_include_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
+ will try to call 'dist._include_foo({"bar":"baz"})', which can then
+ handle whatever special inclusion logic is needed.
+ """
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
- if include:
- include(v)
- else:
+ if include:
+ include(v)
+ else:
self._include_misc(k, v)
-
+
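A small sketch of the merge behavior described in the docstring above, assuming a Distribution built directly from an attrs dict:

    dist = Distribution(dict(py_modules=['a']))
    dist.include(py_modules=['a', 'b'])   # no _include_py_modules, so _include_misc runs
    assert dist.py_modules == ['a', 'b']  # 'a' is not duplicated
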
def exclude_package(self, package):
- """Remove packages, modules, and extensions in named package"""
-
+ """Remove packages, modules, and extensions in named package"""
+
pfx = package + '.'
- if self.packages:
- self.packages = [
- p for p in self.packages
+ if self.packages:
+ self.packages = [
+ p for p in self.packages
if p != package and not p.startswith(pfx)
- ]
-
- if self.py_modules:
- self.py_modules = [
- p for p in self.py_modules
+ ]
+
+ if self.py_modules:
+ self.py_modules = [
+ p for p in self.py_modules
if p != package and not p.startswith(pfx)
- ]
-
- if self.ext_modules:
- self.ext_modules = [
- p for p in self.ext_modules
+ ]
+
+ if self.ext_modules:
+ self.ext_modules = [
+ p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
- ]
-
+ ]
+
def has_contents_for(self, package):
- """Return true if 'exclude_package(package)' would do something"""
-
+ """Return true if 'exclude_package(package)' would do something"""
+
pfx = package + '.'
-
- for p in self.iter_distribution_names():
+
+ for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
- return True
-
+ return True
+
def _exclude_misc(self, name, value):
- """Handle 'exclude()' for list/tuple attrs without a special handler"""
+ """Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
- raise DistutilsSetupError(
- "%s: setting must be a list or tuple (%r)" % (name, value)
- )
- try:
+ raise DistutilsSetupError(
+ "%s: setting must be a list or tuple (%r)" % (name, value)
+ )
+ try:
old = getattr(self, name)
- except AttributeError:
- raise DistutilsSetupError(
- "%s: No such distribution setting" % name
- )
+ except AttributeError:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ )
if old is not None and not isinstance(old, sequence):
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
- )
- elif old:
+ )
+ elif old:
setattr(self, name, [item for item in old if item not in value])
-
+
def _include_misc(self, name, value):
- """Handle 'include()' for list/tuple attrs without a special handler"""
-
+ """Handle 'include()' for list/tuple attrs without a special handler"""
+
if not isinstance(value, sequence):
- raise DistutilsSetupError(
- "%s: setting must be a list (%r)" % (name, value)
- )
- try:
+ raise DistutilsSetupError(
+ "%s: setting must be a list (%r)" % (name, value)
+ )
+ try:
old = getattr(self, name)
- except AttributeError:
- raise DistutilsSetupError(
- "%s: No such distribution setting" % name
- )
- if old is None:
+ except AttributeError:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ )
+ if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
- )
- else:
+ )
+ else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
-
+
def exclude(self, **attrs):
- """Remove items from distribution that are named in keyword arguments
-
- For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
- the distribution's 'py_modules' attribute. Excluding packages uses
- the 'exclude_package()' method, so all of the package's contained
- packages, modules, and extensions are also excluded.
-
- Currently, this method only supports exclusion from attributes that are
- lists or tuples. If you need to add support for excluding from other
- attributes in this or a subclass, you can add an '_exclude_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
- will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
- handle whatever special exclusion logic is needed.
- """
+ """Remove items from distribution that are named in keyword arguments
+
+ For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+ the distribution's 'py_modules' attribute. Excluding packages uses
+ the 'exclude_package()' method, so all of the package's contained
+ packages, modules, and extensions are also excluded.
+
+ Currently, this method only supports exclusion from attributes that are
+ lists or tuples. If you need to add support for excluding from other
+ attributes in this or a subclass, you can add an '_exclude_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
+ will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+ handle whatever special exclusion logic is needed.
+ """
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
- if exclude:
- exclude(v)
- else:
+ if exclude:
+ exclude(v)
+ else:
self._exclude_misc(k, v)
-
+
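And the symmetric sketch for exclude(), again with illustrative module names:

    dist = Distribution(dict(py_modules=['a', 'b']))
    dist.exclude(py_modules=['b'])  # removed via _exclude_misc()
    assert dist.py_modules == ['a']
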
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
- raise DistutilsSetupError(
- "packages: setting must be a list or tuple (%r)" % (packages,)
- )
- list(map(self.exclude_package, packages))
-
- def _parse_command_opts(self, parser, args):
- # Remove --with-X/--without-X options when processing command args
- self.global_options = self.__class__.global_options
- self.negative_opt = self.__class__.negative_opt
-
- # First, expand any aliases
- command = args[0]
- aliases = self.get_option_dict('aliases')
- while command in aliases:
+ raise DistutilsSetupError(
+ "packages: setting must be a list or tuple (%r)" % (packages,)
+ )
+ list(map(self.exclude_package, packages))
+
+ def _parse_command_opts(self, parser, args):
+ # Remove --with-X/--without-X options when processing command args
+ self.global_options = self.__class__.global_options
+ self.negative_opt = self.__class__.negative_opt
+
+ # First, expand any aliases
+ command = args[0]
+ aliases = self.get_option_dict('aliases')
+ while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
- import shlex
+ import shlex
args[:1] = shlex.split(alias, True)
- command = args[0]
-
- nargs = _Distribution._parse_command_opts(self, parser, args)
-
- # Handle commands that want to consume all remaining arguments
- cmd_class = self.get_command_class(command)
+ command = args[0]
+
+ nargs = _Distribution._parse_command_opts(self, parser, args)
+
+ # Handle commands that want to consume all remaining arguments
+ cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
- self.get_option_dict(command)['args'] = ("command line", nargs)
- if nargs is not None:
- return []
-
- return nargs
-
- def get_cmdline_options(self):
- """Return a '{cmd: {opt:val}}' map of all command-line options
-
- Option names are all long, but do not include the leading '--', and
- contain dashes rather than underscores. If the option doesn't take
- an argument (e.g. '--quiet'), the 'val' is 'None'.
-
- Note that options provided by config files are intentionally excluded.
- """
-
- d = {}
-
+ self.get_option_dict(command)['args'] = ("command line", nargs)
+ if nargs is not None:
+ return []
+
+ return nargs
+
+ def get_cmdline_options(self):
+ """Return a '{cmd: {opt:val}}' map of all command-line options
+
+ Option names are all long, but do not include the leading '--', and
+ contain dashes rather than underscores. If the option doesn't take
+ an argument (e.g. '--quiet'), the 'val' is 'None'.
+
+ Note that options provided by config files are intentionally excluded.
+ """
+
+ d = {}
+
for cmd, opts in self.command_options.items():
-
+
for opt, (src, val) in opts.items():
-
- if src != "command line":
- continue
-
+
+ if src != "command line":
+ continue
+
opt = opt.replace('_', '-')
-
+
if val == 0:
- cmdobj = self.get_command_obj(cmd)
- neg_opt = self.negative_opt.copy()
+ cmdobj = self.get_command_obj(cmd)
+ neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
- break
- else:
- raise AssertionError("Shouldn't be able to get here")
-
+ break
+ else:
+ raise AssertionError("Shouldn't be able to get here")
+
elif val == 1:
- val = None
-
+ val = None
+
d.setdefault(cmd, {})[opt] = val
-
- return d
-
- def iter_distribution_names(self):
- """Yield all packages, modules, and extension names in distribution"""
-
- for pkg in self.packages or ():
- yield pkg
-
- for module in self.py_modules or ():
- yield module
-
- for ext in self.ext_modules or ():
+
+ return d
+
+ def iter_distribution_names(self):
+ """Yield all packages, modules, and extension names in distribution"""
+
+ for pkg in self.packages or ():
+ yield pkg
+
+ for module in self.py_modules or ():
+ yield module
+
+ for ext in self.ext_modules or ():
if isinstance(ext, tuple):
- name, buildinfo = ext
- else:
- name = ext.name
- if name.endswith('module'):
- name = name[:-6]
- yield name
-
- def handle_display_options(self, option_order):
- """If there were any non-global "display-only" options
- (--help-commands or the metadata display options) on the command
- line, display the requested info and return true; else return
- false.
- """
- import sys
-
- if six.PY2 or self.help_commands:
- return _Distribution.handle_display_options(self, option_order)
-
- # Stdout may be StringIO (e.g. in tests)
- if not isinstance(sys.stdout, io.TextIOWrapper):
- return _Distribution.handle_display_options(self, option_order)
-
- # Don't wrap stdout if utf-8 is already the encoding. Provides
- # workaround for #334.
- if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
- return _Distribution.handle_display_options(self, option_order)
-
- # Print metadata in UTF-8 no matter the platform
- encoding = sys.stdout.encoding
- errors = sys.stdout.errors
- newline = sys.platform != 'win32' and '\n' or None
- line_buffering = sys.stdout.line_buffering
-
- sys.stdout = io.TextIOWrapper(
- sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
- try:
- return _Distribution.handle_display_options(self, option_order)
- finally:
- sys.stdout = io.TextIOWrapper(
- sys.stdout.detach(), encoding, errors, newline, line_buffering)
-
-
-class Feature:
- """
- **deprecated** -- The `Feature` facility was never completely implemented
- or supported, `has reported issues
+ name, buildinfo = ext
+ else:
+ name = ext.name
+ if name.endswith('module'):
+ name = name[:-6]
+ yield name
+
+ def handle_display_options(self, option_order):
+ """If there were any non-global "display-only" options
+ (--help-commands or the metadata display options) on the command
+ line, display the requested info and return true; else return
+ false.
+ """
+ import sys
+
+ if six.PY2 or self.help_commands:
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Stdout may be StringIO (e.g. in tests)
+ if not isinstance(sys.stdout, io.TextIOWrapper):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Don't wrap stdout if utf-8 is already the encoding. Provides
+ # workaround for #334.
+ if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Print metadata in UTF-8 no matter the platform
+ encoding = sys.stdout.encoding
+ errors = sys.stdout.errors
+ newline = sys.platform != 'win32' and '\n' or None
+ line_buffering = sys.stdout.line_buffering
+
+ sys.stdout = io.TextIOWrapper(
+ sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
+ try:
+ return _Distribution.handle_display_options(self, option_order)
+ finally:
+ sys.stdout = io.TextIOWrapper(
+ sys.stdout.detach(), encoding, errors, newline, line_buffering)
+
+
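The stdout juggling above doubles as a standalone recipe; a minimal sketch, assuming a Python 3 interpreter where sys.stdout is an io.TextIOWrapper:

    import io
    import sys

    def rewrap_stdout(encoding='utf-8'):
        # re-wrap the underlying buffer, keeping error handling and buffering
        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), encoding,
            sys.stdout.errors, None, sys.stdout.line_buffering)
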
+class Feature:
+ """
+ **deprecated** -- The `Feature` facility was never completely implemented
+ or supported, `has reported issues
<https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
- a future version.
-
- A subset of the distribution that can be excluded if unneeded/wanted
-
- Features are created using these keyword arguments:
-
- 'description' -- a short, human readable description of the feature, to
- be used in error messages, and option help messages.
-
- 'standard' -- if true, the feature is included by default if it is
- available on the current system. Otherwise, the feature is only
- included if requested via a command line '--with-X' option, or if
- another included feature requires it. The default setting is 'False'.
-
- 'available' -- if true, the feature is available for installation on the
- current system. The default setting is 'True'.
-
- 'optional' -- if true, the feature's inclusion can be controlled from the
- command line, using the '--with-X' or '--without-X' options. If
- false, the feature's inclusion status is determined automatically,
- based on 'available', 'standard', and whether any other feature
- requires it. The default setting is 'True'.
-
- 'require_features' -- a string or sequence of strings naming features
- that should also be included if this feature is included. Defaults to
- an empty list. May also contain 'Require' objects that should be
- added/removed from the distribution.
-
- 'remove' -- a string or list of strings naming packages to be removed
- from the distribution if this feature is *not* included. If the
- feature *is* included, this argument is ignored. This argument exists
- to support removing features that "crosscut" a distribution, such as
- defining a 'tests' feature that removes all the 'tests' subpackages
- provided by other features. The default for this argument is an empty
- list. (Note: the named package(s) or modules must exist in the base
- distribution when the 'setup()' function is initially called.)
-
- other keywords -- any other keyword arguments are saved, and passed to
- the distribution's 'include()' and 'exclude()' methods when the
- feature is included or excluded, respectively. So, for example, you
- could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
- added or removed from the distribution as appropriate.
-
- A feature must include at least one 'requires', 'remove', or other
- keyword argument. Otherwise, it can't affect the distribution in any way.
- Note also that you can subclass 'Feature' to create your own specialized
- feature types that modify the distribution in other ways when included or
- excluded. See the docstrings for the various methods here for more detail.
- Aside from the methods, the only feature attributes that distributions look
- at are 'description' and 'optional'.
- """
-
- @staticmethod
- def warn_deprecated():
+ a future version.
+
+ A subset of the distribution that can be excluded if unneeded/wanted
+
+ Features are created using these keyword arguments:
+
+ 'description' -- a short, human readable description of the feature, to
+ be used in error messages, and option help messages.
+
+ 'standard' -- if true, the feature is included by default if it is
+ available on the current system. Otherwise, the feature is only
+ included if requested via a command line '--with-X' option, or if
+ another included feature requires it. The default setting is 'False'.
+
+ 'available' -- if true, the feature is available for installation on the
+ current system. The default setting is 'True'.
+
+ 'optional' -- if true, the feature's inclusion can be controlled from the
+ command line, using the '--with-X' or '--without-X' options. If
+ false, the feature's inclusion status is determined automatically,
+ based on 'available', 'standard', and whether any other feature
+ requires it. The default setting is 'True'.
+
+ 'require_features' -- a string or sequence of strings naming features
+ that should also be included if this feature is included. Defaults to
+ an empty list. May also contain 'Require' objects that should be
+ added/removed from the distribution.
+
+ 'remove' -- a string or list of strings naming packages to be removed
+ from the distribution if this feature is *not* included. If the
+ feature *is* included, this argument is ignored. This argument exists
+ to support removing features that "crosscut" a distribution, such as
+ defining a 'tests' feature that removes all the 'tests' subpackages
+ provided by other features. The default for this argument is an empty
+ list. (Note: the named package(s) or modules must exist in the base
+ distribution when the 'setup()' function is initially called.)
+
+ other keywords -- any other keyword arguments are saved, and passed to
+ the distribution's 'include()' and 'exclude()' methods when the
+ feature is included or excluded, respectively. So, for example, you
+ could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
+ added or removed from the distribution as appropriate.
+
+ A feature must include at least one 'requires', 'remove', or other
+ keyword argument. Otherwise, it can't affect the distribution in any way.
+ Note also that you can subclass 'Feature' to create your own specialized
+ feature types that modify the distribution in other ways when included or
+ excluded. See the docstrings for the various methods here for more detail.
+ Aside from the methods, the only feature attributes that distributions look
+ at are 'description' and 'optional'.
+ """
+
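A hedged sketch of the keyword arguments documented above, for an old setuptools release that still exports Feature; all names are illustrative:

    from setuptools import setup, Feature

    setup(
        name='example',
        packages=['example', 'example.tests'],
        features={
            'tests': Feature(
                'test suite',              # description shown in option help
                standard=True,             # included by default when available
                remove=['example.tests'],  # stripped when built --without-tests
            ),
        },
    )
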
+ @staticmethod
+ def warn_deprecated():
msg = (
- "Features are deprecated and will be removed in a future "
+ "Features are deprecated and will be removed in a future "
"version. See https://github.com/pypa/setuptools/issues/65."
- )
+ )
warnings.warn(msg, DistDeprecationWarning, stacklevel=3)
-
+
def __init__(
self, description, standard=False, available=True,
- optional=True, require_features=(), remove=(), **extras):
- self.warn_deprecated()
-
- self.description = description
- self.standard = standard
- self.available = available
- self.optional = optional
+ optional=True, require_features=(), remove=(), **extras):
+ self.warn_deprecated()
+
+ self.description = description
+ self.standard = standard
+ self.available = available
+ self.optional = optional
if isinstance(require_features, (str, Require)):
- require_features = require_features,
-
- self.require_features = [
+ require_features = require_features,
+
+ self.require_features = [
r for r in require_features if isinstance(r, str)
- ]
+ ]
er = [r for r in require_features if not isinstance(r, str)]
if er:
extras['require_features'] = er
-
+
if isinstance(remove, str):
- remove = remove,
- self.remove = remove
- self.extras = extras
-
- if not remove and not require_features and not extras:
- raise DistutilsSetupError(
+ remove = remove,
+ self.remove = remove
+ self.extras = extras
+
+ if not remove and not require_features and not extras:
+ raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or "
"at least one of 'packages', 'py_modules', etc."
- )
-
- def include_by_default(self):
- """Should this feature be included by default?"""
- return self.available and self.standard
-
+ )
+
+ def include_by_default(self):
+ """Should this feature be included by default?"""
+ return self.available and self.standard
+
def include_in(self, dist):
- """Ensure feature and its requirements are included in distribution
-
- You may override this in a subclass to perform additional operations on
- the distribution. Note that this method may be called more than once
- per feature, and so should be idempotent.
-
- """
-
- if not self.available:
- raise DistutilsPlatformError(
+ """Ensure feature and its requirements are included in distribution
+
+ You may override this in a subclass to perform additional operations on
+ the distribution. Note that this method may be called more than once
+ per feature, and so should be idempotent.
+
+ """
+
+ if not self.available:
+ raise DistutilsPlatformError(
self.description + " is required, "
- "but is not available on this platform"
- )
-
- dist.include(**self.extras)
-
- for f in self.require_features:
- dist.include_feature(f)
-
+ "but is not available on this platform"
+ )
+
+ dist.include(**self.extras)
+
+ for f in self.require_features:
+ dist.include_feature(f)
+
def exclude_from(self, dist):
- """Ensure feature is excluded from distribution
-
- You may override this in a subclass to perform additional operations on
- the distribution. This method will be called at most once per
- feature, and only after all included features have been asked to
- include themselves.
- """
-
- dist.exclude(**self.extras)
-
- if self.remove:
- for item in self.remove:
- dist.exclude_package(item)
-
+ """Ensure feature is excluded from distribution
+
+ You may override this in a subclass to perform additional operations on
+ the distribution. This method will be called at most once per
+ feature, and only after all included features have been asked to
+ include themselves.
+ """
+
+ dist.exclude(**self.extras)
+
+ if self.remove:
+ for item in self.remove:
+ dist.exclude_package(item)
+
def validate(self, dist):
- """Verify that feature makes sense in context of distribution
-
- This method is called by the distribution just before it parses its
- command line. It checks to ensure that the 'remove' attribute, if any,
- contains only valid package/module names that are present in the base
- distribution when 'setup()' is called. You may override it in a
- subclass to perform any other required validation of the feature
- against a target distribution.
- """
-
- for item in self.remove:
- if not dist.has_contents_for(item):
- raise DistutilsSetupError(
- "%s wants to be able to remove %s, but the distribution"
- " doesn't contain any packages or modules under %s"
- % (self.description, item, item)
- )
+ """Verify that feature makes sense in context of distribution
+
+ This method is called by the distribution just before it parses its
+ command line. It checks to ensure that the 'remove' attribute, if any,
+ contains only valid package/module names that are present in the base
+ distribution when 'setup()' is called. You may override it in a
+ subclass to perform any other required validation of the feature
+ against a target distribution.
+ """
+
+ for item in self.remove:
+ if not dist.has_contents_for(item):
+ raise DistutilsSetupError(
+ "%s wants to be able to remove %s, but the distribution"
+ " doesn't contain any packages or modules under %s"
+ % (self.description, item, item)
+ )
class DistDeprecationWarning(SetuptoolsDeprecationWarning):
diff --git a/contrib/python/setuptools/py2/setuptools/extension.py b/contrib/python/setuptools/py2/setuptools/extension.py
index 679c11263d..29468894f8 100644
--- a/contrib/python/setuptools/py2/setuptools/extension.py
+++ b/contrib/python/setuptools/py2/setuptools/extension.py
@@ -1,57 +1,57 @@
-import re
-import functools
-import distutils.core
-import distutils.errors
-import distutils.extension
-
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
from setuptools.extern.six.moves import map
-
+
from .monkey import get_unpatched
-
-
-def _have_cython():
- """
- Return True if Cython can be imported.
- """
+
+
+def _have_cython():
+ """
+ Return True if Cython can be imported.
+ """
cython_impl = 'Cython.Distutils.build_ext'
- try:
- # from (cython_impl) import build_ext
- __import__(cython_impl, fromlist=['build_ext']).build_ext
- return True
- except Exception:
- pass
- return False
-
-
-# for compatibility
-have_pyrex = _have_cython
-
+ try:
+ # from (cython_impl) import build_ext
+ __import__(cython_impl, fromlist=['build_ext']).build_ext
+ return True
+ except Exception:
+ pass
+ return False
+
+
+# for compatibility
+have_pyrex = _have_cython
+
_Extension = get_unpatched(distutils.core.Extension)
-
-class Extension(_Extension):
- """Extension that uses '.c' files in place of '.pyx' files"""
-
+
+class Extension(_Extension):
+ """Extension that uses '.c' files in place of '.pyx' files"""
+
def __init__(self, name, sources, *args, **kw):
# The *args is needed for compatibility as calls may use positional
# arguments. py_limited_api may be set only via keyword.
self.py_limited_api = kw.pop("py_limited_api", False)
_Extension.__init__(self, name, sources, *args, **kw)
- def _convert_pyx_sources_to_lang(self):
- """
- Replace sources that have '.pyx' extensions with sources using the
- target language extension. This mechanism lets language authors supply
- pre-converted sources, while the '.pyx' sources win when Cython is available.
- """
- if _have_cython():
- # the build has Cython, so allow it to compile the .pyx files
- return
- lang = self.language or ''
- target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
- sub = functools.partial(re.sub, '.pyx$', target_ext)
- self.sources = list(map(sub, self.sources))
-
-
-class Library(Extension):
- """Just like a regular Extension, but built as a library instead"""
+ def _convert_pyx_sources_to_lang(self):
+ """
+ Replace sources that have '.pyx' extensions with sources using the
+ target language extension. This mechanism lets language authors supply
+ pre-converted sources, while the '.pyx' sources win when Cython is available.
+ """
+ if _have_cython():
+ # the build has Cython, so allow it to compile the .pyx files
+ return
+ lang = self.language or ''
+ target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+ sub = functools.partial(re.sub, '.pyx$', target_ext)
+ self.sources = list(map(sub, self.sources))
+
+
+class Library(Extension):
+ """Just like a regular Extension, but built as a library instead"""
diff --git a/contrib/python/setuptools/py2/setuptools/launch.py b/contrib/python/setuptools/py2/setuptools/launch.py
index 8c302e90a1..308283ea93 100644
--- a/contrib/python/setuptools/py2/setuptools/launch.py
+++ b/contrib/python/setuptools/py2/setuptools/launch.py
@@ -1,16 +1,16 @@
-"""
-Launch the Python script on the command line after
-setuptools is bootstrapped via import.
-"""
-
-# Note that setuptools gets imported implicitly by the
-# invocation of this script using python -m setuptools.launch
-
-import tokenize
-import sys
-
-
-def run():
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
+
+import tokenize
+import sys
+
+
+def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
@@ -23,13 +23,13 @@ def run():
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
-
+
open_ = getattr(tokenize, 'open', open)
script = open_(script_name).read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
-
-
-if __name__ == '__main__':
+
+
+if __name__ == '__main__':
run()
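In practice this means `python -m setuptools.launch some_script.py args...` runs the script with setuptools already imported. A rough standalone equivalent of run(), for a hypothetical script path:

    import tokenize

    script_name = 'some_script.py'  # hypothetical target
    namespace = dict(__file__=script_name, __name__='__main__', __doc__=None)
    with tokenize.open(script_name) as f:  # honors PEP 263 coding cookies
        code = compile(f.read(), script_name, 'exec')
    exec(code, namespace)
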
diff --git a/contrib/python/setuptools/py2/setuptools/lib2to3_ex.py b/contrib/python/setuptools/py2/setuptools/lib2to3_ex.py
index b193707807..4b1a73feb2 100644
--- a/contrib/python/setuptools/py2/setuptools/lib2to3_ex.py
+++ b/contrib/python/setuptools/py2/setuptools/lib2to3_ex.py
@@ -1,62 +1,62 @@
-"""
-Customized Mixin2to3 support:
-
- - adds support for converting doctests
-
-
-This module raises an ImportError on Python 2.
-"""
-
-from distutils.util import Mixin2to3 as _Mixin2to3
-from distutils import log
-from lib2to3.refactor import RefactoringTool, get_fixers_from_package
-
-import setuptools
-
-
-class DistutilsRefactoringTool(RefactoringTool):
- def log_error(self, msg, *args, **kw):
- log.error(msg, *args)
-
- def log_message(self, msg, *args):
- log.info(msg, *args)
-
- def log_debug(self, msg, *args):
- log.debug(msg, *args)
-
-
-class Mixin2to3(_Mixin2to3):
+"""
+Customized Mixin2to3 support:
+
+ - adds support for converting doctests
+
+
+This module raises an ImportError on Python 2.
+"""
+
+from distutils.util import Mixin2to3 as _Mixin2to3
+from distutils import log
+from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+
+import setuptools
+
+
+class DistutilsRefactoringTool(RefactoringTool):
+ def log_error(self, msg, *args, **kw):
+ log.error(msg, *args)
+
+ def log_message(self, msg, *args):
+ log.info(msg, *args)
+
+ def log_debug(self, msg, *args):
+ log.debug(msg, *args)
+
+
+class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests=False):
- # See if the distribution option has been set; otherwise check the
- # setuptools default.
- if self.distribution.use_2to3 is not True:
- return
- if not files:
- return
+ # See if the distribution option has been set; otherwise check the
+ # setuptools default.
+ if self.distribution.use_2to3 is not True:
+ return
+ if not files:
+ return
log.info("Fixing " + " ".join(files))
- self.__build_fixer_names()
- self.__exclude_fixers()
- if doctests:
- if setuptools.run_2to3_on_doctests:
- r = DistutilsRefactoringTool(self.fixer_names)
- r.refactor(files, write=True, doctests_only=True)
- else:
- _Mixin2to3.run_2to3(self, files)
-
- def __build_fixer_names(self):
+ self.__build_fixer_names()
+ self.__exclude_fixers()
+ if doctests:
+ if setuptools.run_2to3_on_doctests:
+ r = DistutilsRefactoringTool(self.fixer_names)
+ r.refactor(files, write=True, doctests_only=True)
+ else:
+ _Mixin2to3.run_2to3(self, files)
+
+ def __build_fixer_names(self):
if self.fixer_names:
return
- self.fixer_names = []
- for p in setuptools.lib2to3_fixer_packages:
- self.fixer_names.extend(get_fixers_from_package(p))
- if self.distribution.use_2to3_fixers is not None:
- for p in self.distribution.use_2to3_fixers:
- self.fixer_names.extend(get_fixers_from_package(p))
-
- def __exclude_fixers(self):
- excluded_fixers = getattr(self, 'exclude_fixers', [])
- if self.distribution.use_2to3_exclude_fixers is not None:
- excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
- for fixer_name in excluded_fixers:
- if fixer_name in self.fixer_names:
- self.fixer_names.remove(fixer_name)
+ self.fixer_names = []
+ for p in setuptools.lib2to3_fixer_packages:
+ self.fixer_names.extend(get_fixers_from_package(p))
+ if self.distribution.use_2to3_fixers is not None:
+ for p in self.distribution.use_2to3_fixers:
+ self.fixer_names.extend(get_fixers_from_package(p))
+
+ def __exclude_fixers(self):
+ excluded_fixers = getattr(self, 'exclude_fixers', [])
+ if self.distribution.use_2to3_exclude_fixers is not None:
+ excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
+ for fixer_name in excluded_fixers:
+ if fixer_name in self.fixer_names:
+ self.fixer_names.remove(fixer_name)
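A hedged sketch of the distribution options this mixin consults (2to3 support was later removed from setuptools entirely); the fixer package name is hypothetical:

    from setuptools import setup

    setup(
        name='example',
        use_2to3=True,
        use_2to3_fixers=['my_fixers'],  # hypothetical extra fixer package
        use_2to3_exclude_fixers=['lib2to3.fixes.fix_import'],
    )
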
diff --git a/contrib/python/setuptools/py2/setuptools/package_index.py b/contrib/python/setuptools/py2/setuptools/package_index.py
index 38c661dab0..f419d47167 100644
--- a/contrib/python/setuptools/py2/setuptools/package_index.py
+++ b/contrib/python/setuptools/py2/setuptools/package_index.py
@@ -1,50 +1,50 @@
-"""PyPI and direct package downloading"""
-import sys
-import os
-import re
-import shutil
-import socket
-import base64
-import hashlib
-import itertools
+"""PyPI and direct package downloading"""
+import sys
+import os
+import re
+import shutil
+import socket
+import base64
+import hashlib
+import itertools
import warnings
-from functools import wraps
-
+from functools import wraps
+
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
-
+
import setuptools
-from pkg_resources import (
- CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+from pkg_resources import (
+ CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
-)
-from setuptools import ssl_support
-from distutils import log
-from distutils.errors import DistutilsError
-from fnmatch import translate
-from setuptools.py27compat import get_all_headers
+)
+from setuptools import ssl_support
+from distutils import log
+from distutils.errors import DistutilsError
+from fnmatch import translate
+from setuptools.py27compat import get_all_headers
from setuptools.py33compat import unescape
from setuptools.wheel import Wheel
-
+
__metaclass__ = type
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
-PYPI_MD5 = re.compile(
+PYPI_MD5 = re.compile(
r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)'
-)
+)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
-EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
-
-__all__ = [
- 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
- 'interpret_distro_name',
-]
-
-_SOCKET_TIMEOUT = 15
-
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+ 'interpret_distro_name',
+]
+
+_SOCKET_TIMEOUT = 15
+
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
@@ -58,62 +58,62 @@ def parse_requirement_arg(spec):
)
-def parse_bdist_wininst(name):
- """Return (base,pyversion) or (None,None) for possible .exe name"""
-
- lower = name.lower()
- base, py_ver, plat = None, None, None
-
- if lower.endswith('.exe'):
- if lower.endswith('.win32.exe'):
- base = name[:-10]
- plat = 'win32'
+def parse_bdist_wininst(name):
+ """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+ lower = name.lower()
+ base, py_ver, plat = None, None, None
+
+ if lower.endswith('.exe'):
+ if lower.endswith('.win32.exe'):
+ base = name[:-10]
+ plat = 'win32'
elif lower.startswith('.win32-py', -16):
- py_ver = name[-7:-4]
- base = name[:-16]
- plat = 'win32'
- elif lower.endswith('.win-amd64.exe'):
- base = name[:-14]
- plat = 'win-amd64'
+ py_ver = name[-7:-4]
+ base = name[:-16]
+ plat = 'win32'
+ elif lower.endswith('.win-amd64.exe'):
+ base = name[:-14]
+ plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', -20):
- py_ver = name[-7:-4]
- base = name[:-20]
- plat = 'win-amd64'
+ py_ver = name[-7:-4]
+ base = name[:-20]
+ plat = 'win-amd64'
return base, py_ver, plat
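A few illustrative inputs for the parser above (the filenames are made up):

    parse_bdist_wininst('foo-1.0.win32.exe')        # ('foo-1.0', None, 'win32')
    parse_bdist_wininst('foo-1.0.win32-py2.7.exe')  # ('foo-1.0', '2.7', 'win32')
    parse_bdist_wininst('foo-1.0.win-amd64.exe')    # ('foo-1.0', None, 'win-amd64')
    parse_bdist_wininst('foo-1.0.tar.gz')           # (None, None, None)
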
-
-
-def egg_info_for_url(url):
- parts = urllib.parse.urlparse(url)
- scheme, server, path, parameters, query, fragment = parts
- base = urllib.parse.unquote(path.split('/')[-1])
+
+
+def egg_info_for_url(url):
+ parts = urllib.parse.urlparse(url)
+ scheme, server, path, parameters, query, fragment = parts
+ base = urllib.parse.unquote(path.split('/')[-1])
if server == 'sourceforge.net' and base == 'download': # XXX Yuck
- base = urllib.parse.unquote(path.split('/')[-2])
+ base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base:
base, fragment = base.split('#', 1)
return base, fragment
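For example, with an illustrative URL:

    egg_info_for_url('https://example.com/dl/Foo-1.0.tar.gz#egg=Foo-1.0')
    # -> ('Foo-1.0.tar.gz', 'egg=Foo-1.0')
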
-
-def distros_for_url(url, metadata=None):
- """Yield egg or source distribution objects that might be found at a URL"""
- base, fragment = egg_info_for_url(url)
+
+def distros_for_url(url, metadata=None):
+ """Yield egg or source distribution objects that might be found at a URL"""
+ base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata):
yield dist
- if fragment:
- match = EGG_FRAGMENT.match(fragment)
- if match:
- for dist in interpret_distro_name(
+ if fragment:
+ match = EGG_FRAGMENT.match(fragment)
+ if match:
+ for dist in interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
- ):
- yield dist
-
+ ):
+ yield dist
+
-def distros_for_location(location, basename, metadata=None):
- """Yield egg or source distribution objects based on basename"""
- if basename.endswith('.egg.zip'):
+def distros_for_location(location, basename, metadata=None):
+ """Yield egg or source distribution objects based on basename"""
+ if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
- if basename.endswith('.egg') and '-' in basename:
- # only one, unambiguous interpretation
- return [Distribution.from_location(location, basename, metadata)]
+ if basename.endswith('.egg') and '-' in basename:
+ # only one, unambiguous interpretation
+ return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.whl') and '-' in basename:
wheel = Wheel(basename)
if not wheel.is_compatible():
@@ -125,508 +125,508 @@ def distros_for_location(location, basename, metadata=None):
# Increase priority over eggs.
precedence=EGG_DIST + 1,
)]
- if basename.endswith('.exe'):
- win_base, py_ver, platform = parse_bdist_wininst(basename)
- if win_base is not None:
- return interpret_distro_name(
- location, win_base, metadata, py_ver, BINARY_DIST, platform
- )
- # Try source distro extensions (.zip, .tgz, etc.)
- #
- for ext in EXTENSIONS:
- if basename.endswith(ext):
- basename = basename[:-len(ext)]
- return interpret_distro_name(location, basename, metadata)
- return [] # no extension matched
-
-
-def distros_for_filename(filename, metadata=None):
- """Yield possible egg or source distribution objects based on a filename"""
- return distros_for_location(
- normalize_path(filename), os.path.basename(filename), metadata
- )
-
-
-def interpret_distro_name(
- location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
- platform=None
+ if basename.endswith('.exe'):
+ win_base, py_ver, platform = parse_bdist_wininst(basename)
+ if win_base is not None:
+ return interpret_distro_name(
+ location, win_base, metadata, py_ver, BINARY_DIST, platform
+ )
+ # Try source distro extensions (.zip, .tgz, etc.)
+ #
+ for ext in EXTENSIONS:
+ if basename.endswith(ext):
+ basename = basename[:-len(ext)]
+ return interpret_distro_name(location, basename, metadata)
+ return [] # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+ """Yield possible egg or source distribution objects based on a filename"""
+ return distros_for_location(
+ normalize_path(filename), os.path.basename(filename), metadata
+ )
+
+
+def interpret_distro_name(
+ location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+ platform=None
):
- """Generate alternative interpretations of a source distro name
-
- Note: if `location` is a filesystem filename, you should call
- ``pkg_resources.normalize_path()`` on it before passing it to this
- routine!
- """
- # Generate alternative interpretations of a source distro name
- # Because some packages are ambiguous as to name/versions split
- # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
-    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0",
- # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
- # the spurious interpretations should be ignored, because in the event
- # there's also an "adns" package, the spurious "python-1.1.0" version will
- # compare lower than any numeric version number, and is therefore unlikely
- # to match a request for it. It's still a potential problem, though, and
- # in the long run PyPI and the distutils should go for "safe" names and
- # versions in distribution archive names (sdist and bdist).
-
- parts = basename.split('-')
+ """Generate alternative interpretations of a source distro name
+
+ Note: if `location` is a filesystem filename, you should call
+ ``pkg_resources.normalize_path()`` on it before passing it to this
+ routine!
+ """
+ # Generate alternative interpretations of a source distro name
+ # Because some packages are ambiguous as to name/versions split
+ # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0",
+ # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
+ # the spurious interpretations should be ignored, because in the event
+ # there's also an "adns" package, the spurious "python-1.1.0" version will
+ # compare lower than any numeric version number, and is therefore unlikely
+ # to match a request for it. It's still a potential problem, though, and
+ # in the long run PyPI and the distutils should go for "safe" names and
+ # versions in distribution archive names (sdist and bdist).
+
+ parts = basename.split('-')
if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
- # it is a bdist_dumb, not an sdist -- bail out
- return
-
+ # it is a bdist_dumb, not an sdist -- bail out
+ return
+
for p in range(1, len(parts) + 1):
- yield Distribution(
- location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+ yield Distribution(
+ location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
- )
-
-
-# From Python 2.7 docs
-def unique_everseen(iterable, key=None):
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen = set()
- seen_add = seen.add
- if key is None:
- for element in six.moves.filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
-
-
-def unique_values(func):
- """
- Wrap a function returning an iterable such that the resulting iterable
- only ever yields unique items.
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- return unique_everseen(func(*args, **kwargs))
-
- return wrapper
-
+ )
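A sketch of the ambiguity handling described in the comment above (names invented):

    # 'adns-python-1.1.0' yields three candidate (project, version) splits:
    #   ('adns', 'python-1.1.0'), ('adns-python', '1.1.0'), ('adns-python-1.1.0', '')
    dists = list(interpret_distro_name('http://x/', 'adns-python-1.1.0', None))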
+
+
+# From Python 2.7 docs
+def unique_everseen(iterable, key=None):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in six.moves.filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+
+def unique_values(func):
+ """
+ Wrap a function returning an iterable such that the resulting iterable
+ only ever yields unique items.
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return unique_everseen(func(*args, **kwargs))
+
+ return wrapper
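A usage sketch of the decorator above (the generator is invented):

    @unique_values
    def candidate_urls():
        yield 'https://example.org/a'
        yield 'https://example.org/a'   # duplicate, dropped by unique_everseen
        yield 'https://example.org/b'

    list(candidate_urls())  # ['https://example.org/a', 'https://example.org/b']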
+
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
-# this line is here to fix emacs' cruddy broken syntax highlighting
-
-
-@unique_values
-def find_external_links(url, page):
- """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
-
- for match in REL.finditer(page):
- tag, rel = match.groups()
- rels = set(map(str.strip, rel.lower().split(',')))
- if 'homepage' in rels or 'download' in rels:
- for match in HREF.finditer(tag):
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
- for tag in ("<th>Home Page", "<th>Download URL"):
- pos = page.find(tag)
+# this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+@unique_values
+def find_external_links(url, page):
+ """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
+
+ for match in REL.finditer(page):
+ tag, rel = match.groups()
+ rels = set(map(str.strip, rel.lower().split(',')))
+ if 'homepage' in rels or 'download' in rels:
+ for match in HREF.finditer(tag):
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+ for tag in ("<th>Home Page", "<th>Download URL"):
+ pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
- if match:
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
-
+ if match:
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
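For illustration, the extraction above on a tiny page snippet (values invented; HREF is the module-level href regex):

    page = '<a rel="download" href="/dl/foo-1.0.tar.gz">source</a>'
    list(find_external_links('https://example.org/foo/', page))
    # -> ['https://example.org/dl/foo-1.0.tar.gz']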
+
class ContentChecker:
- """
- A null content checker that defines the interface for checking content
- """
-
- def feed(self, block):
- """
- Feed a block of data to the hash.
- """
- return
-
- def is_valid(self):
- """
- Check the hash. Return False if validation fails.
- """
- return True
-
- def report(self, reporter, template):
- """
- Call reporter with information about the checker (hash name)
- substituted into the template.
- """
- return
-
-
-class HashChecker(ContentChecker):
- pattern = re.compile(
- r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
- r'(?P<expected>[a-f0-9]+)'
- )
-
- def __init__(self, hash_name, expected):
- self.hash_name = hash_name
- self.hash = hashlib.new(hash_name)
- self.expected = expected
-
- @classmethod
- def from_url(cls, url):
- "Construct a (possibly null) ContentChecker from a URL"
- fragment = urllib.parse.urlparse(url)[-1]
- if not fragment:
- return ContentChecker()
- match = cls.pattern.search(fragment)
- if not match:
- return ContentChecker()
- return cls(**match.groupdict())
-
- def feed(self, block):
- self.hash.update(block)
-
- def is_valid(self):
- return self.hash.hexdigest() == self.expected
-
- def report(self, reporter, template):
- msg = template % self.hash_name
- return reporter(msg)
-
-
-class PackageIndex(Environment):
- """A distribution index that scans web pages for download URLs"""
-
- def __init__(
+ """
+ A null content checker that defines the interface for checking content
+ """
+
+ def feed(self, block):
+ """
+ Feed a block of data to the hash.
+ """
+ return
+
+ def is_valid(self):
+ """
+ Check the hash. Return False if validation fails.
+ """
+ return True
+
+ def report(self, reporter, template):
+ """
+ Call reporter with information about the checker (hash name)
+ substituted into the template.
+ """
+ return
+
+
+class HashChecker(ContentChecker):
+ pattern = re.compile(
+ r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
+ r'(?P<expected>[a-f0-9]+)'
+ )
+
+ def __init__(self, hash_name, expected):
+ self.hash_name = hash_name
+ self.hash = hashlib.new(hash_name)
+ self.expected = expected
+
+ @classmethod
+ def from_url(cls, url):
+ "Construct a (possibly null) ContentChecker from a URL"
+ fragment = urllib.parse.urlparse(url)[-1]
+ if not fragment:
+ return ContentChecker()
+ match = cls.pattern.search(fragment)
+ if not match:
+ return ContentChecker()
+ return cls(**match.groupdict())
+
+ def feed(self, block):
+ self.hash.update(block)
+
+ def is_valid(self):
+ return self.hash.hexdigest() == self.expected
+
+ def report(self, reporter, template):
+ msg = template % self.hash_name
+ return reporter(msg)
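A minimal sketch of the checker protocol above; the URL is invented and the digest is the md5 of empty input:

    checker = HashChecker.from_url(
        'https://example.org/foo-1.0.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e')
    checker.feed(b'')   # feed each downloaded block
    checker.is_valid()  # True only if the digest matches the fragment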
+
+
+class PackageIndex(Environment):
+ """A distribution index that scans web pages for download URLs"""
+
+ def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
- ca_bundle=None, verify_ssl=True, *args, **kw
+ ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
- self.scanned_urls = {}
- self.fetched_urls = {}
- self.package_pages = {}
+ self.scanned_urls = {}
+ self.fetched_urls = {}
+ self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
- self.to_scan = []
+ self.to_scan = []
use_ssl = (
verify_ssl
and ssl_support.is_available
and (ca_bundle or ssl_support.find_ca_bundle())
)
if use_ssl:
- self.opener = ssl_support.opener_for(ca_bundle)
+ self.opener = ssl_support.opener_for(ca_bundle)
else:
self.opener = urllib.request.urlopen
-
- def process_url(self, url, retrieve=False):
- """Evaluate a URL as a possible download, and maybe retrieve it"""
- if url in self.scanned_urls and not retrieve:
- return
- self.scanned_urls[url] = True
- if not URL_SCHEME(url):
- self.process_filename(url)
- return
- else:
- dists = list(distros_for_url(url))
- if dists:
- if not self.url_ok(url):
- return
- self.debug("Found link: %s", url)
-
- if dists or not retrieve or url in self.fetched_urls:
- list(map(self.add, dists))
- return # don't need the actual page
-
- if not self.url_ok(url):
- self.fetched_urls[url] = True
- return
-
- self.info("Reading %s", url)
+
+ def process_url(self, url, retrieve=False):
+ """Evaluate a URL as a possible download, and maybe retrieve it"""
+ if url in self.scanned_urls and not retrieve:
+ return
+ self.scanned_urls[url] = True
+ if not URL_SCHEME(url):
+ self.process_filename(url)
+ return
+ else:
+ dists = list(distros_for_url(url))
+ if dists:
+ if not self.url_ok(url):
+ return
+ self.debug("Found link: %s", url)
+
+ if dists or not retrieve or url in self.fetched_urls:
+ list(map(self.add, dists))
+ return # don't need the actual page
+
+ if not self.url_ok(url):
+ self.fetched_urls[url] = True
+ return
+
+ self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
- self.fetched_urls[f.url] = True
- if 'html' not in f.headers.get('content-type', '').lower():
+ self.fetched_urls[f.url] = True
+ if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
- return
-
+ return
+
base = f.url # handle redirects
- page = f.read()
+ page = f.read()
if not isinstance(page, str):
            # In Python 3 we got bytes but want str.
- if isinstance(f, urllib.error.HTTPError):
- # Errors have no charset, assume latin1:
- charset = 'latin-1'
- else:
- charset = f.headers.get_param('charset') or 'latin-1'
- page = page.decode(charset, "ignore")
- f.close()
- for match in HREF.finditer(page):
- link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
- self.process_url(link)
+ if isinstance(f, urllib.error.HTTPError):
+ # Errors have no charset, assume latin1:
+ charset = 'latin-1'
+ else:
+ charset = f.headers.get_param('charset') or 'latin-1'
+ page = page.decode(charset, "ignore")
+ f.close()
+ for match in HREF.finditer(page):
+ link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
+ self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
- page = self.process_index(url, page)
-
- def process_filename(self, fn, nested=False):
- # process filenames or directories
- if not os.path.exists(fn):
- self.warn("Not found: %s", fn)
- return
-
- if os.path.isdir(fn) and not nested:
- path = os.path.realpath(fn)
- for item in os.listdir(path):
+ page = self.process_index(url, page)
+
+ def process_filename(self, fn, nested=False):
+ # process filenames or directories
+ if not os.path.exists(fn):
+ self.warn("Not found: %s", fn)
+ return
+
+ if os.path.isdir(fn) and not nested:
+ path = os.path.realpath(fn)
+ for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
-
- dists = distros_for_filename(fn)
- if dists:
- self.debug("Found: %s", fn)
- list(map(self.add, dists))
-
- def url_ok(self, url, fatal=False):
- s = URL_SCHEME(url)
+
+ dists = distros_for_filename(fn)
+ if dists:
+ self.debug("Found: %s", fn)
+ list(map(self.add, dists))
+
+ def url_ok(self, url, fatal=False):
+ s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
- return True
+ return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
- if fatal:
- raise DistutilsError(msg % url)
- else:
- self.warn(msg, url)
-
- def scan_egg_links(self, search_path):
- dirs = filter(os.path.isdir, search_path)
- egg_links = (
- (path, entry)
- for path in dirs
- for entry in os.listdir(path)
- if entry.endswith('.egg-link')
- )
- list(itertools.starmap(self.scan_egg_link, egg_links))
-
- def scan_egg_link(self, path, entry):
- with open(os.path.join(path, entry)) as raw_lines:
- # filter non-empty lines
- lines = list(filter(None, map(str.strip, raw_lines)))
-
- if len(lines) != 2:
- # format is not recognized; punt
- return
-
- egg_path, setup_path = lines
-
- for dist in find_distributions(os.path.join(path, egg_path)):
- dist.location = os.path.join(path, *lines)
- dist.precedence = SOURCE_DIST
- self.add(dist)
-
+ if fatal:
+ raise DistutilsError(msg % url)
+ else:
+ self.warn(msg, url)
+
+ def scan_egg_links(self, search_path):
+ dirs = filter(os.path.isdir, search_path)
+ egg_links = (
+ (path, entry)
+ for path in dirs
+ for entry in os.listdir(path)
+ if entry.endswith('.egg-link')
+ )
+ list(itertools.starmap(self.scan_egg_link, egg_links))
+
+ def scan_egg_link(self, path, entry):
+ with open(os.path.join(path, entry)) as raw_lines:
+ # filter non-empty lines
+ lines = list(filter(None, map(str.strip, raw_lines)))
+
+ if len(lines) != 2:
+ # format is not recognized; punt
+ return
+
+ egg_path, setup_path = lines
+
+ for dist in find_distributions(os.path.join(path, egg_path)):
+ dist.location = os.path.join(path, *lines)
+ dist.precedence = SOURCE_DIST
+ self.add(dist)
+
def process_index(self, url, page):
- """Process the contents of a PyPI page"""
-
- def scan(link):
- # Process a URL to see if it's for a package page
- if link.startswith(self.index_url):
- parts = list(map(
- urllib.parse.unquote, link[len(self.index_url):].split('/')
- ))
+ """Process the contents of a PyPI page"""
+
+ def scan(link):
+ # Process a URL to see if it's for a package page
+ if link.startswith(self.index_url):
+ parts = list(map(
+ urllib.parse.unquote, link[len(self.index_url):].split('/')
+ ))
if len(parts) == 2 and '#' not in parts[1]:
- # it's a package page, sanitize and index it
- pkg = safe_name(parts[0])
- ver = safe_version(parts[1])
+ # it's a package page, sanitize and index it
+ pkg = safe_name(parts[0])
+ ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
- return to_filename(pkg), to_filename(ver)
- return None, None
-
- # process an index page into the package-page index
- for match in HREF.finditer(page):
- try:
- scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
- except ValueError:
- pass
-
+ return to_filename(pkg), to_filename(ver)
+ return None, None
+
+ # process an index page into the package-page index
+ for match in HREF.finditer(page):
+ try:
+ scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
+ except ValueError:
+ pass
+
pkg, ver = scan(url) # ensure this page is in the page index
- if pkg:
- # process individual package page
- for new_url in find_external_links(url, page):
- # Process the found URL
- base, frag = egg_info_for_url(new_url)
- if base.endswith('.py') and not frag:
- if ver:
+ if pkg:
+ # process individual package page
+ for new_url in find_external_links(url, page):
+ # Process the found URL
+ base, frag = egg_info_for_url(new_url)
+ if base.endswith('.py') and not frag:
+ if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
- else:
- self.need_version_info(url)
- self.scan_url(new_url)
-
- return PYPI_MD5.sub(
+ else:
+ self.need_version_info(url)
+ self.scan_url(new_url)
+
+ return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
- )
- else:
+ )
+ else:
return "" # no sense double-scanning non-package pages
-
- def need_version_info(self, url):
- self.scan_all(
- "Page at %s links to .py file(s) without version info; an index "
- "scan is required.", url
- )
-
- def scan_all(self, msg=None, *args):
- if self.index_url not in self.fetched_urls:
+
+ def need_version_info(self, url):
+ self.scan_all(
+ "Page at %s links to .py file(s) without version info; an index "
+ "scan is required.", url
+ )
+
+ def scan_all(self, msg=None, *args):
+ if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
- self.info(
- "Scanning index of all packages (this may take a while)"
- )
- self.scan_url(self.index_url)
-
- def find_packages(self, requirement):
+ self.info(
+ "Scanning index of all packages (this may take a while)"
+ )
+ self.scan_url(self.index_url)
+
+ def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # Fall back to safe version of the name
+
+ if not self.package_pages.get(requirement.key):
+ # Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # We couldn't find the target package, so search the index page too
- self.not_found_in_index(requirement)
-
+
+ if not self.package_pages.get(requirement.key):
+ # We couldn't find the target package, so search the index page too
+ self.not_found_in_index(requirement)
+
for url in list(self.package_pages.get(requirement.key, ())):
- # scan each page that might be related to the desired package
- self.scan_url(url)
-
- def obtain(self, requirement, installer=None):
- self.prescan()
- self.find_packages(requirement)
- for dist in self[requirement.key]:
- if dist in requirement:
- return dist
- self.debug("%s does not match %s", requirement, dist)
+ # scan each page that might be related to the desired package
+ self.scan_url(url)
+
+ def obtain(self, requirement, installer=None):
+ self.prescan()
+ self.find_packages(requirement)
+ for dist in self[requirement.key]:
+ if dist in requirement:
+ return dist
+ self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
-
- def check_hash(self, checker, filename, tfp):
- """
- checker is a ContentChecker
- """
+
+ def check_hash(self, checker, filename, tfp):
+ """
+ checker is a ContentChecker
+ """
checker.report(
self.debug,
- "Validating %%s checksum for %s" % filename)
- if not checker.is_valid():
- tfp.close()
- os.unlink(filename)
- raise DistutilsError(
- "%s validation failed for %s; "
+ "Validating %%s checksum for %s" % filename)
+ if not checker.is_valid():
+ tfp.close()
+ os.unlink(filename)
+ raise DistutilsError(
+ "%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
- )
-
- def add_find_links(self, urls):
- """Add `urls` to the list that will be prescanned for searches"""
- for url in urls:
- if (
+ )
+
+ def add_find_links(self, urls):
+ """Add `urls` to the list that will be prescanned for searches"""
+ for url in urls:
+ if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
- or url.startswith('file:')
+ or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
- ):
- # then go ahead and process it now
- self.scan_url(url)
- else:
- # otherwise, defer retrieval till later
- self.to_scan.append(url)
-
- def prescan(self):
- """Scan urls scheduled for prescanning (e.g. --find-links)"""
- if self.to_scan:
- list(map(self.scan_url, self.to_scan))
+ ):
+ # then go ahead and process it now
+ self.scan_url(url)
+ else:
+ # otherwise, defer retrieval till later
+ self.to_scan.append(url)
+
+ def prescan(self):
+ """Scan urls scheduled for prescanning (e.g. --find-links)"""
+ if self.to_scan:
+ list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
-
- def not_found_in_index(self, requirement):
+
+ def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
- meth, msg = self.info, "Couldn't retrieve index page for %r"
+ meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
- "Couldn't find index page for %r (maybe misspelled?)")
- meth(msg, requirement.unsafe_name)
- self.scan_all()
-
- def download(self, spec, tmpdir):
- """Locate and/or download `spec` to `tmpdir`, returning a local path
-
- `spec` may be a ``Requirement`` object, or a string containing a URL,
- an existing local filename, or a project/version requirement spec
- (i.e. the string form of a ``Requirement`` object). If it is the URL
- of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
- that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
- automatically created alongside the downloaded file.
-
- If `spec` is a ``Requirement`` object or a string containing a
- project/version requirement spec, this method returns the location of
- a matching distribution (possibly after downloading it to `tmpdir`).
- If `spec` is a locally existing file or directory name, it is simply
- returned unchanged. If `spec` is a URL, it is downloaded to a subpath
- of `tmpdir`, and the local filename is returned. Various errors may be
- raised if a problem occurs during downloading.
- """
+ "Couldn't find index page for %r (maybe misspelled?)")
+ meth(msg, requirement.unsafe_name)
+ self.scan_all()
+
+ def download(self, spec, tmpdir):
+ """Locate and/or download `spec` to `tmpdir`, returning a local path
+
+ `spec` may be a ``Requirement`` object, or a string containing a URL,
+ an existing local filename, or a project/version requirement spec
+ (i.e. the string form of a ``Requirement`` object). If it is the URL
+ of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
+ that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
+ automatically created alongside the downloaded file.
+
+ If `spec` is a ``Requirement`` object or a string containing a
+ project/version requirement spec, this method returns the location of
+ a matching distribution (possibly after downloading it to `tmpdir`).
+ If `spec` is a locally existing file or directory name, it is simply
+ returned unchanged. If `spec` is a URL, it is downloaded to a subpath
+ of `tmpdir`, and the local filename is returned. Various errors may be
+ raised if a problem occurs during downloading.
+ """
if not isinstance(spec, Requirement):
- scheme = URL_SCHEME(spec)
- if scheme:
- # It's a url, download it to tmpdir
- found = self._download_url(scheme.group(1), spec, tmpdir)
- base, fragment = egg_info_for_url(spec)
- if base.endswith('.py'):
+ scheme = URL_SCHEME(spec)
+ if scheme:
+ # It's a url, download it to tmpdir
+ found = self._download_url(scheme.group(1), spec, tmpdir)
+ base, fragment = egg_info_for_url(spec)
+ if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
- return found
- elif os.path.exists(spec):
- # Existing file or directory, just return it
- return spec
- else:
+ return found
+ elif os.path.exists(spec):
+ # Existing file or directory, just return it
+ return spec
+ else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
-
- def fetch_distribution(
- self, requirement, tmpdir, force_scan=False, source=False,
+
+ def fetch_distribution(
+ self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
- """Obtain a distribution suitable for fulfilling `requirement`
-
- `requirement` must be a ``pkg_resources.Requirement`` instance.
- If necessary, or if the `force_scan` flag is set, the requirement is
- searched for in the (online) package index as well as the locally
- installed packages. If a distribution matching `requirement` is found,
- the returned distribution's ``location`` is the value you would have
- gotten from calling the ``download()`` method with the matching
- distribution's URL or filename. If no matching distribution is found,
- ``None`` is returned.
-
- If the `source` flag is set, only source distributions and source
- checkout links will be considered. Unless the `develop_ok` flag is
- set, development and system eggs (i.e., those using the ``.egg-info``
- format) will be ignored.
- """
- # process a Requirement
- self.info("Searching for %s", requirement)
- skipped = {}
- dist = None
-
- def find(req, env=None):
- if env is None:
- env = self
- # Find a matching distribution; may be called more than once
-
- for dist in env[req.key]:
-
+ """Obtain a distribution suitable for fulfilling `requirement`
+
+ `requirement` must be a ``pkg_resources.Requirement`` instance.
+ If necessary, or if the `force_scan` flag is set, the requirement is
+ searched for in the (online) package index as well as the locally
+ installed packages. If a distribution matching `requirement` is found,
+ the returned distribution's ``location`` is the value you would have
+ gotten from calling the ``download()`` method with the matching
+ distribution's URL or filename. If no matching distribution is found,
+ ``None`` is returned.
+
+ If the `source` flag is set, only source distributions and source
+ checkout links will be considered. Unless the `develop_ok` flag is
+ set, development and system eggs (i.e., those using the ``.egg-info``
+ format) will be ignored.
+ """
+ # process a Requirement
+ self.info("Searching for %s", requirement)
+ skipped = {}
+ dist = None
+
+ def find(req, env=None):
+ if env is None:
+ env = self
+ # Find a matching distribution; may be called more than once
+
+ for dist in env[req.key]:
+
if dist.precedence == DEVELOP_DIST and not develop_ok:
- if dist not in skipped:
+ if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
)
- skipped[dist] = 1
- continue
-
+ skipped[dist] = 1
+ continue
+
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
@@ -636,311 +636,311 @@ class PackageIndex(Environment):
dist.download_location = loc
if os.path.exists(dist.download_location):
return dist
-
- if force_scan:
- self.prescan()
- self.find_packages(requirement)
- dist = find(requirement)
-
+
+ if force_scan:
+ self.prescan()
+ self.find_packages(requirement)
+ dist = find(requirement)
+
if not dist and local_index is not None:
dist = find(requirement, local_index)
-
- if dist is None:
- if self.to_scan is not None:
- self.prescan()
- dist = find(requirement)
-
- if dist is None and not force_scan:
- self.find_packages(requirement)
- dist = find(requirement)
-
- if dist is None:
- self.warn(
+
+ if dist is None:
+ if self.to_scan is not None:
+ self.prescan()
+ dist = find(requirement)
+
+ if dist is None and not force_scan:
+ self.find_packages(requirement)
+ dist = find(requirement)
+
+ if dist is None:
+ self.warn(
"No local packages or working download links found for %s%s",
- (source and "a source distribution of " or ""),
- requirement,
- )
- else:
- self.info("Best match: %s", dist)
+ (source and "a source distribution of " or ""),
+ requirement,
+ )
+ else:
+ self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
-
- def fetch(self, requirement, tmpdir, force_scan=False, source=False):
- """Obtain a file suitable for fulfilling `requirement`
-
- DEPRECATED; use the ``fetch_distribution()`` method now instead. For
- backward compatibility, this routine is identical but returns the
- ``location`` of the downloaded distribution instead of a distribution
- object.
- """
+
+ def fetch(self, requirement, tmpdir, force_scan=False, source=False):
+ """Obtain a file suitable for fulfilling `requirement`
+
+ DEPRECATED; use the ``fetch_distribution()`` method now instead. For
+ backward compatibility, this routine is identical but returns the
+ ``location`` of the downloaded distribution instead of a distribution
+ object.
+ """
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
- if dist is not None:
- return dist.location
- return None
-
- def gen_setup(self, filename, fragment, tmpdir):
- match = EGG_FRAGMENT.match(fragment)
- dists = match and [
- d for d in
- interpret_distro_name(filename, match.group(1), None) if d.version
- ] or []
-
+ if dist is not None:
+ return dist.location
+ return None
+
+ def gen_setup(self, filename, fragment, tmpdir):
+ match = EGG_FRAGMENT.match(fragment)
+ dists = match and [
+ d for d in
+ interpret_distro_name(filename, match.group(1), None) if d.version
+ ] or []
+
if len(dists) == 1: # unambiguous ``#egg`` fragment
- basename = os.path.basename(filename)
-
- # Make sure the file has been downloaded to the temp dir.
- if os.path.dirname(filename) != tmpdir:
- dst = os.path.join(tmpdir, basename)
- from setuptools.command.easy_install import samefile
- if not samefile(filename, dst):
- shutil.copy2(filename, dst)
+ basename = os.path.basename(filename)
+
+ # Make sure the file has been downloaded to the temp dir.
+ if os.path.dirname(filename) != tmpdir:
+ dst = os.path.join(tmpdir, basename)
+ from setuptools.command.easy_install import samefile
+ if not samefile(filename, dst):
+ shutil.copy2(filename, dst)
filename = dst
-
- with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
- file.write(
- "from setuptools import setup\n"
- "setup(name=%r, version=%r, py_modules=[%r])\n"
- % (
- dists[0].project_name, dists[0].version,
- os.path.splitext(basename)[0]
- )
- )
- return filename
-
- elif match:
- raise DistutilsError(
- "Can't unambiguously interpret project/version identifier %r; "
- "any dashes in the name or version should be escaped using "
+
+ with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
+ file.write(
+ "from setuptools import setup\n"
+ "setup(name=%r, version=%r, py_modules=[%r])\n"
+ % (
+ dists[0].project_name, dists[0].version,
+ os.path.splitext(basename)[0]
+ )
+ )
+ return filename
+
+ elif match:
+ raise DistutilsError(
+ "Can't unambiguously interpret project/version identifier %r; "
+ "any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
- )
- else:
- raise DistutilsError(
- "Can't process plain .py files without an '#egg=name-version'"
- " suffix to enable automatic setup script generation."
- )
-
- dl_blocksize = 8192
-
- def _download_to(self, url, filename):
- self.info("Downloading %s", url)
- # Download the file
+ )
+ else:
+ raise DistutilsError(
+ "Can't process plain .py files without an '#egg=name-version'"
+ " suffix to enable automatic setup script generation."
+ )
+
+ dl_blocksize = 8192
+
+ def _download_to(self, url, filename):
+ self.info("Downloading %s", url)
+ # Download the file
fp = None
- try:
- checker = HashChecker.from_url(url)
+ try:
+ checker = HashChecker.from_url(url)
fp = self.open_url(url)
- if isinstance(fp, urllib.error.HTTPError):
- raise DistutilsError(
+ if isinstance(fp, urllib.error.HTTPError):
+ raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
- )
- headers = fp.info()
- blocknum = 0
- bs = self.dl_blocksize
- size = -1
- if "content-length" in headers:
- # Some servers return multiple Content-Length headers :(
- sizes = get_all_headers(headers, 'Content-Length')
- size = max(map(int, sizes))
- self.reporthook(url, filename, blocknum, bs, size)
+ )
+ headers = fp.info()
+ blocknum = 0
+ bs = self.dl_blocksize
+ size = -1
+ if "content-length" in headers:
+ # Some servers return multiple Content-Length headers :(
+ sizes = get_all_headers(headers, 'Content-Length')
+ size = max(map(int, sizes))
+ self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
- while True:
- block = fp.read(bs)
- if block:
- checker.feed(block)
- tfp.write(block)
- blocknum += 1
- self.reporthook(url, filename, blocknum, bs, size)
- else:
- break
- self.check_hash(checker, filename, tfp)
- return headers
- finally:
+ while True:
+ block = fp.read(bs)
+ if block:
+ checker.feed(block)
+ tfp.write(block)
+ blocknum += 1
+ self.reporthook(url, filename, blocknum, bs, size)
+ else:
+ break
+ self.check_hash(checker, filename, tfp)
+ return headers
+ finally:
if fp:
fp.close()
-
- def reporthook(self, url, filename, blocknum, blksize, size):
+
+ def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
-
- def open_url(self, url, warning=None):
- if url.startswith('file:'):
- return local_open(url)
- try:
- return open_with_auth(url, self.opener)
- except (ValueError, http_client.InvalidURL) as v:
- msg = ' '.join([str(arg) for arg in v.args])
- if warning:
- self.warn(warning, msg)
- else:
- raise DistutilsError('%s %s' % (url, msg))
- except urllib.error.HTTPError as v:
- return v
- except urllib.error.URLError as v:
- if warning:
- self.warn(warning, v.reason)
- else:
- raise DistutilsError("Download error for %s: %s"
- % (url, v.reason))
- except http_client.BadStatusLine as v:
- if warning:
- self.warn(warning, v.line)
- else:
- raise DistutilsError(
- '%s returned a bad status line. The server might be '
- 'down, %s' %
- (url, v.line)
- )
+
+ def open_url(self, url, warning=None):
+ if url.startswith('file:'):
+ return local_open(url)
+ try:
+ return open_with_auth(url, self.opener)
+ except (ValueError, http_client.InvalidURL) as v:
+ msg = ' '.join([str(arg) for arg in v.args])
+ if warning:
+ self.warn(warning, msg)
+ else:
+ raise DistutilsError('%s %s' % (url, msg))
+ except urllib.error.HTTPError as v:
+ return v
+ except urllib.error.URLError as v:
+ if warning:
+ self.warn(warning, v.reason)
+ else:
+ raise DistutilsError("Download error for %s: %s"
+ % (url, v.reason))
+ except http_client.BadStatusLine as v:
+ if warning:
+ self.warn(warning, v.line)
+ else:
+ raise DistutilsError(
+ '%s returned a bad status line. The server might be '
+ 'down, %s' %
+ (url, v.line)
+ )
except (http_client.HTTPException, socket.error) as v:
- if warning:
- self.warn(warning, v)
- else:
- raise DistutilsError("Download error for %s: %s"
- % (url, v))
-
- def _download_url(self, scheme, url, tmpdir):
- # Determine download filename
- #
- name, fragment = egg_info_for_url(url)
- if name:
- while '..' in name:
+ if warning:
+ self.warn(warning, v)
+ else:
+ raise DistutilsError("Download error for %s: %s"
+ % (url, v))
+
+ def _download_url(self, scheme, url, tmpdir):
+ # Determine download filename
+ #
+ name, fragment = egg_info_for_url(url)
+ if name:
+ while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
- else:
+ else:
name = "__downloaded__" # default if URL has no path contents
-
- if name.endswith('.egg.zip'):
+
+ if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
-
+
filename = os.path.join(tmpdir, name)
-
- # Download the file
- #
+
+ # Download the file
+ #
if scheme == 'svn' or scheme.startswith('svn+'):
- return self._download_svn(url, filename)
+ return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
- return self._download_git(url, filename)
- elif scheme.startswith('hg+'):
- return self._download_hg(url, filename)
+ return self._download_git(url, filename)
+ elif scheme.startswith('hg+'):
+ return self._download_hg(url, filename)
elif scheme == 'file':
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
- else:
+ return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
+ else:
self.url_ok(url, True) # raises error if not allowed
- return self._attempt_download(url, filename)
-
- def scan_url(self, url):
- self.process_url(url, True)
-
- def _attempt_download(self, url, filename):
- headers = self._download_to(url, filename)
+ return self._attempt_download(url, filename)
+
+ def scan_url(self, url):
+ self.process_url(url, True)
+
+ def _attempt_download(self, url, filename):
+ headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
- return self._download_html(url, headers, filename)
- else:
- return filename
-
- def _download_html(self, url, headers, filename):
- file = open(filename)
- for line in file:
- if line.strip():
- # Check for a subversion index page
- if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
- # it's a subversion index page:
- file.close()
- os.unlink(filename)
- return self._download_svn(url, filename)
+ return self._download_html(url, headers, filename)
+ else:
+ return filename
+
+ def _download_html(self, url, headers, filename):
+ file = open(filename)
+ for line in file:
+ if line.strip():
+ # Check for a subversion index page
+ if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
+ # it's a subversion index page:
+ file.close()
+ os.unlink(filename)
+ return self._download_svn(url, filename)
break # not an index page
- file.close()
- os.unlink(filename)
+ file.close()
+ os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
-
- def _download_svn(self, url, filename):
+
+ def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
- creds = ''
- if url.lower().startswith('svn:') and '@' in url:
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
- if not netloc and path.startswith('//') and '/' in path[2:]:
+ creds = ''
+ if url.lower().startswith('svn:') and '@' in url:
+ scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
+ if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = _splituser(netloc)
- if auth:
- if ':' in auth:
+ if auth:
+ if ':' in auth:
user, pw = auth.split(':', 1)
- creds = " --username=%s --password=%s" % (user, pw)
- else:
+ creds = " --username=%s --password=%s" % (user, pw)
+ else:
creds = " --username=" + auth
- netloc = host
- parts = scheme, netloc, url, p, q, f
- url = urllib.parse.urlunparse(parts)
- self.info("Doing subversion checkout from %s to %s", url, filename)
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
- return filename
-
- @staticmethod
- def _vcs_split_rev_from_url(url, pop_prefix=False):
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
-
- scheme = scheme.split('+', 1)[-1]
-
- # Some fragment identification fails
+ netloc = host
+ parts = scheme, netloc, url, p, q, f
+ url = urllib.parse.urlunparse(parts)
+ self.info("Doing subversion checkout from %s to %s", url, filename)
+ os.system("svn checkout%s -q %s %s" % (creds, url, filename))
+ return filename
+
+ @staticmethod
+ def _vcs_split_rev_from_url(url, pop_prefix=False):
+ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+
+ scheme = scheme.split('+', 1)[-1]
+
+ # Some fragment identification fails
path = path.split('#', 1)[0]
-
- rev = None
- if '@' in path:
- path, rev = path.rsplit('@', 1)
-
- # Also, discard fragment
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
-
- return url, rev
-
- def _download_git(self, url, filename):
+
+ rev = None
+ if '@' in path:
+ path, rev = path.rsplit('@', 1)
+
+ # Also, discard fragment
+ url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
+
+ return url, rev
+
+ def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing git clone from %s to %s", url, filename)
- os.system("git clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Checking out %s", rev)
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing git clone from %s to %s", url, filename)
+ os.system("git clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
- filename,
- rev,
- ))
-
- return filename
-
- def _download_hg(self, url, filename):
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing hg clone from %s to %s", url, filename)
- os.system("hg clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Updating to %s", rev)
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing hg clone from %s to %s", url, filename)
+ os.system("hg clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
- filename,
- rev,
- ))
-
- return filename
-
- def debug(self, msg, *args):
- log.debug(msg, *args)
-
- def info(self, msg, *args):
- log.info(msg, *args)
-
- def warn(self, msg, *args):
- log.warn(msg, *args)
-
-
-# This pattern matches a character entity reference (a decimal numeric
-# reference, a hexadecimal numeric reference, or a named reference).
-entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
-
-
-def decode_entity(match):
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ def info(self, msg, *args):
+ log.info(msg, *args)
+
+ def warn(self, msg, *args):
+ log.warn(msg, *args)
+
+
+# This pattern matches a character entity reference (a decimal numeric
+# reference, a hexadecimal numeric reference, or a named reference).
+entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+
+def decode_entity(match):
what = match.group(0)
return unescape(what)
-
-def htmldecode(text):
+
+def htmldecode(text):
"""
Decode HTML entities in the given text.
@@ -949,146 +949,146 @@ def htmldecode(text):
... '?tokena=A&amp;tokenb=B">package_name-0.1.2.tar.gz')
'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
"""
- return entity_sub(decode_entity, text)
-
-
-def socket_timeout(timeout=15):
- def _socket_timeout(func):
- def _socket_timeout(*args, **kwargs):
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return func(*args, **kwargs)
- finally:
- socket.setdefaulttimeout(old_timeout)
-
- return _socket_timeout
-
- return _socket_timeout
-
-
-def _encode_auth(auth):
- """
- A function compatible with Python 2.3-3.3 that will encode
- auth from a URL suitable for an HTTP header.
- >>> str(_encode_auth('username%3Apassword'))
- 'dXNlcm5hbWU6cGFzc3dvcmQ='
-
- Long auth strings should not cause a newline to be inserted.
- >>> long_auth = 'username:' + 'password'*10
- >>> chr(10) in str(_encode_auth(long_auth))
- False
- """
- auth_s = urllib.parse.unquote(auth)
- # convert to bytes
- auth_bytes = auth_s.encode()
+ return entity_sub(decode_entity, text)
+
+
+def socket_timeout(timeout=15):
+ def _socket_timeout(func):
+ def _socket_timeout(*args, **kwargs):
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+ return _socket_timeout
+
+ return _socket_timeout
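A usage sketch of the decorator factory above (fetch is invented):

    @socket_timeout(15)
    def fetch(url):
        # sockets opened here get a 15-second default timeout;
        # the previous default is restored afterwards
        return urllib.request.urlopen(url).read()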
+
+
+def _encode_auth(auth):
+ """
+ A function compatible with Python 2.3-3.3 that will encode
+ auth from a URL suitable for an HTTP header.
+ >>> str(_encode_auth('username%3Apassword'))
+ 'dXNlcm5hbWU6cGFzc3dvcmQ='
+
+ Long auth strings should not cause a newline to be inserted.
+ >>> long_auth = 'username:' + 'password'*10
+ >>> chr(10) in str(_encode_auth(long_auth))
+ False
+ """
+ auth_s = urllib.parse.unquote(auth)
+ # convert to bytes
+ auth_bytes = auth_s.encode()
encoded_bytes = base64.b64encode(auth_bytes)
- # convert back to a string
- encoded = encoded_bytes.decode()
- # strip the trailing carriage return
+ # convert back to a string
+ encoded = encoded_bytes.decode()
+ # strip the trailing carriage return
return encoded.replace('\n', '')
-
+
class Credential:
- """
- A username/password pair. Use like a namedtuple.
- """
-
- def __init__(self, username, password):
- self.username = username
- self.password = password
-
- def __iter__(self):
- yield self.username
- yield self.password
-
- def __str__(self):
- return '%(username)s:%(password)s' % vars(self)
-
-
-class PyPIConfig(configparser.RawConfigParser):
- def __init__(self):
- """
- Load from ~/.pypirc
- """
- defaults = dict.fromkeys(['username', 'password', 'repository'], '')
- configparser.RawConfigParser.__init__(self, defaults)
-
- rc = os.path.join(os.path.expanduser('~'), '.pypirc')
- if os.path.exists(rc):
- self.read(rc)
-
- @property
- def creds_by_repository(self):
- sections_with_repositories = [
- section for section in self.sections()
- if self.get(section, 'repository').strip()
- ]
-
- return dict(map(self._get_repo_cred, sections_with_repositories))
-
- def _get_repo_cred(self, section):
- repo = self.get(section, 'repository').strip()
- return repo, Credential(
- self.get(section, 'username').strip(),
- self.get(section, 'password').strip(),
- )
-
- def find_credential(self, url):
- """
- If the URL indicated appears to be a repository defined in this
- config, return the credential for that repository.
- """
- for repository, cred in self.creds_by_repository.items():
- if url.startswith(repository):
- return cred
-
-
-def open_with_auth(url, opener=urllib.request.urlopen):
- """Open a urllib2 request, handling HTTP authentication"""
-
+ """
+ A username/password pair. Use like a namedtuple.
+ """
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+
+ def __iter__(self):
+ yield self.username
+ yield self.password
+
+ def __str__(self):
+ return '%(username)s:%(password)s' % vars(self)
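Illustrating the namedtuple-like behaviour described above (values invented):

    cred = Credential('user', 's3cret')
    username, password = cred   # __iter__ allows unpacking
    str(cred)                   # 'user:s3cret'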
+
+
+class PyPIConfig(configparser.RawConfigParser):
+ def __init__(self):
+ """
+ Load from ~/.pypirc
+ """
+ defaults = dict.fromkeys(['username', 'password', 'repository'], '')
+ configparser.RawConfigParser.__init__(self, defaults)
+
+ rc = os.path.join(os.path.expanduser('~'), '.pypirc')
+ if os.path.exists(rc):
+ self.read(rc)
+
+ @property
+ def creds_by_repository(self):
+ sections_with_repositories = [
+ section for section in self.sections()
+ if self.get(section, 'repository').strip()
+ ]
+
+ return dict(map(self._get_repo_cred, sections_with_repositories))
+
+ def _get_repo_cred(self, section):
+ repo = self.get(section, 'repository').strip()
+ return repo, Credential(
+ self.get(section, 'username').strip(),
+ self.get(section, 'password').strip(),
+ )
+
+ def find_credential(self, url):
+ """
+ If the URL indicated appears to be a repository defined in this
+ config, return the credential for that repository.
+ """
+ for repository, cred in self.creds_by_repository.items():
+ if url.startswith(repository):
+ return cred
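A sketch of the lookup above, assuming ~/.pypirc defines a section whose repository matches the URL:

    cred = PyPIConfig().find_credential('https://example.org/simple/')
    if cred is not None:
        log.info('would authenticate as %s', cred.username)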
+
+
+def open_with_auth(url, opener=urllib.request.urlopen):
+ """Open a urllib2 request, handling HTTP authentication"""
+
parsed = urllib.parse.urlparse(url)
scheme, netloc, path, params, query, frag = parsed
-
- # Double scheme does not raise on Mac OS X as revealed by a
- # failing test. We would expect "nonnumeric port". Refs #20.
- if netloc.endswith(':'):
- raise http_client.InvalidURL("nonnumeric port: ''")
-
- if scheme in ('http', 'https'):
+
+ # Double scheme does not raise on Mac OS X as revealed by a
+ # failing test. We would expect "nonnumeric port". Refs #20.
+ if netloc.endswith(':'):
+ raise http_client.InvalidURL("nonnumeric port: ''")
+
+ if scheme in ('http', 'https'):
auth, address = _splituser(netloc)
- else:
- auth = None
-
- if not auth:
- cred = PyPIConfig().find_credential(url)
- if cred:
- auth = str(cred)
- info = cred.username, url
+ else:
+ auth = None
+
+ if not auth:
+ cred = PyPIConfig().find_credential(url)
+ if cred:
+ auth = str(cred)
+ info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
-
- if auth:
- auth = "Basic " + _encode_auth(auth)
+
+ if auth:
+ auth = "Basic " + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
- new_url = urllib.parse.urlunparse(parts)
- request = urllib.request.Request(new_url)
- request.add_header("Authorization", auth)
- else:
- request = urllib.request.Request(url)
-
- request.add_header('User-Agent', user_agent)
- fp = opener(request)
-
- if auth:
- # Put authentication info back into request URL if same host,
- # so that links found on the page will work
- s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
+ new_url = urllib.parse.urlunparse(parts)
+ request = urllib.request.Request(new_url)
+ request.add_header("Authorization", auth)
+ else:
+ request = urllib.request.Request(url)
+
+ request.add_header('User-Agent', user_agent)
+ fp = opener(request)
+
+ if auth:
+ # Put authentication info back into request URL if same host,
+ # so that links found on the page will work
+ s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == address:
- parts = s2, netloc, path2, param2, query2, frag2
- fp.url = urllib.parse.urlunparse(parts)
-
- return fp
-
+ parts = s2, netloc, path2, param2, query2, frag2
+ fp.url = urllib.parse.urlunparse(parts)
+
+ return fp
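Putting it together, a hedged sketch of calling the helper above (URL and credentials invented):

    fp = open_with_auth('https://user:s3cret@example.org/simple/foo/')
    page = fp.read()  # request carried HTTP Basic auth and the module User-Agent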
+
# copy of urllib.parse._splituser from Python 3.8
def _splituser(host):
@@ -1097,40 +1097,40 @@ def _splituser(host):
return (user if delim else None), host
-# adding a timeout to avoid freezing package_index
-open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
-
-
-def fix_sf_url(url):
+# adding a timeout to avoid freezing package_index
+open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
+def fix_sf_url(url):
return url # backward compatibility
-
-
-def local_open(url):
- """Read a local path, with special support for directories"""
- scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
- filename = urllib.request.url2pathname(path)
- if os.path.isfile(filename):
- return urllib.request.urlopen(url)
- elif path.endswith('/') and os.path.isdir(filename):
- files = []
- for f in os.listdir(filename):
- filepath = os.path.join(filename, f)
- if f == 'index.html':
- with open(filepath, 'r') as fp:
- body = fp.read()
- break
- elif os.path.isdir(filepath):
- f += '/'
- files.append('<a href="{name}">{name}</a>'.format(name=f))
- else:
+
+
+def local_open(url):
+ """Read a local path, with special support for directories"""
+ scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
+ filename = urllib.request.url2pathname(path)
+ if os.path.isfile(filename):
+ return urllib.request.urlopen(url)
+ elif path.endswith('/') and os.path.isdir(filename):
+ files = []
+ for f in os.listdir(filename):
+ filepath = os.path.join(filename, f)
+ if f == 'index.html':
+ with open(filepath, 'r') as fp:
+ body = fp.read()
+ break
+ elif os.path.isdir(filepath):
+ f += '/'
+ files.append('<a href="{name}">{name}</a>'.format(name=f))
+ else:
tmpl = (
"<html><head><title>{url}</title>"
- "</head><body>{files}</body></html>")
- body = tmpl.format(url=url, files='\n'.join(files))
- status, message = 200, "OK"
- else:
- status, message, body = 404, "Path not found", "Not found"
-
- headers = {'content-type': 'text/html'}
- body_stream = six.StringIO(body)
- return urllib.error.HTTPError(url, status, message, headers, body_stream)
+ "</head><body>{files}</body></html>")
+ body = tmpl.format(url=url, files='\n'.join(files))
+ status, message = 200, "OK"
+ else:
+ status, message, body = 404, "Path not found", "Not found"
+
+ headers = {'content-type': 'text/html'}
+ body_stream = six.StringIO(body)
+ return urllib.error.HTTPError(url, status, message, headers, body_stream)
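For illustration, the two behaviours of local_open (paths invented):

    local_open('file:///tmp/wheels/foo-1.0.tar.gz')  # regular file: urlopen() result
    local_open('file:///tmp/wheels/')                # directory: synthetic HTML index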
diff --git a/contrib/python/setuptools/py2/setuptools/py27compat.py b/contrib/python/setuptools/py2/setuptools/py27compat.py
index 3d030d052b..1d57360f4e 100644
--- a/contrib/python/setuptools/py2/setuptools/py27compat.py
+++ b/contrib/python/setuptools/py2/setuptools/py27compat.py
@@ -1,19 +1,19 @@
-"""
-Compatibility Support for Python 2.7 and earlier
-"""
-
+"""
+Compatibility Support for Python 2.7 and earlier
+"""
+
import sys
import platform
-
+
from setuptools.extern import six
-def get_all_headers(message, key):
+def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
-
+
if six.PY2:
def get_all_headers(message, key):
diff --git a/contrib/python/setuptools/py2/setuptools/py31compat.py b/contrib/python/setuptools/py2/setuptools/py31compat.py
index a01180deda..e1da7ee2a2 100644
--- a/contrib/python/setuptools/py2/setuptools/py31compat.py
+++ b/contrib/python/setuptools/py2/setuptools/py31compat.py
@@ -1,32 +1,32 @@
__all__ = []
-
+
__metaclass__ = type
-
-try:
- # Python >=3.2
- from tempfile import TemporaryDirectory
-except ImportError:
- import shutil
- import tempfile
+
+try:
+ # Python >=3.2
+ from tempfile import TemporaryDirectory
+except ImportError:
+ import shutil
+ import tempfile
class TemporaryDirectory:
- """
- Very simple temporary directory context manager.
- Will try to delete afterward, but will also ignore OS and similar
- errors on deletion.
- """
+ """
+ Very simple temporary directory context manager.
+ Will try to delete afterward, but will also ignore OS and similar
+ errors on deletion.
+ """
def __init__(self, **kwargs):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp(**kwargs)
-
- def __enter__(self):
- return self.name
-
- def __exit__(self, exctype, excvalue, exctrace):
- try:
- shutil.rmtree(self.name, True)
+
+ def __enter__(self):
+ return self.name
+
+ def __exit__(self, exctype, excvalue, exctrace):
+ try:
+ shutil.rmtree(self.name, True)
except OSError: # removal errors are not the only possible
- pass
- self.name = None
+ pass
+ self.name = None
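A usage sketch of the fallback context manager above:

    with TemporaryDirectory() as tmp:
        scratch = os.path.join(tmp, 'scratch.txt')  # work inside the temp dir
    # best-effort removal on exit; OS errors are ignored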
diff --git a/contrib/python/setuptools/py2/setuptools/sandbox.py b/contrib/python/setuptools/py2/setuptools/sandbox.py
index 82822a6cf4..685f3f72e3 100644
--- a/contrib/python/setuptools/py2/setuptools/sandbox.py
+++ b/contrib/python/setuptools/py2/setuptools/sandbox.py
@@ -1,241 +1,241 @@
-import os
-import sys
-import tempfile
-import operator
-import functools
-import itertools
-import re
-import contextlib
-import pickle
+import os
+import sys
+import tempfile
+import operator
+import functools
+import itertools
+import re
+import contextlib
+import pickle
import textwrap
-
+
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
-
+
import pkg_resources.py31compat
-
-if sys.platform.startswith('java'):
- import org.python.modules.posix.PosixModule as _os
-else:
- _os = sys.modules[os.name]
-try:
- _file = file
-except NameError:
- _file = None
-_open = open
-from distutils.errors import DistutilsError
-from pkg_resources import working_set
-
-
-__all__ = [
- "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
-]
-
-
-def _execfile(filename, globals, locals=None):
- """
- Python 3 implementation of execfile.
- """
- mode = 'rb'
- with open(filename, mode) as stream:
- script = stream.read()
- if locals is None:
- locals = globals
- code = compile(script, filename, 'exec')
- exec(code, globals, locals)
-
-
-@contextlib.contextmanager
-def save_argv(repl=None):
- saved = sys.argv[:]
- if repl is not None:
- sys.argv[:] = repl
- try:
- yield saved
- finally:
- sys.argv[:] = saved
-
-
-@contextlib.contextmanager
-def save_path():
- saved = sys.path[:]
- try:
- yield saved
- finally:
- sys.path[:] = saved
-
-
-@contextlib.contextmanager
-def override_temp(replacement):
- """
- Monkey-patch tempfile.tempdir with replacement, ensuring it exists
- """
+
+if sys.platform.startswith('java'):
+ import org.python.modules.posix.PosixModule as _os
+else:
+ _os = sys.modules[os.name]
+try:
+ _file = file
+except NameError:
+ _file = None
+_open = open
+from distutils.errors import DistutilsError
+from pkg_resources import working_set
+
+
+__all__ = [
+ "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+
+def _execfile(filename, globals, locals=None):
+ """
+ Python 3 implementation of execfile.
+ """
+ mode = 'rb'
+ with open(filename, mode) as stream:
+ script = stream.read()
+ if locals is None:
+ locals = globals
+ code = compile(script, filename, 'exec')
+ exec(code, globals, locals)
+
+
+@contextlib.contextmanager
+def save_argv(repl=None):
+ saved = sys.argv[:]
+ if repl is not None:
+ sys.argv[:] = repl
+ try:
+ yield saved
+ finally:
+ sys.argv[:] = saved
+
+
+@contextlib.contextmanager
+def save_path():
+ saved = sys.path[:]
+ try:
+ yield saved
+ finally:
+ sys.path[:] = saved
+
+
+@contextlib.contextmanager
+def override_temp(replacement):
+ """
+ Monkey-patch tempfile.tempdir with replacement, ensuring it exists
+ """
pkg_resources.py31compat.makedirs(replacement, exist_ok=True)
-
- saved = tempfile.tempdir
-
- tempfile.tempdir = replacement
-
- try:
- yield
- finally:
- tempfile.tempdir = saved
-
-
-@contextlib.contextmanager
-def pushd(target):
- saved = os.getcwd()
- os.chdir(target)
- try:
- yield saved
- finally:
- os.chdir(saved)
-
-
-class UnpickleableException(Exception):
- """
- An exception representing another Exception that could not be pickled.
- """
-
- @staticmethod
- def dump(type, exc):
- """
- Always return a dumped (pickled) type and exc. If exc can't be pickled,
- wrap it in UnpickleableException first.
- """
- try:
- return pickle.dumps(type), pickle.dumps(exc)
- except Exception:
- # get UnpickleableException inside the sandbox
- from setuptools.sandbox import UnpickleableException as cls
- return cls.dump(cls, cls(repr(exc)))
-
-
-class ExceptionSaver:
- """
- A Context Manager that will save an exception, serialized, and restore it
- later.
- """
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, exc, tb):
- if not exc:
- return
-
- # dump the exception
- self._saved = UnpickleableException.dump(type, exc)
- self._tb = tb
-
- # suppress the exception
- return True
-
- def resume(self):
- "restore and re-raise any exception"
-
- if '_saved' not in vars(self):
- return
-
- type, exc = map(pickle.loads, self._saved)
- six.reraise(type, exc, self._tb)
-
-
-@contextlib.contextmanager
-def save_modules():
- """
- Context in which imported modules are saved.
-
- Translates exceptions internal to the context into the equivalent exception
- outside the context.
- """
- saved = sys.modules.copy()
- with ExceptionSaver() as saved_exc:
- yield saved
-
- sys.modules.update(saved)
- # remove any modules imported since
- del_modules = (
- mod_name for mod_name in sys.modules
- if mod_name not in saved
- # exclude any encodings modules. See #285
- and not mod_name.startswith('encodings.')
- )
- _clear_modules(del_modules)
-
- saved_exc.resume()
-
-
-def _clear_modules(module_names):
- for mod_name in list(module_names):
- del sys.modules[mod_name]
-
-
-@contextlib.contextmanager
-def save_pkg_resources_state():
- saved = pkg_resources.__getstate__()
- try:
- yield saved
- finally:
- pkg_resources.__setstate__(saved)
-
-
-@contextlib.contextmanager
-def setup_context(setup_dir):
- temp_dir = os.path.join(setup_dir, 'temp')
- with save_pkg_resources_state():
- with save_modules():
- hide_setuptools()
- with save_path():
- with save_argv():
- with override_temp(temp_dir):
- with pushd(setup_dir):
- # ensure setuptools commands are available
- __import__('setuptools')
- yield
-
-
-def _needs_hiding(mod_name):
- """
- >>> _needs_hiding('setuptools')
- True
- >>> _needs_hiding('pkg_resources')
- True
- >>> _needs_hiding('setuptools_plugin')
- False
- >>> _needs_hiding('setuptools.__init__')
- True
- >>> _needs_hiding('distutils')
- True
- >>> _needs_hiding('os')
- False
- >>> _needs_hiding('Cython')
- True
- """
+
+ saved = tempfile.tempdir
+
+ tempfile.tempdir = replacement
+
+ try:
+ yield
+ finally:
+ tempfile.tempdir = saved
+
+
+@contextlib.contextmanager
+def pushd(target):
+ saved = os.getcwd()
+ os.chdir(target)
+ try:
+ yield saved
+ finally:
+ os.chdir(saved)
+
+
+class UnpickleableException(Exception):
+ """
+ An exception representing another Exception that could not be pickled.
+ """
+
+ @staticmethod
+ def dump(type, exc):
+ """
+ Always return a dumped (pickled) type and exc. If exc can't be pickled,
+ wrap it in UnpickleableException first.
+ """
+ try:
+ return pickle.dumps(type), pickle.dumps(exc)
+ except Exception:
+ # get UnpickleableException inside the sandbox
+ from setuptools.sandbox import UnpickleableException as cls
+ return cls.dump(cls, cls(repr(exc)))
+
+
+class ExceptionSaver:
+ """
+ A Context Manager that will save an exception, serialized, and restore it
+ later.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, exc, tb):
+ if not exc:
+ return
+
+ # dump the exception
+ self._saved = UnpickleableException.dump(type, exc)
+ self._tb = tb
+
+ # suppress the exception
+ return True
+
+ def resume(self):
+ "restore and re-raise any exception"
+
+ if '_saved' not in vars(self):
+ return
+
+ type, exc = map(pickle.loads, self._saved)
+ six.reraise(type, exc, self._tb)
+
+
+@contextlib.contextmanager
+def save_modules():
+ """
+ Context in which imported modules are saved.
+
+ Translates exceptions internal to the context into the equivalent exception
+ outside the context.
+ """
+ saved = sys.modules.copy()
+ with ExceptionSaver() as saved_exc:
+ yield saved
+
+ sys.modules.update(saved)
+ # remove any modules imported since
+ del_modules = (
+ mod_name for mod_name in sys.modules
+ if mod_name not in saved
+ # exclude any encodings modules. See #285
+ and not mod_name.startswith('encodings.')
+ )
+ _clear_modules(del_modules)
+
+ saved_exc.resume()
+
+
+def _clear_modules(module_names):
+ for mod_name in list(module_names):
+ del sys.modules[mod_name]
+
+
+@contextlib.contextmanager
+def save_pkg_resources_state():
+ saved = pkg_resources.__getstate__()
+ try:
+ yield saved
+ finally:
+ pkg_resources.__setstate__(saved)
+
+
+@contextlib.contextmanager
+def setup_context(setup_dir):
+ temp_dir = os.path.join(setup_dir, 'temp')
+ with save_pkg_resources_state():
+ with save_modules():
+ hide_setuptools()
+ with save_path():
+ with save_argv():
+ with override_temp(temp_dir):
+ with pushd(setup_dir):
+ # ensure setuptools commands are available
+ __import__('setuptools')
+ yield
+
+
+def _needs_hiding(mod_name):
+ """
+ >>> _needs_hiding('setuptools')
+ True
+ >>> _needs_hiding('pkg_resources')
+ True
+ >>> _needs_hiding('setuptools_plugin')
+ False
+ >>> _needs_hiding('setuptools.__init__')
+ True
+ >>> _needs_hiding('distutils')
+ True
+ >>> _needs_hiding('os')
+ False
+ >>> _needs_hiding('Cython')
+ True
+ """
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
- return bool(pattern.match(mod_name))
-
-
-def hide_setuptools():
- """
- Remove references to setuptools' modules from sys.modules to allow the
- invocation to import the most appropriate setuptools. This technique is
- necessary to avoid issues such as #315 where setuptools upgrading itself
- would fail to find a function declared in the metadata.
- """
- modules = filter(_needs_hiding, sys.modules)
- _clear_modules(modules)
-
-
-def run_setup(setup_script, args):
- """Run a distutils setup script, sandboxed in its directory"""
- setup_dir = os.path.abspath(os.path.dirname(setup_script))
- with setup_context(setup_dir):
- try:
+ return bool(pattern.match(mod_name))
+
+
+def hide_setuptools():
+ """
+ Remove references to setuptools' modules from sys.modules to allow the
+ invocation to import the most appropriate setuptools. This technique is
+ necessary to avoid issues such as #315 where setuptools upgrading itself
+ would fail to find a function declared in the metadata.
+ """
+ modules = filter(_needs_hiding, sys.modules)
+ _clear_modules(modules)
+
+
+def run_setup(setup_script, args):
+ """Run a distutils setup script, sandboxed in its directory"""
+ setup_dir = os.path.abspath(os.path.dirname(setup_script))
+ with setup_context(setup_dir):
+ try:
sys.argv[:] = [setup_script] + list(args)
- sys.path.insert(0, setup_dir)
- # reset to include setup dir, w/clean callback list
- working_set.__init__()
+ sys.path.insert(0, setup_dir)
+ # reset to include setup dir, w/clean callback list
+ working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
@@ -247,28 +247,28 @@ def run_setup(setup_script, args):
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
- _execfile(setup_script, ns)
- except SystemExit as v:
- if v.args and v.args[0]:
- raise
- # Normal exit, just return
-
-
-class AbstractSandbox:
- """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
-
- _active = False
-
- def __init__(self):
- self._attrs = [
- name for name in dir(_os)
+ _execfile(setup_script, ns)
+ except SystemExit as v:
+ if v.args and v.args[0]:
+ raise
+ # Normal exit, just return
+
+
+class AbstractSandbox:
+ """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
+
+ _active = False
+
+ def __init__(self):
+ self._attrs = [
+ name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
- ]
-
- def _copy(self, source):
- for name in self._attrs:
+ ]
+
+ def _copy(self, source):
+ for name in self._attrs:
setattr(os, name, getattr(source, name))
-
+
def __enter__(self):
self._copy(self)
if _file:
@@ -283,209 +283,209 @@ class AbstractSandbox:
builtins.open = _open
self._copy(_os)
- def run(self, func):
- """Run 'func' under os sandboxing"""
+ def run(self, func):
+ """Run 'func' under os sandboxing"""
with self:
- return func()
-
- def _mk_dual_path_wrapper(name):
+ return func()
+
+ def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
- if self._active:
+ if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
- return wrap
-
- for name in ["rename", "link", "symlink"]:
+ return wrap
+
+ for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
-
- def _mk_single_path_wrapper(name, original=None):
+
+ def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
- if self._active:
+ if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
- return wrap
-
- if _file:
- _file = _mk_single_path_wrapper('file', _file)
- _open = _mk_single_path_wrapper('open', _open)
- for name in [
- "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
- "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
- "startfile", "mkfifo", "mknod", "pathconf", "access"
- ]:
+ return wrap
+
+ if _file:
+ _file = _mk_single_path_wrapper('file', _file)
+ _open = _mk_single_path_wrapper('open', _open)
+ for name in [
+ "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
+ "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
+ "startfile", "mkfifo", "mknod", "pathconf", "access"
+ ]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
-
- def _mk_single_with_return(name):
+
+ def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
- if self._active:
+ if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
- return wrap
-
- for name in ['readlink', 'tempnam']:
+ return wrap
+
+ for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
-
- def _mk_query(name):
+
+ def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
- if self._active:
- return self._remap_output(name, retval)
- return retval
+ if self._active:
+ return self._remap_output(name, retval)
+ return retval
- return wrap
-
- for name in ['getcwd', 'tmpnam']:
+ return wrap
+
+ for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
-
+
def _validate_path(self, path):
- """Called to remap or validate any path, whether input or output"""
- return path
-
+ """Called to remap or validate any path, whether input or output"""
+ return path
+
def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- return self._validate_path(path)
-
+ """Called for path inputs"""
+ return self._validate_path(path)
+
def _remap_output(self, operation, path):
- """Called for path outputs"""
- return self._validate_path(path)
-
+ """Called for path outputs"""
+ return self._validate_path(path)
+
def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- return (
+ """Called for path pairs like rename, link, and symlink operations"""
+ return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
- )
-
-
-if hasattr(os, 'devnull'):
- _EXCEPTIONS = [os.devnull,]
-else:
- _EXCEPTIONS = []
-
-
-class DirectorySandbox(AbstractSandbox):
- """Restrict operations to a single subdirectory - pseudo-chroot"""
-
- write_ops = dict.fromkeys([
- "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
- "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
- ])
-
- _exception_patterns = [
- # Allow lib2to3 to attempt to save a pickled grammar object (#121)
+ )
+
+
+if hasattr(os, 'devnull'):
+ _EXCEPTIONS = [os.devnull,]
+else:
+ _EXCEPTIONS = []
+
+
+class DirectorySandbox(AbstractSandbox):
+ """Restrict operations to a single subdirectory - pseudo-chroot"""
+
+ write_ops = dict.fromkeys([
+ "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
+ "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
+ ])
+
+ _exception_patterns = [
+ # Allow lib2to3 to attempt to save a pickled grammar object (#121)
r'.*lib2to3.*\.pickle$',
- ]
- "exempt writing to paths that match the pattern"
-
- def __init__(self, sandbox, exceptions=_EXCEPTIONS):
- self._sandbox = os.path.normcase(os.path.realpath(sandbox))
+ ]
+ "exempt writing to paths that match the pattern"
+
+ def __init__(self, sandbox, exceptions=_EXCEPTIONS):
+ self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
- self._exceptions = [
- os.path.normcase(os.path.realpath(path))
- for path in exceptions
- ]
- AbstractSandbox.__init__(self)
-
- def _violation(self, operation, *args, **kw):
- from setuptools.sandbox import SandboxViolation
- raise SandboxViolation(operation, args, kw)
-
- if _file:
-
- def _file(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("file", path, mode, *args, **kw)
+ self._exceptions = [
+ os.path.normcase(os.path.realpath(path))
+ for path in exceptions
+ ]
+ AbstractSandbox.__init__(self)
+
+ def _violation(self, operation, *args, **kw):
+ from setuptools.sandbox import SandboxViolation
+ raise SandboxViolation(operation, args, kw)
+
+ if _file:
+
+ def _file(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
-
- def _open(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("open", path, mode, *args, **kw)
+
+ def _open(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
-
- def tmpnam(self):
- self._violation("tmpnam")
-
- def _ok(self, path):
- active = self._active
- try:
- self._active = False
- realpath = os.path.normcase(os.path.realpath(path))
- return (
- self._exempted(realpath)
- or realpath == self._sandbox
- or realpath.startswith(self._prefix)
- )
- finally:
- self._active = active
-
- def _exempted(self, filepath):
- start_matches = (
- filepath.startswith(exception)
- for exception in self._exceptions
- )
- pattern_matches = (
- re.match(pattern, filepath)
- for pattern in self._exception_patterns
- )
- candidates = itertools.chain(start_matches, pattern_matches)
- return any(candidates)
-
- def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- if operation in self.write_ops and not self._ok(path):
- self._violation(operation, os.path.realpath(path), *args, **kw)
- return path
-
- def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- if not self._ok(src) or not self._ok(dst):
- self._violation(operation, src, dst, *args, **kw)
+
+ def tmpnam(self):
+ self._violation("tmpnam")
+
+ def _ok(self, path):
+ active = self._active
+ try:
+ self._active = False
+ realpath = os.path.normcase(os.path.realpath(path))
+ return (
+ self._exempted(realpath)
+ or realpath == self._sandbox
+ or realpath.startswith(self._prefix)
+ )
+ finally:
+ self._active = active
+
+ def _exempted(self, filepath):
+ start_matches = (
+ filepath.startswith(exception)
+ for exception in self._exceptions
+ )
+ pattern_matches = (
+ re.match(pattern, filepath)
+ for pattern in self._exception_patterns
+ )
+ candidates = itertools.chain(start_matches, pattern_matches)
+ return any(candidates)
+
+ def _remap_input(self, operation, path, *args, **kw):
+ """Called for path inputs"""
+ if operation in self.write_ops and not self._ok(path):
+ self._violation(operation, os.path.realpath(path), *args, **kw)
+ return path
+
+ def _remap_pair(self, operation, src, dst, *args, **kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ if not self._ok(src) or not self._ok(dst):
+ self._violation(operation, src, dst, *args, **kw)
return (src, dst)
-
- def open(self, file, flags, mode=0o777, *args, **kw):
- """Called for low-level os.open()"""
- if flags & WRITE_FLAGS and not self._ok(file):
- self._violation("os.open", file, flags, mode, *args, **kw)
+
+ def open(self, file, flags, mode=0o777, *args, **kw):
+ """Called for low-level os.open()"""
+ if flags & WRITE_FLAGS and not self._ok(file):
+ self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
-
-WRITE_FLAGS = functools.reduce(
- operator.or_, [getattr(_os, a, 0) for a in
- "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
-)
-
-class SandboxViolation(DistutilsError):
- """A setup script attempted to modify the filesystem outside the sandbox"""
-
+WRITE_FLAGS = functools.reduce(
+ operator.or_, [getattr(_os, a, 0) for a in
+ "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
+)
+
+
+class SandboxViolation(DistutilsError):
+ """A setup script attempted to modify the filesystem outside the sandbox"""
+
tmpl = textwrap.dedent("""
SandboxViolation: {cmd}{args!r} {kwargs}
-
+
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
-
+
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
""").lstrip()
-
+
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
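
Taken together, sandbox.py implements a pseudo-chroot: run_setup() snapshots sys.argv, sys.path, sys.modules and pkg_resources state, chdirs into the script's directory, and executes it under a DirectorySandbox that raises SandboxViolation for writes outside that tree. A hedged usage sketch (the package path is illustrative):

import os
from setuptools.sandbox import DirectorySandbox, SandboxViolation, run_setup

pkg_dir = '/tmp/somepkg'   # assumed to contain a setup.py
try:
    run_setup(os.path.join(pkg_dir, 'setup.py'), ['--name'])
except SandboxViolation as exc:
    print('blocked a write outside the sandbox:', exc)

with DirectorySandbox(pkg_dir):   # os.* and open() are wrapped while active
    open(os.path.join(pkg_dir, 'ok.txt'), 'w').close()   # inside the tree: allowed
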
diff --git a/contrib/python/setuptools/py2/setuptools/script.tmpl b/contrib/python/setuptools/py2/setuptools/script.tmpl
index df8f68d6b4..ff5efbcab3 100644
--- a/contrib/python/setuptools/py2/setuptools/script.tmpl
+++ b/contrib/python/setuptools/py2/setuptools/script.tmpl
@@ -1,3 +1,3 @@
-# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
-__requires__ = %(spec)r
-__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
+# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
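
The template is expanded with old-style %-formatting, so each placeholder lands as a repr(). A sketch of what an emitted wrapper script looks like (spec and script name are illustrative):

tmpl = (
    "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n"
    "__requires__ = %(spec)r\n"
    "__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)\n"
)
print(tmpl % {'spec': 'example-pkg==1.0', 'script_name': 'example-cli'})
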
diff --git a/contrib/python/setuptools/py2/setuptools/site-patch.py b/contrib/python/setuptools/py2/setuptools/site-patch.py
index 16e75879fd..40b00de0a7 100644
--- a/contrib/python/setuptools/py2/setuptools/site-patch.py
+++ b/contrib/python/setuptools/py2/setuptools/site-patch.py
@@ -1,74 +1,74 @@
-def __boot():
- import sys
- import os
- PYTHONPATH = os.environ.get('PYTHONPATH')
+def __boot():
+ import sys
+ import os
+ PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
- PYTHONPATH = []
- else:
- PYTHONPATH = PYTHONPATH.split(os.pathsep)
-
+ PYTHONPATH = []
+ else:
+ PYTHONPATH = PYTHONPATH.split(os.pathsep)
+
pic = getattr(sys, 'path_importer_cache', {})
- stdpath = sys.path[len(PYTHONPATH):]
- mydir = os.path.dirname(__file__)
-
- for item in stdpath:
+ stdpath = sys.path[len(PYTHONPATH):]
+ mydir = os.path.dirname(__file__)
+
+ for item in stdpath:
if item == mydir or not item:
continue # skip if current dir. on Windows, or my own directory
- importer = pic.get(item)
- if importer is not None:
- loader = importer.find_module('site')
- if loader is not None:
- # This should actually reload the current module
- loader.load_module('site')
- break
- else:
- try:
+ importer = pic.get(item)
+ if importer is not None:
+ loader = importer.find_module('site')
+ if loader is not None:
+ # This should actually reload the current module
+ loader.load_module('site')
+ break
+ else:
+ try:
import imp # Avoid import loop in Python 3
stream, path, descr = imp.find_module('site', [item])
- except ImportError:
- continue
- if stream is None:
- continue
- try:
- # This should actually reload the current module
+ except ImportError:
+ continue
+ if stream is None:
+ continue
+ try:
+ # This should actually reload the current module
imp.load_module('site', stream, path, descr)
- finally:
- stream.close()
- break
- else:
- raise ImportError("Couldn't find the real 'site' module")
-
+ finally:
+ stream.close()
+ break
+ else:
+ raise ImportError("Couldn't find the real 'site' module")
+
known_paths = dict([(makepath(item)[1], 1) for item in sys.path]) # 2.2 comp
-
+
oldpos = getattr(sys, '__egginsert', 0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
-
- for item in PYTHONPATH:
- addsitedir(item)
-
+
+ for item in PYTHONPATH:
+ addsitedir(item)
+
sys.__egginsert += oldpos # restore effective old position
-
- d, nd = makepath(stdpath[0])
- insert_at = None
- new_path = []
-
- for item in sys.path:
- p, np = makepath(item)
-
+
+ d, nd = makepath(stdpath[0])
+ insert_at = None
+ new_path = []
+
+ for item in sys.path:
+ p, np = makepath(item)
+
if np == nd and insert_at is None:
- # We've hit the first 'system' path entry, so added entries go here
- insert_at = len(new_path)
-
- if np in known_paths or insert_at is None:
- new_path.append(item)
- else:
- # new path after the insert point, back-insert it
- new_path.insert(insert_at, item)
- insert_at += 1
-
- sys.path[:] = new_path
-
+ # We've hit the first 'system' path entry, so added entries go here
+ insert_at = len(new_path)
+
+ if np in known_paths or insert_at is None:
+ new_path.append(item)
+ else:
+ # new path after the insert point, back-insert it
+ new_path.insert(insert_at, item)
+ insert_at += 1
+
+ sys.path[:] = new_path
+
if __name__ == 'site':
- __boot()
- del __boot
+ __boot()
+ del __boot
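
The trickiest part of __boot() is the final loop: entries already on sys.path keep their relative order, while new entries are back-inserted just before the first "system" path entry so they win over stdlib and site-packages. A self-contained sketch of that insertion rule (simplified: plain string comparison, no makepath() normalization):

def reorder(path, known, first_system):
    new_path, insert_at = [], None
    for item in path:
        if item == first_system and insert_at is None:
            insert_at = len(new_path)         # added entries will go before here
        if item in known or insert_at is None:
            new_path.append(item)             # known entry: keep relative order
        else:
            new_path.insert(insert_at, item)  # new entry: back-insert it
            insert_at += 1
    return new_path

print(reorder(['app', 'site-packages', 'egg'], {'app', 'site-packages'}, 'site-packages'))
# ['app', 'egg', 'site-packages']
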
diff --git a/contrib/python/setuptools/py2/setuptools/ssl_support.py b/contrib/python/setuptools/py2/setuptools/ssl_support.py
index 7f5aa7f577..226db694bb 100644
--- a/contrib/python/setuptools/py2/setuptools/ssl_support.py
+++ b/contrib/python/setuptools/py2/setuptools/ssl_support.py
@@ -1,191 +1,191 @@
-import os
-import socket
-import atexit
-import re
+import os
+import socket
+import atexit
+import re
import functools
-
+
from setuptools.extern.six.moves import urllib, http_client, map, filter
-
-from pkg_resources import ResolutionError, ExtractionError
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-__all__ = [
- 'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
- 'opener_for'
-]
-
-cert_paths = """
-/etc/pki/tls/certs/ca-bundle.crt
-/etc/ssl/certs/ca-certificates.crt
-/usr/share/ssl/certs/ca-bundle.crt
-/usr/local/share/certs/ca-root.crt
-/etc/ssl/cert.pem
-/System/Library/OpenSSL/certs/cert.pem
-/usr/local/share/certs/ca-root-nss.crt
+
+from pkg_resources import ResolutionError, ExtractionError
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+__all__ = [
+ 'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
+ 'opener_for'
+]
+
+cert_paths = """
+/etc/pki/tls/certs/ca-bundle.crt
+/etc/ssl/certs/ca-certificates.crt
+/usr/share/ssl/certs/ca-bundle.crt
+/usr/local/share/certs/ca-root.crt
+/etc/ssl/cert.pem
+/System/Library/OpenSSL/certs/cert.pem
+/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
-""".strip().split()
-
-try:
- HTTPSHandler = urllib.request.HTTPSHandler
- HTTPSConnection = http_client.HTTPSConnection
-except AttributeError:
- HTTPSHandler = HTTPSConnection = object
-
-is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
-
-
-try:
- from ssl import CertificateError, match_hostname
-except ImportError:
- try:
- from backports.ssl_match_hostname import CertificateError
- from backports.ssl_match_hostname import match_hostname
- except ImportError:
- CertificateError = None
- match_hostname = None
-
-if not CertificateError:
-
- class CertificateError(ValueError):
- pass
-
-
-if not match_hostname:
-
- def _dnsname_match(dn, hostname, max_wildcards=1):
- """Matching according to RFC 6125, section 6.4.3
-
+""".strip().split()
+
+try:
+ HTTPSHandler = urllib.request.HTTPSHandler
+ HTTPSConnection = http_client.HTTPSConnection
+except AttributeError:
+ HTTPSHandler = HTTPSConnection = object
+
+is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
+
+
+try:
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import CertificateError
+ from backports.ssl_match_hostname import match_hostname
+ except ImportError:
+ CertificateError = None
+ match_hostname = None
+
+if not CertificateError:
+
+ class CertificateError(ValueError):
+ pass
+
+
+if not match_hostname:
+
+ def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
https://tools.ietf.org/html/rfc6125#section-6.4.3
- """
- pats = []
- if not dn:
- return False
-
- # Ported from python3-syntax:
- # leftmost, *remainder = dn.split(r'.')
- parts = dn.split(r'.')
- leftmost = parts[0]
- remainder = parts[1:]
-
- wildcards = leftmost.count('*')
- if wildcards > max_wildcards:
- # Issue #17980: avoid denials of service by refusing more
- # than one wildcard per fragment. A survey of established
- # policy among SSL implementations showed it to be a
- # reasonable choice.
- raise CertificateError(
- "too many wildcards in certificate DNS name: " + repr(dn))
-
- # speed up common case w/o wildcards
- if not wildcards:
- return dn.lower() == hostname.lower()
-
- # RFC 6125, section 6.4.3, subitem 1.
- # The client SHOULD NOT attempt to match a presented identifier in which
- # the wildcard character comprises a label other than the left-most label.
- if leftmost == '*':
- # When '*' is a fragment by itself, it matches a non-empty dotless
- # fragment.
- pats.append('[^.]+')
- elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
- # RFC 6125, section 6.4.3, subitem 3.
- # The client SHOULD NOT attempt to match a presented identifier
- # where the wildcard character is embedded within an A-label or
- # U-label of an internationalized domain name.
- pats.append(re.escape(leftmost))
- else:
- # Otherwise, '*' matches any dotless string, e.g. www*
- pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
- # add the remaining fragments, ignore any wildcards
- for frag in remainder:
- pats.append(re.escape(frag))
-
- pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
- return pat.match(hostname)
-
- def match_hostname(cert, hostname):
- """Verify that *cert* (in decoded format as returned by
- SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
- rules are followed, but IP addresses are not accepted for *hostname*.
-
- CertificateError is raised on failure. On success, the function
- returns nothing.
- """
- if not cert:
- raise ValueError("empty or no certificate")
- dnsnames = []
- san = cert.get('subjectAltName', ())
- for key, value in san:
- if key == 'DNS':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if not dnsnames:
- # The subject is only checked when there is no dNSName entry
- # in subjectAltName
- for sub in cert.get('subject', ()):
- for key, value in sub:
- # XXX according to RFC 2818, the most specific Common Name
- # must be used.
- if key == 'commonName':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if len(dnsnames) > 1:
- raise CertificateError("hostname %r "
- "doesn't match either of %s"
- % (hostname, ', '.join(map(repr, dnsnames))))
- elif len(dnsnames) == 1:
- raise CertificateError("hostname %r "
- "doesn't match %r"
- % (hostname, dnsnames[0]))
- else:
- raise CertificateError("no appropriate commonName or "
- "subjectAltName fields were found")
-
-
-class VerifyingHTTPSHandler(HTTPSHandler):
- """Simple verifying handler: no auth, subclasses, timeouts, etc."""
-
- def __init__(self, ca_bundle):
- self.ca_bundle = ca_bundle
- HTTPSHandler.__init__(self)
-
- def https_open(self, req):
- return self.do_open(
- lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
- )
-
-
-class VerifyingHTTPSConn(HTTPSConnection):
- """Simple verifying connection: no auth, subclasses, timeouts, etc."""
-
- def __init__(self, host, ca_bundle, **kw):
- HTTPSConnection.__init__(self, host, **kw)
- self.ca_bundle = ca_bundle
-
- def connect(self):
- sock = socket.create_connection(
- (self.host, self.port), getattr(self, 'source_address', None)
- )
-
- # Handle the socket if a (proxy) tunnel is present
- if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
- self.sock = sock
- self._tunnel()
- # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
- # change self.host to mean the proxy server host when tunneling is
- # being used. Adapt, since we are interested in the destination
- # host for the match_hostname() comparison.
- actual_host = self._tunnel_host
- else:
- actual_host = self.host
-
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+ def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate")
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
+
+
+class VerifyingHTTPSHandler(HTTPSHandler):
+ """Simple verifying handler: no auth, subclasses, timeouts, etc."""
+
+ def __init__(self, ca_bundle):
+ self.ca_bundle = ca_bundle
+ HTTPSHandler.__init__(self)
+
+ def https_open(self, req):
+ return self.do_open(
+ lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
+ )
+
+
+class VerifyingHTTPSConn(HTTPSConnection):
+ """Simple verifying connection: no auth, subclasses, timeouts, etc."""
+
+ def __init__(self, host, ca_bundle, **kw):
+ HTTPSConnection.__init__(self, host, **kw)
+ self.ca_bundle = ca_bundle
+
+ def connect(self):
+ sock = socket.create_connection(
+ (self.host, self.port), getattr(self, 'source_address', None)
+ )
+
+ # Handle the socket if a (proxy) tunnel is present
+ if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
+ self.sock = sock
+ self._tunnel()
+ # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
+ # change self.host to mean the proxy server host when tunneling is
+ # being used. Adapt, since we are interested in the destination
+ # host for the match_hostname() comparison.
+ actual_host = self._tunnel_host
+ else:
+ actual_host = self.host
+
if hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=self.ca_bundle)
self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
@@ -194,21 +194,21 @@ class VerifyingHTTPSConn(HTTPSConnection):
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
- try:
- match_hostname(self.sock.getpeercert(), actual_host)
- except CertificateError:
- self.sock.shutdown(socket.SHUT_RDWR)
- self.sock.close()
- raise
-
-
-def opener_for(ca_bundle=None):
- """Get a urlopen() replacement that uses ca_bundle for verification"""
- return urllib.request.build_opener(
- VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
- ).open
-
-
+ try:
+ match_hostname(self.sock.getpeercert(), actual_host)
+ except CertificateError:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
+
+def opener_for(ca_bundle=None):
+ """Get a urlopen() replacement that uses ca_bundle for verification"""
+ return urllib.request.build_opener(
+ VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
+ ).open
+
+
# from jaraco.functools
def once(func):
@functools.wraps(func)
@@ -217,34 +217,34 @@ def once(func):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
-
+
@once
-def get_win_certfile():
- try:
+def get_win_certfile():
+ try:
import wincertstore
- except ImportError:
- return None
-
+ except ImportError:
+ return None
+
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
- atexit.register(self.close)
-
- def close(self):
- try:
+ atexit.register(self.close)
+
+ def close(self):
+ try:
super(CertFile, self).close()
- except OSError:
- pass
-
+ except OSError:
+ pass
+
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
- return _wincerts.name
-
-
-def find_ca_bundle():
- """Return an existing CA bundle path, or None"""
+ return _wincerts.name
+
+
+def find_ca_bundle():
+ """Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
@@ -254,7 +254,7 @@ def find_ca_bundle():
def _certifi_where():
- try:
+ try:
return __import__('certifi').where()
- except (ImportError, ResolutionError, ExtractionError):
+ except (ImportError, ResolutionError, ExtractionError):
pass
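
Callers consume all of this through opener_for(), which wires VerifyingHTTPSHandler into a urlopen() replacement. A usage sketch (URL illustrative; requires a discoverable CA bundle, e.g. from certifi or one of cert_paths):

from setuptools.ssl_support import find_ca_bundle, is_available, opener_for

if is_available and find_ca_bundle():
    urlopen = opener_for()                       # verifies against find_ca_bundle()
    page = urlopen('https://pypi.org/simple/')   # CertificateError closes the socket
    print(page.getcode())                        # 200
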
diff --git a/contrib/python/setuptools/py2/setuptools/unicode_utils.py b/contrib/python/setuptools/py2/setuptools/unicode_utils.py
index 9128c79f74..7c63efd20b 100644
--- a/contrib/python/setuptools/py2/setuptools/unicode_utils.py
+++ b/contrib/python/setuptools/py2/setuptools/unicode_utils.py
@@ -1,44 +1,44 @@
-import unicodedata
-import sys
-
+import unicodedata
+import sys
+
from setuptools.extern import six
-
-
-# HFS Plus uses decomposed UTF-8
-def decompose(path):
- if isinstance(path, six.text_type):
- return unicodedata.normalize('NFD', path)
- try:
- path = path.decode('utf-8')
- path = unicodedata.normalize('NFD', path)
- path = path.encode('utf-8')
- except UnicodeError:
- pass # Not UTF-8
- return path
-
-
-def filesys_decode(path):
- """
- Ensure that the given path is decoded,
- NONE when no expected encoding works
- """
-
- if isinstance(path, six.text_type):
- return path
-
- fs_enc = sys.getfilesystemencoding() or 'utf-8'
- candidates = fs_enc, 'utf-8'
-
- for enc in candidates:
- try:
- return path.decode(enc)
- except UnicodeDecodeError:
- continue
-
-
-def try_encode(string, enc):
- "turn unicode encoding into a functional routine"
- try:
- return string.encode(enc)
- except UnicodeEncodeError:
- return None
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+ if isinstance(path, six.text_type):
+ return unicodedata.normalize('NFD', path)
+ try:
+ path = path.decode('utf-8')
+ path = unicodedata.normalize('NFD', path)
+ path = path.encode('utf-8')
+ except UnicodeError:
+ pass # Not UTF-8
+ return path
+
+
+def filesys_decode(path):
+ """
+ Ensure that the given path is decoded,
+ NONE when no expected encoding works
+ """
+
+ if isinstance(path, six.text_type):
+ return path
+
+ fs_enc = sys.getfilesystemencoding() or 'utf-8'
+ candidates = fs_enc, 'utf-8'
+
+ for enc in candidates:
+ try:
+ return path.decode(enc)
+ except UnicodeDecodeError:
+ continue
+
+
+def try_encode(string, enc):
+ "turn unicode encoding into a functional routine"
+ try:
+ return string.encode(enc)
+ except UnicodeEncodeError:
+ return None
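
These helpers exist because HFS+ on macOS hands back decomposed (NFD) UTF-8, while most Python code expects precomposed text. A quick round-trip sketch:

import unicodedata
from setuptools.unicode_utils import decompose, filesys_decode

name = u'caf\u00e9'                             # precomposed 'café'
nfd = decompose(name)                           # NFD form, as HFS+ stores it
assert unicodedata.normalize('NFC', nfd) == name
print(filesys_decode(nfd.encode('utf-8')))      # decoded via fs encoding, then utf-8
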
diff --git a/contrib/python/setuptools/py2/setuptools/windows_support.py b/contrib/python/setuptools/py2/setuptools/windows_support.py
index a4a389fede..cb977cff95 100644
--- a/contrib/python/setuptools/py2/setuptools/windows_support.py
+++ b/contrib/python/setuptools/py2/setuptools/windows_support.py
@@ -1,29 +1,29 @@
-import platform
-import ctypes
-
-
-def windows_only(func):
- if platform.system() != 'Windows':
- return lambda *args, **kwargs: None
- return func
-
-
-@windows_only
-def hide_file(path):
- """
- Set the hidden attribute on a file or directory.
-
- From http://stackoverflow.com/questions/19622133/
-
- `path` must be text.
- """
- __import__('ctypes.wintypes')
- SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
- SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
- SetFileAttributes.restype = ctypes.wintypes.BOOL
-
- FILE_ATTRIBUTE_HIDDEN = 0x02
-
- ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
- if not ret:
- raise ctypes.WinError()
+import platform
+import ctypes
+
+
+def windows_only(func):
+ if platform.system() != 'Windows':
+ return lambda *args, **kwargs: None
+ return func
+
+
+@windows_only
+def hide_file(path):
+ """
+ Set the hidden attribute on a file or directory.
+
+ From http://stackoverflow.com/questions/19622133/
+
+ `path` must be text.
+ """
+ __import__('ctypes.wintypes')
+ SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+ SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+ SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+ FILE_ATTRIBUTE_HIDDEN = 0x02
+
+ ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+ if not ret:
+ raise ctypes.WinError()
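
Because of the decorator, hide_file() degrades to a no-op off Windows, so callers never need a platform check of their own. A sketch (the directory is illustrative):

import tempfile
from setuptools.windows_support import hide_file

d = tempfile.mkdtemp()
hide_file(d)   # sets FILE_ATTRIBUTE_HIDDEN on Windows; does nothing elsewhere
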
diff --git a/contrib/python/setuptools/py2/ya.make b/contrib/python/setuptools/py2/ya.make
index f096e3524e..4c36dcf1ab 100644
--- a/contrib/python/setuptools/py2/ya.make
+++ b/contrib/python/setuptools/py2/ya.make
@@ -1,9 +1,9 @@
PY2_LIBRARY()
-
+
LICENSE(MIT)
OWNER(g:python-contrib borman orivej)
-
+
VERSION(44.1.1)
PEERDIR(
@@ -16,10 +16,10 @@ NO_CHECK_IMPORTS(
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
easy_install.py
- pkg_resources/__init__.py
+ pkg_resources/__init__.py
pkg_resources/_vendor/__init__.py
pkg_resources/_vendor/appdirs.py
pkg_resources/_vendor/packaging/__about__.py
@@ -35,7 +35,7 @@ PY_SRCS(
pkg_resources/_vendor/six.py
pkg_resources/extern/__init__.py
pkg_resources/py31compat.py
- setuptools/__init__.py
+ setuptools/__init__.py
setuptools/_deprecation_warning.py
setuptools/_imp.py
setuptools/_vendor/__init__.py
@@ -52,61 +52,61 @@ PY_SRCS(
setuptools/_vendor/packaging/version.py
setuptools/_vendor/pyparsing.py
setuptools/_vendor/six.py
- setuptools/archive_util.py
+ setuptools/archive_util.py
setuptools/build_meta.py
- setuptools/command/__init__.py
- setuptools/command/alias.py
- setuptools/command/bdist_egg.py
- setuptools/command/bdist_rpm.py
- setuptools/command/bdist_wininst.py
+ setuptools/command/__init__.py
+ setuptools/command/alias.py
+ setuptools/command/bdist_egg.py
+ setuptools/command/bdist_rpm.py
+ setuptools/command/bdist_wininst.py
setuptools/command/build_clib.py
- setuptools/command/build_ext.py
- setuptools/command/build_py.py
- setuptools/command/develop.py
+ setuptools/command/build_ext.py
+ setuptools/command/build_py.py
+ setuptools/command/develop.py
setuptools/command/dist_info.py
- setuptools/command/easy_install.py
- setuptools/command/egg_info.py
- setuptools/command/install.py
- setuptools/command/install_egg_info.py
- setuptools/command/install_lib.py
- setuptools/command/install_scripts.py
+ setuptools/command/easy_install.py
+ setuptools/command/egg_info.py
+ setuptools/command/install.py
+ setuptools/command/install_egg_info.py
+ setuptools/command/install_lib.py
+ setuptools/command/install_scripts.py
setuptools/command/py36compat.py
- setuptools/command/register.py
- setuptools/command/rotate.py
- setuptools/command/saveopts.py
- setuptools/command/sdist.py
- setuptools/command/setopt.py
- setuptools/command/test.py
+ setuptools/command/register.py
+ setuptools/command/rotate.py
+ setuptools/command/saveopts.py
+ setuptools/command/sdist.py
+ setuptools/command/setopt.py
+ setuptools/command/test.py
setuptools/command/upload.py
- setuptools/command/upload_docs.py
+ setuptools/command/upload_docs.py
setuptools/config.py
setuptools/dep_util.py
- setuptools/depends.py
- setuptools/dist.py
+ setuptools/depends.py
+ setuptools/dist.py
setuptools/errors.py
- setuptools/extension.py
+ setuptools/extension.py
setuptools/extern/__init__.py
setuptools/glob.py
setuptools/installer.py
- setuptools/launch.py
- setuptools/lib2to3_ex.py
+ setuptools/launch.py
+ setuptools/lib2to3_ex.py
setuptools/monkey.py
setuptools/msvc.py
setuptools/namespaces.py
- setuptools/package_index.py
- setuptools/py27compat.py
- setuptools/py31compat.py
+ setuptools/package_index.py
+ setuptools/py27compat.py
+ setuptools/py31compat.py
setuptools/py33compat.py
setuptools/py34compat.py
- setuptools/sandbox.py
- setuptools/site-patch.py
- setuptools/ssl_support.py
- setuptools/unicode_utils.py
- setuptools/version.py
+ setuptools/sandbox.py
+ setuptools/site-patch.py
+ setuptools/ssl_support.py
+ setuptools/unicode_utils.py
+ setuptools/version.py
setuptools/wheel.py
- setuptools/windows_support.py
-)
-
+ setuptools/windows_support.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/setuptools/py2/
.dist-info/METADATA
@@ -114,4 +114,4 @@ RESOURCE_FILES(
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/setuptools/py3/pkg_resources/__init__.py b/contrib/python/setuptools/py3/pkg_resources/__init__.py
index b5959b688c..6b947a49a7 100644
--- a/contrib/python/setuptools/py3/pkg_resources/__init__.py
+++ b/contrib/python/setuptools/py3/pkg_resources/__init__.py
@@ -1,90 +1,90 @@
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
import errno
-import tempfile
-import textwrap
+import tempfile
+import textwrap
import itertools
import inspect
import ntpath
import posixpath
import importlib
-from pkgutil import get_importer
-
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
try:
FileExistsError
except NameError:
FileExistsError = OSError
-
-# capture these to bypass sandboxing
-from os import utime
-try:
- from os import mkdir, rename, unlink
- WRITE_SUPPORT = True
-except ImportError:
- # no write support, probably under GAE
- WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
- import importlib.machinery as importlib_machinery
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
-
+
if sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
@@ -100,833 +100,833 @@ resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
-
-
-class PEP440Warning(RuntimeWarning):
- """
- Used when there is an issue with a version or specifier not complying with
- PEP 440.
- """
-
-
-def parse_version(v):
- try:
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
return packaging.version.Version(v)
- except packaging.version.InvalidVersion:
+ except packaging.version.InvalidVersion:
warnings.warn(
f"{v} is an invalid version and will not be supported in "
"a future release",
PkgResourcesDeprecationWarning,
)
return packaging.version.LegacyVersion(v)
-
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
- globals().update(kw)
- _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.items():
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
- return state
-
+ return state
-def __setstate__(state):
- g = globals()
- for k, v in state.items():
+
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
- return state
-
-
-def _sget_dict(val):
- return val.copy()
-
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-
-def _sget_object(val):
- return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
of macOS that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
+ distutils. But what we want when checking compatibility is to know the
version of macOS that we are *running*. To allow usage of packages that
explicitly require a newer version of macOS, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform()
- m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
- except ValueError:
+ except ValueError:
# not macOS
- pass
- return plat
-
+ pass
+ return plat
+
-__all__ = [
- # Basic resource access and distribution/entry point discovery
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
- 'load_entry_point', 'get_entry_map', 'get_entry_info',
- 'iter_entry_points',
- 'resource_string', 'resource_stream', 'resource_filename',
- 'resource_listdir', 'resource_exists', 'resource_isdir',
-
- # Environmental control
- 'declare_namespace', 'working_set', 'add_activation_listener',
- 'find_distributions', 'set_extraction_path', 'cleanup_resources',
- 'get_default_cache',
-
- # Primary implementation classes
- 'Environment', 'WorkingSet', 'ResourceManager',
- 'Distribution', 'Requirement', 'EntryPoint',
-
- # Exceptions
- 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
- 'UnknownExtra', 'ExtractionError',
-
- # Warnings
- 'PEP440Warning',
-
- # Parsing functions and string utilities
- 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
- 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
- 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
-
- # filesystem utilities
- 'ensure_directory', 'normalize_path',
-
- # Distribution "precedence" constants
- 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
- 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
- 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
- 'register_finder', 'register_namespace_handler', 'register_loader_type',
- 'fixup_namespace_packages', 'get_importer',
-
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
# Warnings
'PkgResourcesDeprecationWarning',
- # Deprecated/backward compatibility only
- 'run_main', 'AvailableDistributions',
-]
-
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
- def __repr__(self):
+ def __repr__(self):
return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
- """
- An already-installed version conflicts with the requested version.
-
- Should be initialized with the installed Distribution and the requested
- Requirement.
- """
-
- _template = "{self.dist} is installed but {self.req} is required"
-
- @property
- def dist(self):
- return self.args[0]
-
- @property
- def req(self):
- return self.args[1]
-
- def report(self):
- return self._template.format(**locals())
-
- def with_context(self, required_by):
- """
- If required_by is non-empty, return a version of self that is a
- ContextualVersionConflict.
- """
- if not required_by:
- return self
- args = self.args + (required_by,)
- return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
- """
- A VersionConflict that accepts a third parameter, the set of the
- requirements that required the installed Distribution.
- """
-
- _template = VersionConflict._template + ' by {self.required_by}'
-
- @property
- def required_by(self):
- return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
- _template = ("The '{self.req}' distribution was not found "
- "and is required by {self.requirers_str}")
-
- @property
- def req(self):
- return self.args[0]
-
- @property
- def requirers(self):
- return self.args[1]
-
- @property
- def requirers_str(self):
- if not self.requirers:
- return 'the application'
- return ', '.join(self.requirers)
-
- def report(self):
- return self._template.format(**locals())
-
- def __str__(self):
- return self.report()
-
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
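
A minimal sketch of how this exception hierarchy reports failures. The plain string standing in for an installed Distribution and the 'example-dist' name are hypothetical, purely for illustration:

    from pkg_resources import Requirement, VersionConflict, DistributionNotFound

    req = Requirement.parse('example-dist>=2.0')

    # VersionConflict is constructed with (installed_distribution, requirement);
    # a string stands in for the Distribution here.
    conflict = VersionConflict('example-dist 1.0', req)
    print(conflict.report())
    # -> example-dist 1.0 is installed but example-dist>=2.0 is required

    missing = DistributionNotFound(req, ['some-plugin'])
    print(missing.report())
    # -> The 'example-dist>=2.0' distribution was not found and is required by some-plugin
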
+_provider_factories = {}
+
PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq, Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
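
A hedged sketch of registering a provider factory. FancyLoader and fancy_provider_factory are hypothetical names; NullProvider is used only because it is the simplest concrete provider exported by this module:

    import pkg_resources

    class FancyLoader:
        """Stands in for a PEP 302 loader class (hypothetical)."""

    def fancy_provider_factory(module):
        # A real factory would return a custom IResourceProvider for `module`.
        return pkg_resources.NullProvider(module)

    pkg_resources.register_loader_type(FancyLoader, fancy_provider_factory)
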
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
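
A quick usage sketch; any importable module name works (the stdlib 'json' is used here only as an example):

    import pkg_resources

    provider = pkg_resources.get_provider('json')
    # Typically DefaultProvider for ordinary filesystem packages.
    print(type(provider).__name__)
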
def _macos_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
-
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
def _macos_arch(machine):
- return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and macOS.
- """
+ """
from sysconfig import get_platform
-
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
version = _macos_vers()
- machine = os.uname()[4].replace(" ", "_")
+ machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macos_arch(machine),
)
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
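
For illustration, the alias can be exercised directly; the printed value depends on the interpreter build:

    from pkg_resources import get_build_platform

    print(get_build_platform())   # e.g. 'linux-x86_64' or 'macosx-10.9-x86_64'
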
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
if provided is None or required is None or provided == required:
- # easy case
- return True
-
+ # easy case
+ return True
+
# macOS special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
# use the new macOS designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if dversion == 7 and macosversion >= "10.3" or \
- dversion == 8 and macosversion >= "10.4":
- return True
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
# egg isn't macOS or legacy darwin
- return False
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or \
- provMac.group(3) != reqMac.group(3):
- return False
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
+
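
A few assertion-style examples of the rules above. The macOS strings are hypothetical but follow the macosx-<major>.<minor>-<machine> shape the regex expects:

    from pkg_resources import compatible_platforms

    assert compatible_platforms(None, 'macosx-10.9-x86_64')        # None matches anything
    assert compatible_platforms('linux-x86_64', 'linux-x86_64')    # equality matches
    # An egg built on an older macOS runs on a newer one...
    assert compatible_platforms('macosx-10.4-ppc', 'macosx-10.9-ppc')
    # ...but not the other way around.
    assert not compatible_platforms('macosx-10.9-ppc', 'macosx-10.4-ppc')
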
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
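
A hedged sketch; 'example-dist' and 'example-script' are placeholders for a real installed distribution and one of its declared scripts. Note that run_script deliberately clears the caller's globals, so it belongs at the end of a wrapper script:

    import pkg_resources

    # Executes the named script with a fresh global namespace; everything
    # defined above this call in the wrapper module is discarded.
    pkg_resources.run_script('example-dist', 'example-script')
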
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
if isinstance(dist, str):
- dist = Requirement.parse(dist)
- if isinstance(dist, Requirement):
- dist = get_provider(dist)
- if not isinstance(dist, Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
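
A short sketch of these module-level helpers, assuming setuptools itself is installed in the current environment:

    import pkg_resources

    dist = pkg_resources.get_distribution('setuptools')
    print(dist.project_name, dist.version)

    # Enumerate console-script entry points from every active distribution.
    for ep in pkg_resources.iter_entry_points('console_scripts'):
        print(ep.name, '->', ep.module_name)
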
+
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
class WorkingSet:
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
- @classmethod
- def _build_master(cls):
- """
- Prepare the master working set.
- """
- ws = cls()
- try:
- from __main__ import __requires__
- except ImportError:
- # The main program does not list any requirements
- return ws
-
- # ensure the requirements are met
- try:
- ws.require(__requires__)
- except VersionConflict:
- return cls._build_from_requirements(__requires__)
-
- return ws
-
- @classmethod
- def _build_from_requirements(cls, req_spec):
- """
- Build a working set from a requirement spec. Rewrites sys.path.
- """
- # try it without defaults already on sys.path
- # by starting with an empty path
- ws = cls([])
- reqs = parse_requirements(req_spec)
- dists = ws.resolve(reqs, Environment())
- for dist in dists:
- ws.add(dist)
-
- # add any missing entries from sys.path
- for entry in sys.path:
- if entry not in ws.entries:
- ws.add_entry(entry)
-
- # then copy back to sys.path
- sys.path[:] = ws.entries
- return ws
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
- def __contains__(self, dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
- if dist is not None and dist not in req:
- # XXX add more info
- raise VersionConflict(dist, req)
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- if item not in self.entry_keys:
- # workaround a cache issue
- continue
-
- for key in self.entry_keys[item]:
- if key not in seen:
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
seen[key] = 1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True, replace=False):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set, unless `replace=True`.
- If it's added, any callbacks registered with the ``subscribe()`` method
- will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry, replace=replace)
-
- if entry is None:
- entry = dist.location
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
- if not replace and dist.key in self.by_key:
- # ignore hidden distros
- return
-
- self.by_key[dist.key] = dist
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
# FIXME: 'WorkingSet.resolve' is too complex (11)
def resolve(self, requirements, env=None, installer=None, # noqa: C901
replace_conflicting=False, extras=None):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
-
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
- any requirements are found on the path that have the correct name but
- the wrong version. Otherwise, if an `installer` is supplied it will be
- invoked to obtain the correct version of the requirement and activate
- it.
+ any requirements are found on the path that have the correct name but
+ the wrong version. Otherwise, if an `installer` is supplied it will be
+ invoked to obtain the correct version of the requirement and activate
+ it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
- """
-
- # set up the stack
- requirements = list(requirements)[::-1]
- # set of processed requirements
- processed = {}
- # key -> dist
- best = {}
- to_activate = []
-
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
req_extras = _ReqExtras()
- # Mapping of requirement to set of distributions that required it;
- # useful for reporting info about conflicts.
- required_by = collections.defaultdict(set)
-
- while requirements:
- # process dependencies breadth-first
- req = requirements.pop(0)
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
if not req_extras.markers_pass(req, extras):
continue
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None or (dist not in req and replace_conflicting):
- ws = self
- if env is None:
- if dist is None:
- env = Environment(self.entries)
- else:
-                        # Use an empty environment and working set to avoid
- # any further conflicts with the conflicting
- # distribution
- env = Environment([])
- ws = WorkingSet([])
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+                        # Use an empty environment and working set to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
- if dist is None:
- requirers = required_by.get(req, None)
- raise DistributionNotFound(req, requirers)
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- dependent_req = required_by[req]
- raise VersionConflict(dist, req).with_context(dependent_req)
-
- # push the new requirements onto the stack
- new_requirements = dist.requires(req.extras)[::-1]
- requirements.extend(new_requirements)
-
- # Register the new requirements needed by req
- for new_requirement in new_requirements:
- required_by[new_requirement].add(req.project_name)
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
-
- processed[req] = True
-
- # return list of distros to activate
- return to_activate
-
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
+
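
A minimal sketch of resolve(): it returns the needed distributions without activating them, and may raise DistributionNotFound or VersionConflict. The 'setuptools>=40' spec is just an example:

    import pkg_resources

    reqs = list(pkg_resources.parse_requirements('setuptools>=40'))
    dists = pkg_resources.working_set.resolve(reqs)   # nothing is activated
    print([d.project_name for d in dists])
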
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- # add plugins+libs to sys.path
- map(working_set.add, distributions)
- # display errors
- print('Could not load', errors)
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment``
-        that contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- # scan project names in alphabetic order
- plugin_projects.sort()
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- # put all our entries in shadow_set
- list(map(shadow_set.add, self))
-
- for project_name in plugin_projects:
-
- for dist in plugin_env[project_name]:
-
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError as v:
- # save error info
- error_info[dist] = v
- if fallback:
- # try the next older version of project
- continue
- else:
- # give up on this project, keep going
- break
-
- else:
- list(map(shadow_set.add, resolvees))
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+ directories. The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions. If `full_env` is not
+ supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
+
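
A one-line sketch against the global working set, assuming setuptools is installed; the return value lists every distribution that had to be activated:

    import pkg_resources

    for dist in pkg_resources.working_set.require('setuptools'):
        print('activated', dist)
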
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
if not existing:
return
- for dist in self:
- callback(dist)
-
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
- self.callbacks[:]
- )
-
- def __setstate__(self, e_k_b_c):
- entries, keys, by_key, callbacks = e_k_b_c
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.callbacks = callbacks[:]
-
-
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
+
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
@@ -948,354 +948,354 @@ class _ReqExtras(dict):
class Environment:
- """Searchable snapshot of distributions on a search path"""
-
+ """Searchable snapshot of distributions on a search path"""
+
def __init__(
self, search_path=None, platform=get_supported_platform(),
- python=PY_MAJOR):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self, project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
-
- Uses case-insensitive `project_name` comparison, assuming all the
- project's distributions use their project's name converted to all
- lowercase as their key.
-
- """
- distribution_key = project_name.lower()
- return self._distmap.get(distribution_key, [])
-
- def add(self, dist):
- """Add `dist` if we ``can_add()`` it and it has not already been added
- """
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key, [])
- if dist not in dists:
- dists.append(dist)
- dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- # try to download/install
- return self.obtain(req, installer)
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]:
- yield key
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other, Distribution):
- self.add(other)
- elif isinstance(other, Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(
- resource_name
- )
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(
- resource_name
- )
-
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- tmpl = textwrap.dedent("""
- Can't extract file(s) to egg cache
-
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
+
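
A small sketch of snapshotting and querying an Environment; the output depends on what is installed locally:

    import pkg_resources

    env = pkg_resources.Environment()    # scans sys.path by default
    for project in env:
        dists = env[project]             # sorted newest-to-oldest
        print(project, dists[0].version)
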
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
+
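
These methods back the module-level convenience functions. A sketch using pkg_resources' own package as the resource container (note that resource_string returns bytes on Python 3, despite its name):

    import pkg_resources

    if pkg_resources.resource_exists('pkg_resources', '__init__.py'):
        data = pkg_resources.resource_string('pkg_resources', '__init__.py')
        print(len(data), 'bytes')
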
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
The following error occurred while trying to extract file(s)
to the Python egg cache:
-
- {old_exc}
-
- The Python egg cache directory is currently set to:
-
- {cache_path}
-
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
- """).lstrip()
- err = ExtractionError(tmpl.format(**locals()))
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
+ try:
+ _bypass_ensure_directory(target_path)
except Exception:
- self.extraction_error()
-
- self._warn_unsafe_extraction_path(extract_path)
-
- self.cached_files[target_path] = 1
- return target_path
-
- @staticmethod
- def _warn_unsafe_extraction_path(path):
- """
- If the default extraction path is overridden and set to an insecure
- location, such as /tmp, it opens up an opportunity for an attacker to
- replace an extracted file with an unauthorized payload. Warn the user
- if a known insecure location is used.
-
- See Distribute #375 for more details.
- """
- if os.name == 'nt' and not path.startswith(os.environ['windir']):
- # On Windows, permissions are generally restrictive by default
- # and temp directories are not writable by other users, so
- # bypass the warning.
- return
- mode = os.stat(path).st_mode
- if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"Extraction path is writable by group/others "
"and vulnerable to attack when "
"used with get_resource_filename ({path}). "
"Consider a more secure "
- "location (set with .set_extraction_path or the "
+ "location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)."
).format(**locals())
- warnings.warn(msg, UserWarning)
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
- os.chmod(tempname, mode)
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError(
- "Can't change extraction path, files already extracted"
- )
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-def get_default_cache():
- """
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
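(For illustration: the POSIX branch above adds read/execute bits for
user/group/other and masks to the permission bits; the same transform in
isolation.)

    import os

    def make_executable(path):
        # add r-x for everyone, keep at most the 0o7777 mode bits
        mode = (os.stat(path).st_mode | 0o555) & 0o7777
        os.chmod(path, mode)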
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
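(A sketch of pairing a temporary extraction directory with cleanup, as the
docstring recommends; the prefix is arbitrary.)

    import atexit
    import tempfile
    import pkg_resources

    manager = pkg_resources.ResourceManager()
    manager.set_extraction_path(tempfile.mkdtemp(prefix='egg-cache-'))
    # cleanup is never automatic; it must be registered explicitly
    atexit.register(manager.cleanup_resources)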
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
+def get_default_cache():
+ """
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
@@ -1304,45 +1304,45 @@ def get_default_cache():
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
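(For illustration: the environment variable takes precedence over the appdirs
fallback; the cache path is hypothetical.)

    import os
    import pkg_resources

    os.environ['PYTHON_EGG_CACHE'] = '/var/cache/python-eggs'
    assert pkg_resources.get_default_cache() == '/var/cache/python-eggs'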
-
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """
- Convert an arbitrary string to a standard version string
- """
- try:
- # normalize the version
- return str(packaging.version.Version(version))
- except packaging.version.InvalidVersion:
+
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
- """
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
return name.replace('-', '_')
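(Concrete effects of the four normalizers above, with made-up inputs; the
results follow directly from the regexes shown.)

    from pkg_resources import safe_name, safe_version, safe_extra, to_filename

    safe_name('my project?!')     # 'my-project-'
    safe_version('1.0 beta')      # '1.0.beta' (fallback for invalid PEP 440 input)
    safe_extra('Tests & Docs')    # 'tests_docs'
    to_filename('my-project')     # 'my_project'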
-
-
+
+
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
@@ -1355,14 +1355,14 @@ def invalid_marker(text):
e.lineno = None
return e
return False
-
-
+
+
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
-
+
This implementation uses the 'pyparsing' module.
"""
try:
@@ -1370,38 +1370,38 @@ def evaluate_marker(text, extra=None):
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e) from e
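(For illustration: marker evaluation depends on the running interpreter; these
results assume any Python 3 interpreter.)

    from pkg_resources import evaluate_marker, invalid_marker

    evaluate_marker('python_version >= "3"')   # True
    invalid_marker('os_name == "posix"')       # False (the marker is valid)
    invalid_marker('not a marker')             # a SyntaxError instance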
-
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return io.BytesIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
+
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
- def has_metadata(self, name):
+ def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
-
+
path = self._get_metadata_path(name)
return self._has(path)
@@ -1417,69 +1417,69 @@ class NullProvider:
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self, resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self, name):
- return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
- def resource_listdir(self, resource_name):
- return self._listdir(self._fn(self.module_path, resource_name))
-
- def metadata_listdir(self, name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info, name))
- return []
-
- def run_script(self, script_name, namespace):
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
- if not self.has_metadata(script):
+ if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
- script_text = self.get_metadata(script).replace('\r\n', '\n')
- script_text = script_text.replace('\r', '\n')
- script_filename = self._fn(self.egg_info, script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
with open(script_filename) as fid:
source = fid.read()
- code = compile(source, script_filename, 'exec')
- exec(code, namespace, namespace)
- else:
- from linecache import cache
- cache[script_filename] = (
- len(script_text), 0, script_text.split('\n'), script_filename
- )
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
script_code = compile(script_text, script_filename, 'exec')
- exec(script_code, namespace, namespace)
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
@staticmethod
def _validate_resource_path(path):
"""
@@ -1555,17 +1555,17 @@ is not allowed.
stacklevel=4,
)
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-
-register_loader_type(object, NullProvider)
-
-
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
+
+
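(For illustration: ``object`` is the catch-all key, so NullProvider is the
fallback for any loader without a more specific registration; a custom pairing
is declared the same way. MyLoader and MyProvider are hypothetical.)

    import pkg_resources

    class MyLoader:                     # a custom PEP 302 loader type
        pass

    class MyProvider(pkg_resources.NullProvider):
        def _has(self, path):           # illustration only
            return False

    pkg_resources.register_loader_type(MyLoader, MyProvider)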
def _parents(path):
"""
yield all parents of path including path
@@ -1577,61 +1577,61 @@ def _parents(path):
path, _ = os.path.split(path)
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self, module):
- NullProvider.__init__(self, module)
- self._setup_prefix()
-
- def _setup_prefix(self):
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
# Assume that metadata may be nested inside a "basket"
# of multiple eggs and use module_path instead of .archive.
eggs = filter(_is_egg_path, _parents(self.module_path))
egg = next(eggs, None)
egg and self._set_egg(egg)
-
+
def _set_egg(self, path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self, path):
- return os.path.isdir(path)
-
- def _listdir(self, path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- with open(path, 'rb') as stream:
- return stream.read()
-
- @classmethod
- def _register(cls):
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- module_path = None
-
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
_isdir = _has = lambda self, path: False
def _get(self, path):
@@ -1640,261 +1640,261 @@ class EmptyProvider(NullProvider):
def _listdir(self, path):
return []
- def __init__(self):
- pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
- """
- zip manifest builder
- """
-
- @classmethod
- def build(cls, path):
- """
- Build a dictionary similar to the zipimport directory
- caches, except instead of tuples, store ZipInfo objects.
-
- Use a platform-specific path separator (os.sep) for the path keys
- for compatibility with pypy on Windows.
- """
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
with zipfile.ZipFile(path) as zfile:
- items = (
- (
- name.replace('/', os.sep),
- zfile.getinfo(name),
- )
- for name in zfile.namelist()
- )
- return dict(items)
-
- load = build
-
-
-class MemoizedZipManifests(ZipManifests):
- """
- Memoized zipfile manifests.
- """
- manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
- def load(self, path):
- """
- Load a manifest at path or return a suitable manifest already loaded.
- """
- path = os.path.normpath(path)
- mtime = os.stat(path).st_mtime
-
- if path not in self or self[path].mtime != mtime:
- manifest = self.build(path)
- self[path] = self.manifest_mod(manifest, mtime)
-
- return self[path].manifest
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
- _zip_manifests = MemoizedZipManifests()
-
- def __init__(self, module):
- EggProvider.__init__(self, module)
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre):]
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.zip_pre)
- )
-
- def _parts(self, zip_path):
- # Convert a zipfile subpath into an egg-relative path part list.
- # pseudo-fs path
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
- raise AssertionError(
- "%s is not a subpath of %s" % (fspath, self.egg_root)
- )
-
- @property
- def zipinfo(self):
- return self._zip_manifests.load(self.loader.archive)
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- @staticmethod
- def _get_date_and_size(zip_stat):
- size = zip_stat.file_size
- # ymdhms+wday, yday, dst
- date_time = zip_stat.date_time + (0, 0, -1)
- # 1980 offset already done
- timestamp = time.mktime(date_time)
- return timestamp, size
-
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
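(For illustration: the tuple arithmetic above pads ZipInfo.date_time's six
fields out to the nine that time.mktime() expects, with dst=-1 so local DST is
resolved automatically.)

    import time
    import zipfile

    def zip_mtime(zinfo: zipfile.ZipInfo) -> float:
        # (Y, M, D, h, m, s) + (weekday, yearday, dst) placeholders
        return time.mktime(zinfo.date_time + (0, 0, -1))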
# FIXME: 'ZipProvider._extract_resource' is too complex (12)
def _extract_resource(self, manager, zip_path): # noqa: C901
-
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(
- manager, os.path.join(zip_path, name)
- )
- # return the extracted directory name
- return os.path.dirname(last)
-
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
- if not WRITE_SUPPORT:
- raise IOError('"os.rename" and "os.unlink" are not supported '
- 'on this platform')
- try:
-
- real_path = manager.get_cache_path(
- self.egg_name, self._parts(zip_path)
- )
-
- if self._is_current(real_path, zip_path):
- return real_path
-
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp, timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- if self._is_current(real_path, zip_path):
- # the file became current since it was checked above,
- # so proceed.
- return real_path
- # Windows, del old file and retry
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+ # Windows, del old file and retry
elif os.name == 'nt':
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- # report a user-friendly error
- manager.extraction_error()
-
- return real_path
-
- def _is_current(self, file_path, zip_path):
- """
- Return True if the file_path is current for this zip_path
- """
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
- if not os.path.isfile(file_path):
- return False
- stat = os.stat(file_path)
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
- return False
- # check that the contents match
- zip_contents = self.loader.get_data(zip_path)
- with open(file_path, 'rb') as f:
- file_contents = f.read()
- return zip_contents == file_contents
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self, fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self, fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
- def _resource_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self, path):
- self.path = path
-
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
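(For illustration: the loop above folds flat zip entry paths into a
parent-to-children directory index, stopping as soon as a known parent is
reached; the same idea in isolation, with a sample path.)

    import os

    def build_dir_index(paths):
        ind = {}
        for path in paths:
            parts = path.split(os.sep)
            while parts:
                parent = os.sep.join(parts[:-1])
                if parent in ind:
                    ind[parent].append(parts[-1])
                    break
                else:
                    ind[parent] = [parts.pop()]
        return ind

    build_dir_index([os.sep.join(['EGG-INFO', 'PKG-INFO'])])
    # {'EGG-INFO': ['PKG-INFO'], '': ['EGG-INFO']}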
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+ This provider rejects all data and metadata requests except for PKG-INFO,
+ which is treated as existing, and will be the contents of the file at
+ the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
def _get_metadata_path(self, name):
return self.path
- def has_metadata(self, name):
+ def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
-
- def get_metadata(self, name):
+
+ def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
-
+
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
@@ -1907,106 +1907,106 @@ class FileMetadata(EmptyProvider):
msg = tmpl.format(**locals())
warnings.warn(msg)
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
- dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+ dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
self.zip_pre = importer.archive + os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
_declare_state('dict', _distribution_finders={})
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
-
-def find_eggs_in_zip(importer, path_item, only=False):
- """
- Find eggs in zip files; possibly multiple nested eggs.
- """
- if importer.archive.endswith('.whl'):
- # wheels are not supported with this finder
- # they don't have PKG-INFO metadata, and won't ever contain eggs
- return
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- # don't yield nested distros
- return
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+
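(Typical use of the finder machinery, for illustration; the path is
hypothetical and the attributes available depend on each distribution's
metadata.)

    import pkg_resources

    for dist in pkg_resources.find_distributions('/usr/lib/python3/dist-packages'):
        print(dist.project_name, dist.version)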
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
- subpath = os.path.join(path_item, subitem)
+ subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
- yield dist
+ yield dist
elif subitem.lower().endswith(('.dist-info', '.egg-info')):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-def find_nothing(importer, path_item, only=False):
- return ()
-register_finder(object, find_nothing)
-
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+    return ()
+
+register_finder(object, find_nothing)
def _by_version_descending(names):
"""
@@ -2043,15 +2043,15 @@ def _by_version_descending(names):
return sorted(names, key=_by_version, reverse=True)
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
- )
+ )
)
return
@@ -2169,39 +2169,39 @@ def resolve_egg_link(path):
return next(dist_groups, ())
-register_finder(pkgutil.ImpImporter, find_on_path)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer, path_entry, moduleName, module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
-
- importer = get_importer(path_item)
- if importer is None:
- return None
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
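(A sketch of the handler callable shape described in the docstring above;
MyImporter and the handler body are hypothetical.)

    import pkg_resources

    class MyImporter:                   # a custom sys.path-hook importer type
        pass

    def my_ns_handler(importer, path_entry, moduleName, module):
        # return a path_entry for child packages, or None to decline
        return None

    pkg_resources.register_namespace_handler(MyImporter, my_ns_handler)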
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
# use find_spec (PEP 451) and fall-back to find_module (PEP 302)
try:
@@ -2212,34 +2212,34 @@ def _handle_ns(packageName, path_item):
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = types.ModuleType(packageName)
- module.__path__ = []
- _set_parent_ns(packageName)
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer, path_item, packageName, module)
- if subpath is not None:
- path = module.__path__
- path.append(subpath)
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
importlib.import_module(packageName)
- _rebuild_mod_path(path, packageName, module)
- return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
- """
- Rebuild module.__path__ ensuring that all entries are ordered
- corresponding to their sys.path order
- """
- sys_path = [_normalize_cached(p) for p in sys.path]
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
- """
+ """
Workaround for #520 and #513.
"""
try:
@@ -2249,13 +2249,13 @@ def _rebuild_mod_path(orig_path, package_name, module):
def position_in_sys_path(path):
"""
- Return the ordinal of the path based on its position in sys.path
- """
+ Return the ordinal of the path based on its position in sys.path
+ """
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
+
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
@@ -2263,86 +2263,86 @@ def _rebuild_mod_path(orig_path, package_name, module):
module.__path__[:] = new_path
else:
module.__path__ = new_path
-
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- _imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
- declare_namespace(parent)
- if parent not in _namespace_packages:
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
except AttributeError as e:
raise TypeError("Not a package:", parent) from e
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- _imp.release_lock()
-
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- _imp.acquire_lock()
- try:
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
+
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
for package in _namespace_packages.get(parent, ()):
- subpath = _handle_ns(package, path_item)
- if subpath:
- fixup_namespace_packages(subpath, package)
- finally:
- _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
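(For illustration: the canonical legacy use, placed in a namespace package's
__init__.py so each installed portion extends the shared __path__.)

    __import__('pkg_resources').declare_namespace(__name__)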
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
if _normalize_cached(item) == normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
- register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(
_cygwin_patch(filename))))
-
+
def _cygwin_patch(filename): # pragma: nocover
"""
@@ -2355,13 +2355,13 @@ def _cygwin_patch(filename): # pragma: nocover
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
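(For illustration: the mutable default argument above is a deliberate
per-process memo table; the same pattern in isolation, with a trivial
normalizer standing in for normalize_path.)

    def normalize_once(name, _cache={}):
        # the default dict is created once and shared across all calls
        try:
            return _cache[name]
        except KeyError:
            _cache[name] = result = name.strip().lower()
            return result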
def _is_egg_path(path):
"""
@@ -2378,27 +2378,27 @@ def _is_zip_egg(path):
)
-def _is_unpacked_egg(path):
- """
- Determine if given path appears to be an unpacked egg.
- """
- return (
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
path.lower().endswith('.egg') and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
- )
-
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
def _nonblank(str):
return str and not str.startswith('#')
-
+
@functools.singledispatch
def yield_lines(iterable):
@@ -2411,310 +2411,310 @@ def _(text):
return filter(_nonblank, map(str.strip, text.splitlines()))
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"""
- (?P<name>[^-]+) (
- -(?P<ver>[^-]+) (
- -py(?P<pyver>[^-]+) (
- -(?P<plat>.+)
- )?
- )?
- )?
- """,
- re.VERBOSE | re.IGNORECASE,
-).match
-
-
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
+
+
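(For illustration: matching a well-formed egg basename against the regex
above; the filename is made up.)

    from pkg_resources import EGG_NAME

    m = EGG_NAME('FooBar-1.2-py2.7-linux-x86_64')
    m.group('name', 'ver', 'pyver', 'plat')
    # ('FooBar', '1.2', '2.7', 'linux-x86_64')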
class EntryPoint:
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
self.extras = tuple(extras)
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, *args, **kwargs):
- """
- Require packages for this EntryPoint, then resolve it.
- """
- if not require or args or kwargs:
- warnings.warn(
- "Parameters to load are deprecated. Call .resolve and "
- ".require separately.",
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
PkgResourcesDeprecationWarning,
- stacklevel=2,
- )
- if require:
- self.require(*args, **kwargs)
- return self.resolve()
-
- def resolve(self):
- """
- Resolve the entry point from its module and attrs.
- """
- module = __import__(self.module_name, fromlist=['__name__'], level=0)
- try:
- return functools.reduce(getattr, self.attrs, module)
- except AttributeError as exc:
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
raise ImportError(str(exc)) from exc
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
- reqs = self.dist.requires(self.extras)
+ reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
- list(map(working_set.add, items))
-
- pattern = re.compile(
- r'\s*'
- r'(?P<name>.+?)\s*'
- r'=\s*'
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+))?\s*'
- r'(?P<extras>\[.*\])?\s*$'
- )
-
- @classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1, extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional
- """
- m = cls.pattern.match(src)
- if not m:
- msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
- raise ValueError(msg, src)
- res = m.groupdict()
- extras = cls._parse_extras(res['extras'])
- attrs = res['attr'].split('.') if res['attr'] else ()
- return cls(res['name'], res['module'], attrs, extras, dist)
-
- @classmethod
- def _parse_extras(cls, extras_spec):
- if not extras_spec:
- return ()
- req = Requirement.parse('x' + extras_spec)
- if req.specs:
- raise ValueError()
- return req.extras
-
- @classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+ ``[extras]`` parts are optional
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
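(For illustration: parsing the syntax documented above; the target module and
extra are made up.)

    from pkg_resources import EntryPoint

    ep = EntryPoint.parse('console = mypkg.cli:main [dev]')
    ep.module_name, ep.attrs, ep.extras   # ('mypkg.cli', ('main',), ('dev',))
    str(ep)                               # 'console = mypkg.cli:main [dev]'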
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
- return this
-
- @classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data, dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
-
-def _version_from_file(lines):
- """
- Given an iterable of lines from a Metadata file, return
- the value of the Version field, if present, or None otherwise.
- """
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
+
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
def is_version_line(line):
return line.lower().startswith('version:')
- version_lines = filter(is_version_line, lines)
- line = next(iter(version_lines), '')
- _, _, value = line.partition(':')
- return safe_version(value.strip()) or None
-
-
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
+
class Distribution:
- """Wrap an actual or potential sys.path entry w/metadata"""
- PKG_INFO = 'PKG-INFO'
-
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
def __init__(
self, location=None, metadata=None, project_name=None,
- version=None, py_version=PY_MAJOR, platform=None,
- precedence=EGG_DIST):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- @classmethod
- def from_location(cls, location, basename, metadata=None, **kw):
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in _distributionImpl:
- cls = _distributionImpl[ext.lower()]
-
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name', 'ver', 'pyver', 'plat'
- )
- return cls(
- location, metadata, project_name=project_name, version=version,
- py_version=py_version, platform=platform, **kw
- )._reload_version()
-
- def _reload_version(self):
- return self
-
- @property
- def hashcmp(self):
- return (
- self.parsed_version,
- self.precedence,
- self.key,
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
self.location,
- self.py_version or '',
- self.platform or '',
- )
-
- def __hash__(self):
- return hash(self.hashcmp)
-
- def __lt__(self, other):
- return self.hashcmp < other.hashcmp
-
- def __le__(self, other):
- return self.hashcmp <= other.hashcmp
-
- def __gt__(self, other):
- return self.hashcmp > other.hashcmp
-
- def __ge__(self, other):
- return self.hashcmp >= other.hashcmp
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- # It's not a Distribution, so they are not equal
- return False
- return self.hashcmp == other.hashcmp
-
- def __ne__(self, other):
- return not self == other
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- @property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
-
- @property
- def parsed_version(self):
- if not hasattr(self, "_parsed_version"):
- self._parsed_version = parse_version(self.version)
-
- return self._parsed_version
-
- def _warn_legacy_version(self):
- LV = packaging.version.LegacyVersion
- is_legacy = isinstance(self._parsed_version, LV)
- if not is_legacy:
- return
-
- # While an empty version is technically a legacy version and
- # is not a valid PEP 440 version, it's also unlikely to
- # actually come from someone and instead it is more likely that
- # it comes from setuptools attempting to parse a filename and
- # including it in the list. So for that we'll gate this warning
- # on if the version is anything at all or not.
- if not self.version:
- return
-
- tmpl = textwrap.dedent("""
- '{project_name} ({version})' is being parsed as a legacy,
- non PEP 440,
- version. You may find odd behavior and sort order.
- In particular it will be sorted as less than 0.0. It
- is recommended to migrate to PEP 440 compatible
- versions.
- """).strip().replace('\n', ' ')
-
- warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
-
- @property
- def version(self):
- try:
- return self._version
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+ # While an empty version is technically a legacy version and
+ # is not a valid PEP 440 version, it's also unlikely to
+ # actually come from someone and instead it is more likely that
+ # it comes from setuptools attempting to parse a filename and
+ # including it in the list. So for that we'll gate this warning
+ # on if the version is anything at all or not.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+ '{project_name} ({version})' is being parsed as a legacy,
+            non-PEP 440 version. You may find odd behavior and sort order.
+ In particular it will be sorted as less than 0.0. It
+ is recommended to migrate to PEP 440 compatible
+ versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
except AttributeError as e:
version = self._get_version()
- if version is None:
+ if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self) from e
- return version
-
- @property
- def _dep_map(self):
+ return version
+
+ @property
+ def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
- try:
- return self.__dep_map
- except AttributeError:
+ try:
+ return self.__dep_map
+ except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
-
+
@staticmethod
def _filter_extras(dm):
"""
@@ -2744,20 +2744,20 @@ class Distribution:
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
- def requires(self, extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None, ()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
except KeyError as e:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
) from e
- return deps
-
+ return deps
+
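
For context, one way requires() and its extras argument are conventionally used; the distribution name is a stand-in, any installed distribution works.

    import pkg_resources

    dist = pkg_resources.get_distribution('setuptools')  # stand-in name
    # Dependencies for the null extra plus every declared extra:
    for req in dist.requires(extras=dist.extras):
        print(req.project_name, req.specs)
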
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
@@ -2775,11 +2775,11 @@ class Distribution:
return path
- def _get_metadata(self, name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
@@ -2787,47 +2787,47 @@ class Distribution:
        return version

    def activate(self, path=None, replace=False):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None:
- path = sys.path
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
self.insert_on(path, replace=replace)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- for pkg in self._get_metadata('namespace_packages.txt'):
- if pkg in sys.modules:
- declare_namespace(pkg)
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name), to_filename(self.version),
- self.py_version or PY_MAJOR
- )
-
- if self.platform:
- filename += '-' + self.platform
- return filename
-
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self, self.location)
- else:
- return str(self)
-
- def __str__(self):
- try:
- version = getattr(self, 'version', None)
- except ValueError:
- version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name, version)
-
- def __getattr__(self, attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError(attr)
- return getattr(self._provider, attr)
-
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
@@ -2837,49 +2837,49 @@ class Distribution:
)
)
- @classmethod
- def from_filename(cls, filename, metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata,
- **kw
- )
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- if isinstance(self.parsed_version, packaging.version.Version):
- spec = "%s==%s" % (self.project_name, self.parsed_version)
- else:
- spec = "%s===%s" % (self.project_name, self.parsed_version)
-
- return Requirement.parse(spec)
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group, name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group, name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
return ep_map.get(group, {})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
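
A short sketch of the entry-point lookup chain above (get_entry_map, get_entry_info, load_entry_point); the group shown is the one setuptools conventionally registers, but treat its presence as an assumption.

    import pkg_resources

    dist = pkg_resources.get_distribution('setuptools')   # stand-in
    commands = dist.get_entry_map('distutils.commands')   # {} if absent
    if 'bdist_egg' in commands:
        cmd_cls = dist.load_entry_point('distutils.commands', 'bdist_egg')
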
# FIXME: 'Distribution.insert_on' is too complex (13)
def insert_on(self, path, loc=None, replace=False): # noqa: C901
"""Ensure self.location is on path
-
+
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
@@ -2897,195 +2897,195 @@ class Distribution:
- Else: add it to the front of path.
"""
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
-
- for p, item in enumerate(npath):
- if item == nloc:
+
+ for p, item in enumerate(npath):
+ if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
- elif item == bdir and self.precedence == EGG_DIST:
- # if it's an .egg, give it precedence over its directory
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- if replace:
- path.insert(0, loc)
- else:
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while True:
- try:
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
np = npath.index(nloc, p + 1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- # ha!
- p = np
-
- return
-
- def check_version_conflict(self):
- if self.key == 'setuptools':
- # ignore the inevitable setuptools self-conflicts :(
- return
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (modname not in sys.modules or modname in nsp
- or modname in _namespace_packages):
- continue
- if modname in ('pkg_resources', 'setuptools', 'site'):
- continue
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (normalize_path(fn).startswith(loc) or
- fn.startswith(self.location)):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for " + repr(self))
- return False
- return True
-
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+ # ha!
+ p = np
+
+ return
+
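
A hedged sketch of insert_on semantics against a private list (so sys.path is untouched); the egg location is made up.

    from pkg_resources import Distribution

    path = ['/usr/lib/python3/site-packages']        # hypothetical entry
    dist = Distribution(location='/tmp/plugins/demo-1.0.egg',
                        project_name='demo', version='1.0')
    dist.insert_on(path)   # replace=False: added once
    dist.insert_on(path)   # already present -> no change
    assert path.count('/tmp/plugins/demo-1.0.egg') == 1
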
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
def clone(self, **kw):
- """Copy this distribution, substituting in any changed keyword args"""
- names = 'project_name version py_version platform location precedence'
- for attr in names.split():
- kw.setdefault(attr, getattr(self, attr, None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
- @property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
- def _reload_version(self):
- """
- Packages installed by distutils (e.g. numpy or scipy),
- which uses an old safe_version, and so
- their version numbers can get mangled when
- converted to filenames (e.g., 1.11.0.dev0+2329eae to
- 1.11.0.dev0_2329eae). These distributions will not be
- parsed properly
- downstream by Distribution and safe_version, so
- take an extra step and try to get the version number from
- the metadata file itself instead of the filename.
- """
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+        Packages installed by distutils (e.g. numpy or scipy) use an
+        old safe_version, so their version numbers can get mangled
+        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
+        1.11.0.dev0_2329eae). Such distributions will not be parsed
+        properly downstream by Distribution and safe_version, so take
+        an extra step and try to get the version number from the
+        metadata file itself instead of the filename.
+ """
md_version = self._get_version()
- if md_version:
- self._version = md_version
- return self
-
-
-class DistInfoDistribution(Distribution):
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
- PKG_INFO = 'METADATA'
- EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
- @property
- def _parsed_pkg_info(self):
- """Parse and cache metadata"""
- try:
- return self._pkg_info
- except AttributeError:
- metadata = self.get_metadata(self.PKG_INFO)
- self._pkg_info = email.parser.Parser().parsestr(metadata)
- return self._pkg_info
-
- @property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._compute_dependencies()
- return self.__dep_map
-
- def _compute_dependencies(self):
- """Recompute this distribution's dependencies."""
- dm = self.__dep_map = {None: []}
-
- reqs = []
- # Including any condition expressions
- for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
-
- def reqs_for_extra(extra):
- for req in reqs:
+
+ def reqs_for_extra(extra):
+ for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
- yield req
-
- common = frozenset(reqs_for_extra(None))
- dm[None].extend(common)
-
- for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
-
- return dm
-
-
-_distributionImpl = {
- '.egg': Distribution,
- '.egg-info': EggInfoDistribution,
- '.dist-info': DistInfoDistribution,
+
+ return dm
+
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
}
-
-
+
+
def issue_warning(*args, **kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-def parse_requirements(strs):
- """Yield ``Requirement`` objects for each specification in `strs`
-
- `strs` must be a string, or a (possibly-nested) iterable thereof.
- """
- # create a steppable iterator, so we can handle \-continuations
- lines = iter(yield_lines(strs))
-
- for line in lines:
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
@@ -3097,64 +3097,64 @@ def parse_requirements(strs):
except StopIteration:
return
yield Requirement(line)
-
-
+
+
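
For orientation, a minimal sketch of parse_requirements; requirement names are hypothetical, and note the ' #' comment handling shown in the hunk above.

    from pkg_resources import parse_requirements

    reqs = list(parse_requirements("""
        demo>=1.0,<2.0   # dropped: comments start at ' #'
        extras-pkg[fast]; python_version >= "3.6"
    """))
    assert [r.project_name for r in reqs] == ['demo', 'extras-pkg']
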
class RequirementParseError(packaging.requirements.InvalidRequirement):
"Compatibility wrapper for InvalidRequirement"
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
super(Requirement, self).__init__(requirement_string)
self.unsafe_name = self.name
project_name = safe_name(self.name)
- self.project_name, self.key = project_name, project_name.lower()
+ self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
- self.hashCmp = (
- self.key,
+ self.hashCmp = (
+ self.key,
self.url,
- self.specifier,
- frozenset(self.extras),
+ self.specifier,
+ frozenset(self.extras),
str(self.marker) if self.marker else None,
- )
- self.__hash = hash(self.hashCmp)
-
- def __eq__(self, other):
- return (
- isinstance(other, Requirement) and
- self.hashCmp == other.hashCmp
- )
-
- def __ne__(self, other):
- return not self == other
-
- def __contains__(self, item):
- if isinstance(item, Distribution):
- if item.key != self.key:
- return False
-
- item = item.version
-
- # Allow prereleases always in order to match the previous behavior of
- # this method. In the future this should be smarter and follow PEP 440
- # more accurately.
- return self.specifier.contains(item, prereleases=True)
-
- def __hash__(self):
- return self.__hash
-
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
-
- @staticmethod
- def parse(s):
- req, = parse_requirements(s)
- return req
-
-
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
+
+
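
A small usage sketch for the class above; per the comment in __contains__, prereleases always match. The project name is made up.

    from pkg_resources import Requirement

    req = Requirement.parse('demo>=1.0')
    assert req.key == 'demo'
    assert '1.2' in req      # plain version strings are accepted
    assert '1.1a1' in req    # prereleases are always allowed here
    assert '0.9' not in req
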
def _always_object(classes):
"""
Ensure object appears in the mro even
@@ -3165,70 +3165,70 @@ def _always_object(classes):
return classes
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
+ if t in registry:
+ return registry[t]
+
+
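
A sketch of the adapter-registry contract: lookup walks the MRO of the object's class, with object as the universal fallback. _find_adapter is private, so this is illustration only.

    from pkg_resources import _find_adapter  # private; illustration only

    registry = {object: repr}        # fallback adapter for any type
    assert _find_adapter(registry, 42) is repr
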
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
- """Sandbox-bypassing version of ensure_directory()"""
- if not WRITE_SUPPORT:
- raise IOError('"os.mkdir" not supported on this platform.')
- dirname, filename = split(path)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
-
-
-def split_sections(s):
- """Split a string or iterable thereof into (section, content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
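
A quick sketch of split_sections on INI-like text (section names made up); note the leading None section for lines before the first header.

    from pkg_resources import split_sections

    pairs = list(split_sections("""
        preamble line
        [group1]
        a = 1
        [group2]
    """))
    assert pairs == [(None, ['preamble line']),
                     ('group1', ['a = 1']),
                     ('group2', [])]
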
def _mkstemp(*args, **kw):
- old_open = os.open
- try:
- # temporarily bypass sandboxing
- os.open = os_open
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
return tempfile.mkstemp(*args, **kw)
- finally:
- # and then put it back
- os.open = old_open
-
-
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
# Yandex resource support
from __res import Y_PYTHON_SOURCE_ROOT, ResourceImporter, executable
from library.python import resource
@@ -3313,31 +3313,31 @@ register_finder(ResourceImporter, find_in_res)
register_loader_type(ResourceImporter, ResProvider.from_module)
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
- f(*args, **kwargs)
- return f
-
-
-@_call_aside
-def _initialize(g=globals()):
- "Set up global resource manager (deliberately not state-saved)"
- manager = ResourceManager()
- g['_manager'] = manager
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
-
-
+
+
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
@@ -3347,28 +3347,28 @@ class PkgResourcesDeprecationWarning(Warning):
"""
-@_call_aside
-def _initialize_master_working_set():
- """
- Prepare the master working set and make the ``require()``
- API available.
-
- This function has explicit effects on the global state
- of pkg_resources. It is intended to be invoked once at
- the initialization of this module.
-
- Invocation by other packages is unsupported and done
- at their own risk.
- """
- working_set = WorkingSet._build_master()
- _declare_state('object', working_set=working_set)
-
- require = working_set.require
- iter_entry_points = working_set.iter_entry_points
- add_activation_listener = working_set.subscribe
- run_script = working_set.run_script
- # backward compatibility
- run_main = run_script
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
@@ -3382,6 +3382,6 @@ def _initialize_master_working_set():
existing=False,
)
working_set.entries = []
- # match order
- list(map(working_set.add_entry, sys.path))
- globals().update(locals())
+ # match order
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
diff --git a/contrib/python/setuptools/py3/setuptools/__init__.py b/contrib/python/setuptools/py3/setuptools/__init__.py
index 11abd83c3d..9d6f0bc0dd 100644
--- a/contrib/python/setuptools/py3/setuptools/__init__.py
+++ b/contrib/python/setuptools/py3/setuptools/__init__.py
@@ -1,26 +1,26 @@
-"""Extensions to the 'distutils' for large or complex distributions"""
-
+"""Extensions to the 'distutils' for large or complex distributions"""
+
from fnmatch import fnmatchcase
import functools
-import os
+import os
import re
import _distutils_hack.override # noqa: F401
-import distutils.core
+import distutils.core
from distutils.errors import DistutilsOptionError
-from distutils.util import convert_path
-
+from distutils.util import convert_path
+
from ._deprecation_warning import SetuptoolsDeprecationWarning
-
-import setuptools.version
-from setuptools.extension import Extension
+
+import setuptools.version
+from setuptools.extension import Extension
from setuptools.dist import Distribution
-from setuptools.depends import Require
+from setuptools.depends import Require
from . import monkey
-
-__all__ = [
+
+__all__ = [
'setup',
'Distribution',
'Command',
@@ -29,36 +29,36 @@ __all__ = [
'SetuptoolsDeprecationWarning',
'find_packages',
'find_namespace_packages',
-]
-
-__version__ = setuptools.version.__version__
-
-bootstrap_install_from = None
-
-
+]
+
+__version__ = setuptools.version.__version__
+
+bootstrap_install_from = None
+
+
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
- @classmethod
- def find(cls, where='.', exclude=(), include=('*',)):
- """Return a list all Python packages found within directory 'where'
-
+ @classmethod
+ def find(cls, where='.', exclude=(), include=('*',)):
+        """Return a list of all Python packages found within directory 'where'
+
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
- 'exclude' is a sequence of package names to exclude; '*' can be used
- as a wildcard in the names, such that 'foo.*' will exclude all
- subpackages of 'foo' (but not 'foo' itself).
-
- 'include' is a sequence of package names to include. If it's
- specified, only the named packages will be included. If it's not
- specified, all found packages will be included. 'include' can contain
- shell style wildcard patterns just like 'exclude'.
- """
-
+ 'exclude' is a sequence of package names to exclude; '*' can be used
+ as a wildcard in the names, such that 'foo.*' will exclude all
+ subpackages of 'foo' (but not 'foo' itself).
+
+ 'include' is a sequence of package names to include. If it's
+ specified, only the named packages will be included. If it's not
+ specified, all found packages will be included. 'include' can contain
+ shell style wildcard patterns just like 'exclude'.
+ """
+
return list(
cls._find_packages_iter(
convert_path(where),
@@ -66,23 +66,23 @@ class PackageFinder:
cls._build_filter(*include),
)
)
-
+
@classmethod
def _find_packages_iter(cls, where, exclude, include):
- """
+ """
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
- """
+ """
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
-
+
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
-
+
# Skip directory trees that are not valid packages
if '.' in dir or not cls._looks_like_package(full_path):
continue
@@ -95,30 +95,30 @@ class PackageFinder:
# down there, even if the parent was excluded.
dirs.append(dir)
- @staticmethod
- def _looks_like_package(path):
+ @staticmethod
+ def _looks_like_package(path):
"""Does a directory look like a package?"""
- return os.path.isfile(os.path.join(path, '__init__.py'))
-
- @staticmethod
- def _build_filter(*patterns):
- """
- Given a list of patterns, return a callable that will be true only if
+ return os.path.isfile(os.path.join(path, '__init__.py'))
+
+ @staticmethod
+ def _build_filter(*patterns):
+ """
+ Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
- """
- return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
-
+ """
+ return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
-class PEP420PackageFinder(PackageFinder):
- @staticmethod
- def _looks_like_package(path):
- return True
-
-find_packages = PackageFinder.find
+class PEP420PackageFinder(PackageFinder):
+ @staticmethod
+ def _looks_like_package(path):
+ return True
+
+
+find_packages = PackageFinder.find
find_namespace_packages = PEP420PackageFinder.find
-
-
+
+
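
For context, typical find_packages usage with the filters documented above; the directory layout is hypothetical, and a missing 'where' simply yields an empty list.

    from setuptools import find_packages

    pkgs = find_packages(where='src', exclude=('tests', 'tests.*'))
    # e.g. ['mypkg', 'mypkg.sub'] when src/mypkg/__init__.py exists
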
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patch `distutils.core.Distribution`.
@@ -157,21 +157,21 @@ setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
-
-
-class Command(_Command):
- __doc__ = _Command.__doc__
-
- command_consumes_arguments = False
-
- def __init__(self, dist, **kw):
- """
- Construct the command for dist, updating
- vars(self) with any keyword parameters.
- """
- _Command.__init__(self, dist)
- vars(self).update(kw)
-
+
+
+class Command(_Command):
+ __doc__ = _Command.__doc__
+
+ command_consumes_arguments = False
+
+ def __init__(self, dist, **kw):
+ """
+ Construct the command for dist, updating
+ vars(self) with any keyword parameters.
+ """
+ _Command.__init__(self, dist)
+ vars(self).update(kw)
+
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
@@ -204,36 +204,36 @@ class Command(_Command):
"'%s' must be a list of strings (got %r)" % (option, val)
)
- def reinitialize_command(self, command, reinit_subcommands=0, **kw):
- cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
- vars(cmd).update(kw)
- return cmd
-
-
-def _find_all_simple(path):
- """
- Find all files under 'path'
- """
- results = (
- os.path.join(base, file)
- for base, dirs, files in os.walk(path, followlinks=True)
- for file in files
- )
- return filter(os.path.isfile, results)
-
-
-def findall(dir=os.curdir):
- """
- Find all files under 'dir' and return the list of full filenames.
- Unless dir is '.', return full filenames with dir prepended.
- """
- files = _find_all_simple(dir)
- if dir == os.curdir:
- make_rel = functools.partial(os.path.relpath, start=dir)
- files = map(make_rel, files)
- return list(files)
-
-
+ def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+ cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+ vars(cmd).update(kw)
+ return cmd
+
+
+def _find_all_simple(path):
+ """
+ Find all files under 'path'
+ """
+ results = (
+ os.path.join(base, file)
+ for base, dirs, files in os.walk(path, followlinks=True)
+ for file in files
+ )
+ return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+ """
+ Find all files under 'dir' and return the list of full filenames.
+ Unless dir is '.', return full filenames with dir prepended.
+ """
+ files = _find_all_simple(dir)
+ if dir == os.curdir:
+ make_rel = functools.partial(os.path.relpath, start=dir)
+ files = map(make_rel, files)
+ return list(files)
+
+
class sic(str):
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
diff --git a/contrib/python/setuptools/py3/setuptools/archive_util.py b/contrib/python/setuptools/py3/setuptools/archive_util.py
index 1172c588bd..0f70284822 100644
--- a/contrib/python/setuptools/py3/setuptools/archive_util.py
+++ b/contrib/python/setuptools/py3/setuptools/archive_util.py
@@ -1,13 +1,13 @@
-"""Utilities for extracting common archive formats"""
-
-import zipfile
-import tarfile
-import os
-import shutil
-import posixpath
-import contextlib
-from distutils.errors import DistutilsError
-
+"""Utilities for extracting common archive formats"""
+
+import zipfile
+import tarfile
+import os
+import shutil
+import posixpath
+import contextlib
+from distutils.errors import DistutilsError
+
from pkg_resources import ensure_directory
__all__ = [
@@ -16,115 +16,115 @@ __all__ = [
]
-class UnrecognizedFormat(DistutilsError):
- """Couldn't recognize the archive type"""
-
+class UnrecognizedFormat(DistutilsError):
+ """Couldn't recognize the archive type"""
+
def default_filter(src, dst):
- """The default progress/filter callback; returns True for all files"""
- return dst
-
-
+ """The default progress/filter callback; returns True for all files"""
+ return dst
+
+
def unpack_archive(
filename, extract_dir, progress_filter=default_filter,
- drivers=None):
- """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
-
- `progress_filter` is a function taking two arguments: a source path
- internal to the archive ('/'-separated), and a filesystem path where it
- will be extracted. The callback must return the desired extract path
- (which may be the same as the one passed in), or else ``None`` to skip
- that file or directory. The callback can thus be used to report on the
- progress of the extraction, as well as to filter the items extracted or
- alter their extraction paths.
-
- `drivers`, if supplied, must be a non-empty sequence of functions with the
- same signature as this function (minus the `drivers` argument), that raise
- ``UnrecognizedFormat`` if they do not support extracting the designated
- archive type. The `drivers` are tried in sequence until one is found that
- does not raise an error, or until all are exhausted (in which case
- ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
- drivers, the module's ``extraction_drivers`` constant will be used, which
- means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
- order.
- """
- for driver in drivers or extraction_drivers:
- try:
- driver(filename, extract_dir, progress_filter)
- except UnrecognizedFormat:
- continue
- else:
- return
- else:
- raise UnrecognizedFormat(
- "Not a recognized archive type: %s" % filename
- )
-
-
-def unpack_directory(filename, extract_dir, progress_filter=default_filter):
- """"Unpack" a directory, using the same interface as for archives
-
- Raises ``UnrecognizedFormat`` if `filename` is not a directory
- """
- if not os.path.isdir(filename):
- raise UnrecognizedFormat("%s is not a directory" % filename)
-
- paths = {
- filename: ('', extract_dir),
- }
- for base, dirs, files in os.walk(filename):
- src, dst = paths[base]
- for d in dirs:
- paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
- for f in files:
- target = os.path.join(dst, f)
- target = progress_filter(src + f, target)
- if not target:
- # skip non-files
- continue
- ensure_directory(target)
- f = os.path.join(base, f)
- shutil.copyfile(f, target)
- shutil.copystat(f, target)
-
-
-def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack zip `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
- by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
-
- if not zipfile.is_zipfile(filename):
- raise UnrecognizedFormat("%s is not a zip file" % (filename,))
-
+ drivers=None):
+ """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
+
+ `progress_filter` is a function taking two arguments: a source path
+ internal to the archive ('/'-separated), and a filesystem path where it
+ will be extracted. The callback must return the desired extract path
+ (which may be the same as the one passed in), or else ``None`` to skip
+ that file or directory. The callback can thus be used to report on the
+ progress of the extraction, as well as to filter the items extracted or
+ alter their extraction paths.
+
+ `drivers`, if supplied, must be a non-empty sequence of functions with the
+ same signature as this function (minus the `drivers` argument), that raise
+ ``UnrecognizedFormat`` if they do not support extracting the designated
+ archive type. The `drivers` are tried in sequence until one is found that
+ does not raise an error, or until all are exhausted (in which case
+ ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
+ drivers, the module's ``extraction_drivers`` constant will be used, which
+ means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
+ order.
+ """
+ for driver in drivers or extraction_drivers:
+ try:
+ driver(filename, extract_dir, progress_filter)
+ except UnrecognizedFormat:
+ continue
+ else:
+ return
+ else:
+ raise UnrecognizedFormat(
+ "Not a recognized archive type: %s" % filename
+ )
+
+
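
A hedged sketch of the driver protocol described in the docstring above: a driver opts out by raising UnrecognizedFormat, letting the next one try. Filenames are made up, so the call is left commented.

    from setuptools.archive_util import (
        unpack_archive, unpack_zipfile, UnrecognizedFormat,
    )

    def declining_driver(filename, extract_dir, progress_filter):
        # Signal "not my format" so unpack_archive tries the next driver.
        raise UnrecognizedFormat('%s: declined' % filename)

    # unpack_archive('dist.zip', 'out',
    #                drivers=[declining_driver, unpack_zipfile])
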
+def unpack_directory(filename, extract_dir, progress_filter=default_filter):
+ """"Unpack" a directory, using the same interface as for archives
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a directory
+ """
+ if not os.path.isdir(filename):
+ raise UnrecognizedFormat("%s is not a directory" % filename)
+
+ paths = {
+ filename: ('', extract_dir),
+ }
+ for base, dirs, files in os.walk(filename):
+ src, dst = paths[base]
+ for d in dirs:
+ paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
+ for f in files:
+ target = os.path.join(dst, f)
+ target = progress_filter(src + f, target)
+ if not target:
+ # skip non-files
+ continue
+ ensure_directory(target)
+ f = os.path.join(base, f)
+ shutil.copyfile(f, target)
+ shutil.copystat(f, target)
+
+
+def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack zip `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
+ by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+
+ if not zipfile.is_zipfile(filename):
+ raise UnrecognizedFormat("%s is not a zip file" % (filename,))
+
with zipfile.ZipFile(filename) as z:
- for info in z.infolist():
- name = info.filename
-
- # don't extract absolute paths or ones with .. in them
- if name.startswith('/') or '..' in name.split('/'):
- continue
-
- target = os.path.join(extract_dir, *name.split('/'))
- target = progress_filter(name, target)
- if not target:
- continue
- if name.endswith('/'):
- # directory
- ensure_directory(target)
- else:
- # file
- ensure_directory(target)
- data = z.read(info.filename)
- with open(target, 'wb') as f:
- f.write(data)
- unix_attributes = info.external_attr >> 16
- if unix_attributes:
- os.chmod(target, unix_attributes)
-
-
+ for info in z.infolist():
+ name = info.filename
+
+ # don't extract absolute paths or ones with .. in them
+ if name.startswith('/') or '..' in name.split('/'):
+ continue
+
+ target = os.path.join(extract_dir, *name.split('/'))
+ target = progress_filter(name, target)
+ if not target:
+ continue
+ if name.endswith('/'):
+ # directory
+ ensure_directory(target)
+ else:
+ # file
+ ensure_directory(target)
+ data = z.read(info.filename)
+ with open(target, 'wb') as f:
+ f.write(data)
+ unix_attributes = info.external_attr >> 16
+ if unix_attributes:
+ os.chmod(target, unix_attributes)
+
+
def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
"""Resolve any links and extract link targets as normal files."""
while tar_member_obj is not None and (
@@ -175,20 +175,20 @@ def _iter_open_tar(tar_obj, extract_dir, progress_filter):
yield member, final_dst
-def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
- """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
-
- Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
- by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
- of the `progress_filter` argument.
- """
- try:
- tarobj = tarfile.open(filename)
+def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
+ by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+ try:
+ tarobj = tarfile.open(filename)
except tarfile.TarError as e:
- raise UnrecognizedFormat(
- "%s is not a compressed or uncompressed tar file" % (filename,)
+ raise UnrecognizedFormat(
+ "%s is not a compressed or uncompressed tar file" % (filename,)
) from e
-
+
for member, final_dst in _iter_open_tar(
tarobj, extract_dir, progress_filter,
):
@@ -198,8 +198,8 @@ def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
except tarfile.ExtractError:
# chown/chmod/mkfifo/mknode/makedev failed
pass
-
+
return True
-
-extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/contrib/python/setuptools/py3/setuptools/command/__init__.py b/contrib/python/setuptools/py3/setuptools/command/__init__.py
index 48180605ee..b966dcea57 100644
--- a/contrib/python/setuptools/py3/setuptools/command/__init__.py
+++ b/contrib/python/setuptools/py3/setuptools/command/__init__.py
@@ -1,8 +1,8 @@
-from distutils.command.bdist import bdist
-import sys
-
-if 'egg' not in bdist.format_commands:
- bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
- bdist.format_commands.append('egg')
-
-del bdist, sys
+from distutils.command.bdist import bdist
+import sys
+
+if 'egg' not in bdist.format_commands:
+ bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+ bdist.format_commands.append('egg')
+
+del bdist, sys
diff --git a/contrib/python/setuptools/py3/setuptools/command/alias.py b/contrib/python/setuptools/py3/setuptools/command/alias.py
index e96a50c7e1..452a9244ea 100644
--- a/contrib/python/setuptools/py3/setuptools/command/alias.py
+++ b/contrib/python/setuptools/py3/setuptools/command/alias.py
@@ -1,78 +1,78 @@
-from distutils.errors import DistutilsOptionError
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
- """Quote an argument for later parsing by shlex.split()"""
- for c in '"', "'", "\\", "#":
- if c in arg:
- return repr(arg)
- if arg.split() != [arg]:
- return repr(arg)
- return arg
-
-
-class alias(option_base):
- """Define a shortcut that invokes one or more commands"""
-
- description = "define a shortcut to invoke one or more commands"
- command_consumes_arguments = True
-
- user_options = [
- ('remove', 'r', 'remove (unset) the alias'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.args = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.remove and len(self.args) != 1:
- raise DistutilsOptionError(
- "Must specify exactly one argument (the alias name) when "
- "using --remove"
- )
-
- def run(self):
- aliases = self.distribution.get_option_dict('aliases')
-
- if not self.args:
- print("Command Aliases")
- print("---------------")
- for alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
-
- elif len(self.args) == 1:
- alias, = self.args
- if self.remove:
- command = None
- elif alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
- else:
- print("No alias definition found for %r" % alias)
- return
- else:
- alias = self.args[0]
- command = ' '.join(map(shquote, self.args[1:]))
-
- edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
- source, command = aliases[name]
- if source == config_file('global'):
- source = '--global-config '
- elif source == config_file('user'):
- source = '--user-config '
- elif source == config_file('local'):
- source = ''
- else:
- source = '--filename=%r' % source
- return source + name + ' ' + command
+from distutils.errors import DistutilsOptionError
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+ """Quote an argument for later parsing by shlex.split()"""
+ for c in '"', "'", "\\", "#":
+ if c in arg:
+ return repr(arg)
+ if arg.split() != [arg]:
+ return repr(arg)
+ return arg
+
+
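
A round-trip sketch for shquote (sample strings are made up): quoting and then shlex.split should recover the original argument.

    import shlex
    from setuptools.command.alias import shquote

    for arg in ['plain', 'two words', 'say "hi"']:
        assert shlex.split(shquote(arg)) == [arg]
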
+class alias(option_base):
+ """Define a shortcut that invokes one or more commands"""
+
+ description = "define a shortcut to invoke one or more commands"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('remove', 'r', 'remove (unset) the alias'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.args = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.remove and len(self.args) != 1:
+ raise DistutilsOptionError(
+ "Must specify exactly one argument (the alias name) when "
+ "using --remove"
+ )
+
+ def run(self):
+ aliases = self.distribution.get_option_dict('aliases')
+
+ if not self.args:
+ print("Command Aliases")
+ print("---------------")
+ for alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+
+ elif len(self.args) == 1:
+ alias, = self.args
+ if self.remove:
+ command = None
+ elif alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+ else:
+ print("No alias definition found for %r" % alias)
+ return
+ else:
+ alias = self.args[0]
+ command = ' '.join(map(shquote, self.args[1:]))
+
+ edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+ source, command = aliases[name]
+ if source == config_file('global'):
+ source = '--global-config '
+ elif source == config_file('user'):
+ source = '--user-config '
+ elif source == config_file('local'):
+ source = ''
+ else:
+ source = '--filename=%r' % source
+ return source + name + ' ' + command
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py b/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
index 0caee01d3c..e6b1609f7b 100644
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
+++ b/contrib/python/setuptools/py3/setuptools/command/bdist_egg.py
@@ -1,35 +1,35 @@
-"""setuptools.command.bdist_egg
-
-Build .egg distributions"""
-
-from distutils.dir_util import remove_tree, mkpath
-from distutils import log
-from types import CodeType
-import sys
-import os
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
import re
import textwrap
-import marshal
-
-from pkg_resources import get_build_platform, Distribution, ensure_directory
-from setuptools.extension import Library
-from setuptools import Command
-
+import marshal
+
+from pkg_resources import get_build_platform, Distribution, ensure_directory
+from setuptools.extension import Library
+from setuptools import Command
+
from sysconfig import get_path, get_python_version
-
-
+
+
def _get_purelib():
return get_path("purelib")
-
-
-def strip_module(filename):
- if '.' in filename:
- filename = os.path.splitext(filename)[0]
- if filename.endswith('module'):
- filename = filename[:-6]
- return filename
-
-
+
+
+def strip_module(filename):
+ if '.' in filename:
+ filename = os.path.splitext(filename)[0]
+ if filename.endswith('module'):
+ filename = filename[:-6]
+ return filename
+
+
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
independent of indeterministic filesystem readdir order
@@ -40,207 +40,207 @@ def sorted_walk(dir):
yield base, dirs, files
-def write_stub(resource, pyfile):
- _stub_template = textwrap.dedent("""
- def __bootstrap__():
- global __bootstrap__, __loader__, __file__
+def write_stub(resource, pyfile):
+ _stub_template = textwrap.dedent("""
+ def __bootstrap__():
+ global __bootstrap__, __loader__, __file__
import sys, pkg_resources, importlib.util
- __file__ = pkg_resources.resource_filename(__name__, %r)
- __loader__ = None; del __bootstrap__, __loader__
+ __file__ = pkg_resources.resource_filename(__name__, %r)
+ __loader__ = None; del __bootstrap__, __loader__
spec = importlib.util.spec_from_file_location(__name__,__file__)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
- __bootstrap__()
- """).lstrip()
- with open(pyfile, 'w') as f:
- f.write(_stub_template % resource)
-
-
-class bdist_egg(Command):
- description = "create an \"egg\" distribution"
-
- user_options = [
- ('bdist-dir=', 'b',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p', "platform name to embed in generated filenames "
- "(default: %s)" % get_build_platform()),
- ('exclude-source-files', None,
- "remove all .py files from the generated egg"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ]
-
- boolean_options = [
- 'keep-temp', 'skip-build', 'exclude-source-files'
- ]
-
- def initialize_options(self):
- self.bdist_dir = None
- self.plat_name = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = 0
- self.egg_output = None
- self.exclude_source_files = None
-
- def finalize_options(self):
- ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
- self.egg_info = ei_cmd.egg_info
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'egg')
-
- if self.plat_name is None:
- self.plat_name = get_build_platform()
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.egg_output is None:
-
- # Compute filename of the output egg
- basename = Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version,
- get_python_version(),
- self.distribution.has_ext_modules() and self.plat_name
- ).egg_name()
-
- self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
-
- def do_install_data(self):
- # Hack for packages that install data to install's --install-lib
- self.get_finalized_command('install').install_lib = self.bdist_dir
-
- site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
- old, self.distribution.data_files = self.distribution.data_files, []
-
- for item in old:
- if isinstance(item, tuple) and len(item) == 2:
- if os.path.isabs(item[0]):
- realpath = os.path.realpath(item[0])
- normalized = os.path.normcase(realpath)
- if normalized == site_packages or normalized.startswith(
- site_packages + os.sep
- ):
- item = realpath[len(site_packages) + 1:], item[1]
- # XXX else: raise ???
- self.distribution.data_files.append(item)
-
- try:
+ __bootstrap__()
+ """).lstrip()
+ with open(pyfile, 'w') as f:
+ f.write(_stub_template % resource)
+
+
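A minimal sketch of what the stub template above produces (the .so resource name and the temp path are invented; write_stub itself is a module-level function importable from setuptools.command.bdist_egg in the version shown):

    import os, tempfile
    from setuptools.command.bdist_egg import write_stub

    pyfile = os.path.join(tempfile.mkdtemp(), '_speedups.py')
    write_stub('_speedups.cpython-310-x86_64-linux-gnu.so', pyfile)
    print(open(pyfile).read())
    # %r embeds the .so filename into the stub; importing the stub later
    # loads the real extension through importlib.util at runtime.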
+class bdist_egg(Command):
+ description = "create an \"egg\" distribution"
+
+ user_options = [
+ ('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p', "platform name to embed in generated filenames "
+ "(default: %s)" % get_build_platform()),
+ ('exclude-source-files', None,
+ "remove all .py files from the generated egg"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ]
+
+ boolean_options = [
+ 'keep-temp', 'skip-build', 'exclude-source-files'
+ ]
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = 0
+ self.egg_output = None
+ self.exclude_source_files = None
+
+ def finalize_options(self):
+ ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_info = ei_cmd.egg_info
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+ if self.plat_name is None:
+ self.plat_name = get_build_platform()
+
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ if self.egg_output is None:
+
+ # Compute filename of the output egg
+ basename = Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+ get_python_version(),
+ self.distribution.has_ext_modules() and self.plat_name
+ ).egg_name()
+
+ self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
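The basename computed above comes straight from pkg_resources.Distribution.egg_name(); a sketch with an invented project name, version, and platform tag (the platform is only attached when the distribution has ext modules):

    from sysconfig import get_python_version
    from pkg_resources import Distribution

    basename = Distribution(
        None, None, 'demo', '1.0',
        get_python_version(), 'linux-x86_64',
    ).egg_name()
    print(basename + '.egg')  # e.g. demo-1.0-py3.10-linux-x86_64.egg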
+ def do_install_data(self):
+ # Hack for packages that install data to install's --install-lib
+ self.get_finalized_command('install').install_lib = self.bdist_dir
+
+ site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+ old, self.distribution.data_files = self.distribution.data_files, []
+
+ for item in old:
+ if isinstance(item, tuple) and len(item) == 2:
+ if os.path.isabs(item[0]):
+ realpath = os.path.realpath(item[0])
+ normalized = os.path.normcase(realpath)
+ if normalized == site_packages or normalized.startswith(
+ site_packages + os.sep
+ ):
+ item = realpath[len(site_packages) + 1:], item[1]
+ # XXX else: raise ???
+ self.distribution.data_files.append(item)
+
+ try:
log.info("installing package data to %s", self.bdist_dir)
- self.call_command('install_data', force=0, root=None)
- finally:
- self.distribution.data_files = old
-
- def get_outputs(self):
- return [self.egg_output]
-
- def call_command(self, cmdname, **kw):
- """Invoke reinitialized command `cmdname` with keyword args"""
- for dirname in INSTALL_DIRECTORY_ATTRS:
- kw.setdefault(dirname, self.bdist_dir)
- kw.setdefault('skip_build', self.skip_build)
- kw.setdefault('dry_run', self.dry_run)
- cmd = self.reinitialize_command(cmdname, **kw)
- self.run_command(cmdname)
- return cmd
-
+ self.call_command('install_data', force=0, root=None)
+ finally:
+ self.distribution.data_files = old
+
+ def get_outputs(self):
+ return [self.egg_output]
+
+ def call_command(self, cmdname, **kw):
+ """Invoke reinitialized command `cmdname` with keyword args"""
+ for dirname in INSTALL_DIRECTORY_ATTRS:
+ kw.setdefault(dirname, self.bdist_dir)
+ kw.setdefault('skip_build', self.skip_build)
+ kw.setdefault('dry_run', self.dry_run)
+ cmd = self.reinitialize_command(cmdname, **kw)
+ self.run_command(cmdname)
+ return cmd
+
def run(self): # noqa: C901 # is too complex (14) # FIXME
- # Generate metadata first
- self.run_command("egg_info")
- # We run install_lib before install_data, because some data hacks
- # pull their data path from the install_lib command.
+ # Generate metadata first
+ self.run_command("egg_info")
+ # We run install_lib before install_data, because some data hacks
+ # pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
- instcmd = self.get_finalized_command('install')
- old_root = instcmd.root
- instcmd.root = None
- if self.distribution.has_c_libraries() and not self.skip_build:
- self.run_command('build_clib')
- cmd = self.call_command('install_lib', warn_dir=0)
- instcmd.root = old_root
-
- all_outputs, ext_outputs = self.get_ext_outputs()
- self.stubs = []
- to_compile = []
- for (p, ext_name) in enumerate(ext_outputs):
- filename, ext = os.path.splitext(ext_name)
- pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
- '.py')
- self.stubs.append(pyfile)
+ instcmd = self.get_finalized_command('install')
+ old_root = instcmd.root
+ instcmd.root = None
+ if self.distribution.has_c_libraries() and not self.skip_build:
+ self.run_command('build_clib')
+ cmd = self.call_command('install_lib', warn_dir=0)
+ instcmd.root = old_root
+
+ all_outputs, ext_outputs = self.get_ext_outputs()
+ self.stubs = []
+ to_compile = []
+ for (p, ext_name) in enumerate(ext_outputs):
+ filename, ext = os.path.splitext(ext_name)
+ pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
+ '.py')
+ self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
- if not self.dry_run:
- write_stub(os.path.basename(ext_name), pyfile)
- to_compile.append(pyfile)
- ext_outputs[p] = ext_name.replace(os.sep, '/')
-
- if to_compile:
- cmd.byte_compile(to_compile)
- if self.distribution.data_files:
- self.do_install_data()
-
- # Make the EGG-INFO directory
- archive_root = self.bdist_dir
- egg_info = os.path.join(archive_root, 'EGG-INFO')
- self.mkpath(egg_info)
- if self.distribution.scripts:
- script_dir = os.path.join(egg_info, 'scripts')
+ if not self.dry_run:
+ write_stub(os.path.basename(ext_name), pyfile)
+ to_compile.append(pyfile)
+ ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+ if to_compile:
+ cmd.byte_compile(to_compile)
+ if self.distribution.data_files:
+ self.do_install_data()
+
+ # Make the EGG-INFO directory
+ archive_root = self.bdist_dir
+ egg_info = os.path.join(archive_root, 'EGG-INFO')
+ self.mkpath(egg_info)
+ if self.distribution.scripts:
+ script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
- self.call_command('install_scripts', install_dir=script_dir,
- no_ep=1)
-
- self.copy_metadata_to(egg_info)
- native_libs = os.path.join(egg_info, "native_libs.txt")
- if all_outputs:
+ self.call_command('install_scripts', install_dir=script_dir,
+ no_ep=1)
+
+ self.copy_metadata_to(egg_info)
+ native_libs = os.path.join(egg_info, "native_libs.txt")
+ if all_outputs:
log.info("writing %s", native_libs)
- if not self.dry_run:
- ensure_directory(native_libs)
- libs_file = open(native_libs, 'wt')
- libs_file.write('\n'.join(all_outputs))
- libs_file.write('\n')
- libs_file.close()
- elif os.path.isfile(native_libs):
+ if not self.dry_run:
+ ensure_directory(native_libs)
+ libs_file = open(native_libs, 'wt')
+ libs_file.write('\n'.join(all_outputs))
+ libs_file.write('\n')
+ libs_file.close()
+ elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
- if not self.dry_run:
- os.unlink(native_libs)
-
- write_safety_flag(
- os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
- )
-
- if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
- log.warn(
- "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
- if self.exclude_source_files:
- self.zap_pyfiles()
-
- # Make the archive
- make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
- dry_run=self.dry_run, mode=self.gen_header())
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # Add to 'Distribution.dist_files' so that the "upload" command works
- getattr(self.distribution, 'dist_files', []).append(
- ('bdist_egg', get_python_version(), self.egg_output))
-
- def zap_pyfiles(self):
- log.info("Removing .py files from temporary directory")
- for base, dirs, files in walk_egg(self.bdist_dir):
- for name in files:
+ if not self.dry_run:
+ os.unlink(native_libs)
+
+ write_safety_flag(
+ os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
+ )
+
+ if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+ log.warn(
+ "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+ if self.exclude_source_files:
+ self.zap_pyfiles()
+
+ # Make the archive
+ make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+ dry_run=self.dry_run, mode=self.gen_header())
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution, 'dist_files', []).append(
+ ('bdist_egg', get_python_version(), self.egg_output))
+
+ def zap_pyfiles(self):
+ log.info("Removing .py files from temporary directory")
+ for base, dirs, files in walk_egg(self.bdist_dir):
+ for name in files:
path = os.path.join(base, name)
- if name.endswith('.py'):
- log.debug("Deleting %s", path)
- os.unlink(path)
-
+ if name.endswith('.py'):
+ log.debug("Deleting %s", path)
+ os.unlink(path)
+
if base.endswith('__pycache__'):
path_old = path
@@ -257,200 +257,200 @@ class bdist_egg(Command):
pass
os.rename(path_old, path_new)
- def zip_safe(self):
- safe = getattr(self.distribution, 'zip_safe', None)
- if safe is not None:
- return safe
- log.warn("zip_safe flag not set; analyzing archive contents...")
- return analyze_egg(self.bdist_dir, self.stubs)
-
- def gen_header(self):
+ def zip_safe(self):
+ safe = getattr(self.distribution, 'zip_safe', None)
+ if safe is not None:
+ return safe
+ log.warn("zip_safe flag not set; analyzing archive contents...")
+ return analyze_egg(self.bdist_dir, self.stubs)
+
+ def gen_header(self):
return 'w'
-
- def copy_metadata_to(self, target_dir):
- "Copy metadata (egg info) to the target_dir"
- # normalize the path (so that a forward-slash in egg_info will
- # match using startswith below)
- norm_egg_info = os.path.normpath(self.egg_info)
- prefix = os.path.join(norm_egg_info, '')
- for path in self.ei_cmd.filelist.files:
- if path.startswith(prefix):
- target = os.path.join(target_dir, path[len(prefix):])
- ensure_directory(target)
- self.copy_file(path, target)
-
- def get_ext_outputs(self):
- """Get a list of relative paths to C extensions in the output distro"""
-
- all_outputs = []
- ext_outputs = []
-
- paths = {self.bdist_dir: ''}
+
+ def copy_metadata_to(self, target_dir):
+ "Copy metadata (egg info) to the target_dir"
+ # normalize the path (so that a forward-slash in egg_info will
+ # match using startswith below)
+ norm_egg_info = os.path.normpath(self.egg_info)
+ prefix = os.path.join(norm_egg_info, '')
+ for path in self.ei_cmd.filelist.files:
+ if path.startswith(prefix):
+ target = os.path.join(target_dir, path[len(prefix):])
+ ensure_directory(target)
+ self.copy_file(path, target)
+
+ def get_ext_outputs(self):
+ """Get a list of relative paths to C extensions in the output distro"""
+
+ all_outputs = []
+ ext_outputs = []
+
+ paths = {self.bdist_dir: ''}
for base, dirs, files in sorted_walk(self.bdist_dir):
- for filename in files:
- if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
- all_outputs.append(paths[base] + filename)
- for filename in dirs:
- paths[os.path.join(base, filename)] = (paths[base] +
- filename + '/')
-
- if self.distribution.has_ext_modules():
- build_cmd = self.get_finalized_command('build_ext')
- for ext in build_cmd.extensions:
- if isinstance(ext, Library):
- continue
- fullname = build_cmd.get_ext_fullname(ext.name)
- filename = build_cmd.get_ext_filename(fullname)
- if not os.path.basename(filename).startswith('dl-'):
- if os.path.exists(os.path.join(self.bdist_dir, filename)):
- ext_outputs.append(filename)
-
- return all_outputs, ext_outputs
-
-
-NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
-
-
-def walk_egg(egg_dir):
- """Walk an unpacked egg's contents, skipping the metadata directory"""
+ for filename in files:
+ if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+ all_outputs.append(paths[base] + filename)
+ for filename in dirs:
+ paths[os.path.join(base, filename)] = (paths[base] +
+ filename + '/')
+
+ if self.distribution.has_ext_modules():
+ build_cmd = self.get_finalized_command('build_ext')
+ for ext in build_cmd.extensions:
+ if isinstance(ext, Library):
+ continue
+ fullname = build_cmd.get_ext_fullname(ext.name)
+ filename = build_cmd.get_ext_filename(fullname)
+ if not os.path.basename(filename).startswith('dl-'):
+ if os.path.exists(os.path.join(self.bdist_dir, filename)):
+ ext_outputs.append(filename)
+
+ return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+ """Walk an unpacked egg's contents, skipping the metadata directory"""
walker = sorted_walk(egg_dir)
- base, dirs, files = next(walker)
- if 'EGG-INFO' in dirs:
- dirs.remove('EGG-INFO')
- yield base, dirs, files
- for bdf in walker:
- yield bdf
-
-
-def analyze_egg(egg_dir, stubs):
- # check for existing flag in EGG-INFO
- for flag, fn in safety_flags.items():
- if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
- return flag
- if not can_scan():
- return False
- safe = True
- for base, dirs, files in walk_egg(egg_dir):
- for name in files:
- if name.endswith('.py') or name.endswith('.pyw'):
- continue
- elif name.endswith('.pyc') or name.endswith('.pyo'):
- # always scan, even if we already know we're not safe
- safe = scan_module(egg_dir, base, name, stubs) and safe
- return safe
-
-
-def write_safety_flag(egg_dir, safe):
- # Write or remove zip safety flag file(s)
- for flag, fn in safety_flags.items():
- fn = os.path.join(egg_dir, fn)
- if os.path.exists(fn):
- if safe is None or bool(safe) != flag:
- os.unlink(fn)
- elif safe is not None and bool(safe) == flag:
- f = open(fn, 'wt')
- f.write('\n')
- f.close()
-
-
-safety_flags = {
- True: 'zip-safe',
- False: 'not-zip-safe',
-}
-
-
-def scan_module(egg_dir, base, name, stubs):
- """Check whether module possibly uses unsafe-for-zipfile stuff"""
-
- filename = os.path.join(base, name)
- if filename[:-1] in stubs:
- return True # Extension module
- pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
- module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+ base, dirs, files = next(walker)
+ if 'EGG-INFO' in dirs:
+ dirs.remove('EGG-INFO')
+ yield base, dirs, files
+ for bdf in walker:
+ yield bdf
+
+
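A quick check of walk_egg's EGG-INFO pruning, against an invented throwaway layout:

    import os, tempfile
    from setuptools.command.bdist_egg import walk_egg

    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'EGG-INFO'))
    os.makedirs(os.path.join(root, 'pkg'))
    open(os.path.join(root, 'pkg', 'mod.py'), 'w').close()

    for base, dirs, files in walk_egg(root):
        print(os.path.relpath(base, root), dirs, files)
    # EGG-INFO never shows up: it is removed from 'dirs' before the
    # underlying walk recurses into it.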
+def analyze_egg(egg_dir, stubs):
+ # check for existing flag in EGG-INFO
+ for flag, fn in safety_flags.items():
+ if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+ return flag
+ if not can_scan():
+ return False
+ safe = True
+ for base, dirs, files in walk_egg(egg_dir):
+ for name in files:
+ if name.endswith('.py') or name.endswith('.pyw'):
+ continue
+ elif name.endswith('.pyc') or name.endswith('.pyo'):
+ # always scan, even if we already know we're not safe
+ safe = scan_module(egg_dir, base, name, stubs) and safe
+ return safe
+
+
+def write_safety_flag(egg_dir, safe):
+ # Write or remove zip safety flag file(s)
+ for flag, fn in safety_flags.items():
+ fn = os.path.join(egg_dir, fn)
+ if os.path.exists(fn):
+ if safe is None or bool(safe) != flag:
+ os.unlink(fn)
+ elif safe is not None and bool(safe) == flag:
+ f = open(fn, 'wt')
+ f.write('\n')
+ f.close()
+
+
+safety_flags = {
+ True: 'zip-safe',
+ False: 'not-zip-safe',
+}
+
+
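write_safety_flag together with the safety_flags table acts as a small three-state switch; a sketch against a throwaway directory:

    import os, tempfile
    from setuptools.command.bdist_egg import write_safety_flag

    egg_info = tempfile.mkdtemp()
    write_safety_flag(egg_info, True)
    print(os.listdir(egg_info))  # ['zip-safe']
    write_safety_flag(egg_info, False)
    print(os.listdir(egg_info))  # ['not-zip-safe'] -- stale flag unlinked
    write_safety_flag(egg_info, None)
    print(os.listdir(egg_info))  # [] -- None clears both flag files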
+def scan_module(egg_dir, base, name, stubs):
+ """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+ filename = os.path.join(base, name)
+ if filename[:-1] in stubs:
+ return True # Extension module
+ pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+ module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 7):
skip = 12 # skip magic & date & file size
- else:
+ else:
skip = 16 # skip magic & reserved? & date & file size
- f = open(filename, 'rb')
- f.read(skip)
- code = marshal.load(f)
- f.close()
- safe = True
- symbols = dict.fromkeys(iter_symbols(code))
- for bad in ['__file__', '__path__']:
- if bad in symbols:
- log.warn("%s: module references %s", module, bad)
- safe = False
- if 'inspect' in symbols:
- for bad in [
-            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
- 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
- 'getinnerframes', 'getouterframes', 'stack', 'trace'
- ]:
- if bad in symbols:
- log.warn("%s: module MAY be using inspect.%s", module, bad)
- safe = False
- return safe
-
-
-def iter_symbols(code):
- """Yield names and strings used by `code` and its nested code objects"""
- for name in code.co_names:
- yield name
- for const in code.co_consts:
+ f = open(filename, 'rb')
+ f.read(skip)
+ code = marshal.load(f)
+ f.close()
+ safe = True
+ symbols = dict.fromkeys(iter_symbols(code))
+ for bad in ['__file__', '__path__']:
+ if bad in symbols:
+ log.warn("%s: module references %s", module, bad)
+ safe = False
+ if 'inspect' in symbols:
+ for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+ 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+ 'getinnerframes', 'getouterframes', 'stack', 'trace'
+ ]:
+ if bad in symbols:
+ log.warn("%s: module MAY be using inspect.%s", module, bad)
+ safe = False
+ return safe
+
+
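The 12-vs-16 byte skip above tracks the .pyc header layout: magic + mtime + source size (four bytes each) before 3.7, plus a PEP 552 flags word from 3.7 on. A self-contained check on Python >= 3.7 (the module source is invented):

    import marshal, os, py_compile, tempfile

    src = os.path.join(tempfile.mkdtemp(), 'm.py')
    with open(src, 'w') as f:
        f.write('import inspect\nx = inspect.getsource\n')
    pyc = py_compile.compile(src)

    with open(pyc, 'rb') as f:
        f.read(16)              # skip the four-word header
        code = marshal.load(f)  # the module's code object follows
    print('inspect' in code.co_names, 'getsource' in code.co_names)  # True True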
+def iter_symbols(code):
+ """Yield names and strings used by `code` and its nested code objects"""
+ for name in code.co_names:
+ yield name
+ for const in code.co_consts:
if isinstance(const, str):
- yield const
- elif isinstance(const, CodeType):
- for name in iter_symbols(const):
- yield name
-
-
-def can_scan():
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- # CPython, PyPy, etc.
- return True
- log.warn("Unable to analyze compiled code on this platform.")
- log.warn("Please ask the author to include a 'zip_safe'"
- " setting (either True or False) in the package's setup.py")
-
-
-# Attribute names of options for commands that might need to be convinced to
-# install to the egg build directory
-
-INSTALL_DIRECTORY_ATTRS = [
- 'install_lib', 'install_dir', 'install_data', 'install_base'
-]
-
-
-def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
- mode='w'):
- """Create a zip file from all the files under 'base_dir'. The output
- zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
- Python module (if available) or the InfoZIP "zip" utility (if installed
- and found on the default search path). If neither tool is available,
- raises DistutilsExecError. Returns the name of the output zip file.
- """
- import zipfile
-
- mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
- log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
- def visit(z, dirname, names):
- for name in names:
- path = os.path.normpath(os.path.join(dirname, name))
- if os.path.isfile(path):
- p = path[len(base_dir) + 1:]
- if not dry_run:
- z.write(path, p)
+ yield const
+ elif isinstance(const, CodeType):
+ for name in iter_symbols(const):
+ yield name
+
+
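iter_symbols recurses into nested code objects, which is what lets scan_module see names used only inside functions; a sketch with an invented snippet:

    from setuptools.command.bdist_egg import iter_symbols

    code = compile('def f():\n    return __file__\n', '<demo>', 'exec')
    print(sorted(set(iter_symbols(code))))
    # '__file__' is reported even though it only occurs inside f()'s
    # nested code object.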
+def can_scan():
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ # CPython, PyPy, etc.
+ return True
+ log.warn("Unable to analyze compiled code on this platform.")
+ log.warn("Please ask the author to include a 'zip_safe'"
+ " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+ 'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+ mode='w'):
+ """Create a zip file from all the files under 'base_dir'. The output
+ zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
+ Python module (if available) or the InfoZIP "zip" utility (if installed
+ and found on the default search path). If neither tool is available,
+ raises DistutilsExecError. Returns the name of the output zip file.
+ """
+ import zipfile
+
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+ log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+ def visit(z, dirname, names):
+ for name in names:
+ path = os.path.normpath(os.path.join(dirname, name))
+ if os.path.isfile(path):
+ p = path[len(base_dir) + 1:]
+ if not dry_run:
+ z.write(path, p)
log.debug("adding '%s'", p)
-
- compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
- if not dry_run:
- z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+
+ compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+ if not dry_run:
+ z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in sorted_walk(base_dir):
- visit(z, dirname, files)
- z.close()
- else:
+ visit(z, dirname, files)
+ z.close()
+ else:
for dirname, dirs, files in sorted_walk(base_dir):
- visit(None, dirname, files)
- return zip_filename
+ visit(None, dirname, files)
+ return zip_filename
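A usage sketch for make_zipfile above; the tree below is invented:

    import os, tempfile, zipfile
    from setuptools.command.bdist_egg import make_zipfile

    base = tempfile.mkdtemp()
    os.makedirs(os.path.join(base, 'pkg'))
    open(os.path.join(base, 'pkg', '__init__.py'), 'w').close()

    out = make_zipfile(os.path.join(tempfile.mkdtemp(), 'demo.egg'), base)
    print(zipfile.ZipFile(out).namelist())  # ['pkg/__init__.py'] on POSIX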
diff --git a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py b/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
index 1957977d3a..98bf5dea84 100644
--- a/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
+++ b/contrib/python/setuptools/py3/setuptools/command/bdist_rpm.py
@@ -1,40 +1,40 @@
-import distutils.command.bdist_rpm as orig
+import distutils.command.bdist_rpm as orig
import warnings
-
+
from setuptools import SetuptoolsDeprecationWarning
-
-
-class bdist_rpm(orig.bdist_rpm):
- """
- Override the default bdist_rpm behavior to do the following:
-
- 1. Run egg_info to ensure the name and version are properly calculated.
- 2. Always run 'install' using --single-version-externally-managed to
- disable eggs in RPM distributions.
- """
-
- def run(self):
+
+
+class bdist_rpm(orig.bdist_rpm):
+ """
+ Override the default bdist_rpm behavior to do the following:
+
+ 1. Run egg_info to ensure the name and version are properly calculated.
+ 2. Always run 'install' using --single-version-externally-managed to
+ disable eggs in RPM distributions.
+ """
+
+ def run(self):
warnings.warn(
"bdist_rpm is deprecated and will be removed in a future "
"version. Use bdist_wheel (wheel packages) instead.",
SetuptoolsDeprecationWarning,
)
- # ensure distro name is up-to-date
- self.run_command('egg_info')
-
- orig.bdist_rpm.run(self)
-
- def _make_spec_file(self):
- spec = orig.bdist_rpm._make_spec_file(self)
- spec = [
- line.replace(
- "setup.py install ",
- "setup.py install --single-version-externally-managed "
- ).replace(
- "%setup",
- "%setup -n %{name}-%{unmangled_version}"
+ # ensure distro name is up-to-date
+ self.run_command('egg_info')
+
+ orig.bdist_rpm.run(self)
+
+ def _make_spec_file(self):
+ spec = orig.bdist_rpm._make_spec_file(self)
+ spec = [
+ line.replace(
+ "setup.py install ",
+ "setup.py install --single-version-externally-managed "
+ ).replace(
+ "%setup",
+ "%setup -n %{name}-%{unmangled_version}"
)
- for line in spec
- ]
- return spec
+ for line in spec
+ ]
+ return spec
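The two replace() calls in _make_spec_file above, applied to sample spec lines (the lines themselves are invented for illustration):

    line = '%setup'
    print(line.replace('%setup', '%setup -n %{name}-%{unmangled_version}'))
    # -> %setup -n %{name}-%{unmangled_version}

    line = 'python3 setup.py install --root=%{buildroot}'
    print(line.replace(
        'setup.py install ',
        'setup.py install --single-version-externally-managed '))
    # -> python3 setup.py install --single-version-externally-managed --root=%{buildroot}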
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_ext.py b/contrib/python/setuptools/py3/setuptools/command/build_ext.py
index a3a0468c42..c59eff8bbf 100644
--- a/contrib/python/setuptools/py3/setuptools/command/build_ext.py
+++ b/contrib/python/setuptools/py3/setuptools/command/build_ext.py
@@ -2,29 +2,29 @@ import os
import sys
import itertools
from importlib.machinery import EXTENSION_SUFFIXES
-from distutils.command.build_ext import build_ext as _du_build_ext
-from distutils.file_util import copy_file
-from distutils.ccompiler import new_compiler
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.file_util import copy_file
+from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
-from distutils.errors import DistutilsError
-from distutils import log
-
-from setuptools.extension import Library
-
-try:
- # Attempt to use Cython for building extensions, if available
- from Cython.Distutils.build_ext import build_ext as _build_ext
+from distutils.errors import DistutilsError
+from distutils import log
+
+from setuptools.extension import Library
+
+try:
+ # Attempt to use Cython for building extensions, if available
+ from Cython.Distutils.build_ext import build_ext as _build_ext
# Additionally, assert that the compiler module will load
# also. Ref #1229.
__import__('Cython.Compiler.Main')
-except ImportError:
- _build_ext = _du_build_ext
-
+except ImportError:
+ _build_ext = _du_build_ext
+
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa
-
-
+
+
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
@@ -45,20 +45,20 @@ def _customize_compiler_for_shlib(compiler):
customize_compiler(compiler)
-have_rtld = False
-use_stubs = False
-libtype = 'shared'
-
-if sys.platform == "darwin":
- use_stubs = True
-elif os.name != 'nt':
- try:
- import dl
- use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
- except ImportError:
- pass
-
-
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+ use_stubs = True
+elif os.name != 'nt':
+ try:
+ import dl
+ use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+ except ImportError:
+ pass
+
+
def if_dl(s):
return s if have_rtld else ''
@@ -72,38 +72,38 @@ def get_abi3_suffix():
return suffix
-class build_ext(_build_ext):
- def run(self):
- """Build extensions in build directory, then copy if --inplace"""
- old_inplace, self.inplace = self.inplace, 0
- _build_ext.run(self)
- self.inplace = old_inplace
- if old_inplace:
- self.copy_extensions_to_source()
-
- def copy_extensions_to_source(self):
- build_py = self.get_finalized_command('build_py')
- for ext in self.extensions:
- fullname = self.get_ext_fullname(ext.name)
- filename = self.get_ext_filename(fullname)
- modpath = fullname.split('.')
- package = '.'.join(modpath[:-1])
- package_dir = build_py.get_package_dir(package)
- dest_filename = os.path.join(package_dir,
- os.path.basename(filename))
- src_filename = os.path.join(self.build_lib, filename)
-
- # Always copy, even if source is older than destination, to ensure
- # that the right extensions for the current Python/platform are
- # used.
- copy_file(
- src_filename, dest_filename, verbose=self.verbose,
- dry_run=self.dry_run
- )
- if ext._needs_stub:
- self.write_stub(package_dir or os.curdir, ext, True)
-
- def get_ext_filename(self, fullname):
+class build_ext(_build_ext):
+ def run(self):
+ """Build extensions in build directory, then copy if --inplace"""
+ old_inplace, self.inplace = self.inplace, 0
+ _build_ext.run(self)
+ self.inplace = old_inplace
+ if old_inplace:
+ self.copy_extensions_to_source()
+
+ def copy_extensions_to_source(self):
+ build_py = self.get_finalized_command('build_py')
+ for ext in self.extensions:
+ fullname = self.get_ext_fullname(ext.name)
+ filename = self.get_ext_filename(fullname)
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[:-1])
+ package_dir = build_py.get_package_dir(package)
+ dest_filename = os.path.join(package_dir,
+ os.path.basename(filename))
+ src_filename = os.path.join(self.build_lib, filename)
+
+ # Always copy, even if source is older than destination, to ensure
+ # that the right extensions for the current Python/platform are
+ # used.
+ copy_file(
+ src_filename, dest_filename, verbose=self.verbose,
+ dry_run=self.dry_run
+ )
+ if ext._needs_stub:
+ self.write_stub(package_dir or os.curdir, ext, True)
+
+ def get_ext_filename(self, fullname):
so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX')
if so_ext:
filename = os.path.join(*fullname.split('.')) + so_ext
@@ -111,218 +111,218 @@ class build_ext(_build_ext):
filename = _build_ext.get_ext_filename(self, fullname)
so_ext = get_config_var('EXT_SUFFIX')
- if fullname in self.ext_map:
- ext = self.ext_map[fullname]
+ if fullname in self.ext_map:
+ ext = self.ext_map[fullname]
use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
if use_abi3:
filename = filename[:-len(so_ext)]
so_ext = get_abi3_suffix()
filename = filename + so_ext
- if isinstance(ext, Library):
- fn, ext = os.path.splitext(filename)
- return self.shlib_compiler.library_filename(fn, libtype)
- elif use_stubs and ext._links_to_dynamic:
- d, fn = os.path.split(filename)
- return os.path.join(d, 'dl-' + fn)
- return filename
-
- def initialize_options(self):
- _build_ext.initialize_options(self)
- self.shlib_compiler = None
- self.shlibs = []
- self.ext_map = {}
-
- def finalize_options(self):
- _build_ext.finalize_options(self)
- self.extensions = self.extensions or []
- self.check_extensions_list(self.extensions)
- self.shlibs = [ext for ext in self.extensions
- if isinstance(ext, Library)]
- if self.shlibs:
- self.setup_shlib_compiler()
- for ext in self.extensions:
- ext._full_name = self.get_ext_fullname(ext.name)
- for ext in self.extensions:
- fullname = ext._full_name
- self.ext_map[fullname] = ext
-
- # distutils 3.1 will also ask for module names
- # XXX what to do with conflicts?
- self.ext_map[fullname.split('.')[-1]] = ext
-
- ltd = self.shlibs and self.links_to_dynamic(ext) or False
- ns = ltd and use_stubs and not isinstance(ext, Library)
- ext._links_to_dynamic = ltd
- ext._needs_stub = ns
- filename = ext._file_name = self.get_ext_filename(fullname)
- libdir = os.path.dirname(os.path.join(self.build_lib, filename))
- if ltd and libdir not in ext.library_dirs:
- ext.library_dirs.append(libdir)
- if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
- ext.runtime_library_dirs.append(os.curdir)
-
- def setup_shlib_compiler(self):
- compiler = self.shlib_compiler = new_compiler(
- compiler=self.compiler, dry_run=self.dry_run, force=self.force
- )
+ if isinstance(ext, Library):
+ fn, ext = os.path.splitext(filename)
+ return self.shlib_compiler.library_filename(fn, libtype)
+ elif use_stubs and ext._links_to_dynamic:
+ d, fn = os.path.split(filename)
+ return os.path.join(d, 'dl-' + fn)
+ return filename
+
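How the abi3 swap in get_ext_filename plays out on CPython; suffix values vary by platform, the ones shown are typical of Linux builds:

    from importlib.machinery import EXTENSION_SUFFIXES
    from distutils.sysconfig import get_config_var

    so_ext = get_config_var('EXT_SUFFIX')  # e.g. '.cpython-310-x86_64-linux-gnu.so'
    abi3 = next((s for s in EXTENSION_SUFFIXES if '.abi3' in s), None)

    filename = 'pkg/fast' + so_ext
    if abi3:  # py_limited_api extensions trade the full tag for the stable one
        filename = filename[:-len(so_ext)] + abi3
    print(filename)  # e.g. pkg/fast.abi3.so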
+ def initialize_options(self):
+ _build_ext.initialize_options(self)
+ self.shlib_compiler = None
+ self.shlibs = []
+ self.ext_map = {}
+
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.extensions = self.extensions or []
+ self.check_extensions_list(self.extensions)
+ self.shlibs = [ext for ext in self.extensions
+ if isinstance(ext, Library)]
+ if self.shlibs:
+ self.setup_shlib_compiler()
+ for ext in self.extensions:
+ ext._full_name = self.get_ext_fullname(ext.name)
+ for ext in self.extensions:
+ fullname = ext._full_name
+ self.ext_map[fullname] = ext
+
+ # distutils 3.1 will also ask for module names
+ # XXX what to do with conflicts?
+ self.ext_map[fullname.split('.')[-1]] = ext
+
+ ltd = self.shlibs and self.links_to_dynamic(ext) or False
+ ns = ltd and use_stubs and not isinstance(ext, Library)
+ ext._links_to_dynamic = ltd
+ ext._needs_stub = ns
+ filename = ext._file_name = self.get_ext_filename(fullname)
+ libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+ if ltd and libdir not in ext.library_dirs:
+ ext.library_dirs.append(libdir)
+ if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+ ext.runtime_library_dirs.append(os.curdir)
+
+ def setup_shlib_compiler(self):
+ compiler = self.shlib_compiler = new_compiler(
+ compiler=self.compiler, dry_run=self.dry_run, force=self.force
+ )
_customize_compiler_for_shlib(compiler)
-
- if self.include_dirs is not None:
- compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name, value) in self.define:
- compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- compiler.undefine_macro(macro)
- if self.libraries is not None:
- compiler.set_libraries(self.libraries)
- if self.library_dirs is not None:
- compiler.set_library_dirs(self.library_dirs)
- if self.rpath is not None:
- compiler.set_runtime_library_dirs(self.rpath)
- if self.link_objects is not None:
- compiler.set_link_objects(self.link_objects)
-
- # hack so distutils' build_extension() builds a library instead
- compiler.link_shared_object = link_shared_object.__get__(compiler)
-
- def get_export_symbols(self, ext):
- if isinstance(ext, Library):
- return ext.export_symbols
- return _build_ext.get_export_symbols(self, ext)
-
- def build_extension(self, ext):
- ext._convert_pyx_sources_to_lang()
- _compiler = self.compiler
- try:
- if isinstance(ext, Library):
- self.compiler = self.shlib_compiler
- _build_ext.build_extension(self, ext)
- if ext._needs_stub:
- cmd = self.get_finalized_command('build_py').build_lib
- self.write_stub(cmd, ext)
- finally:
- self.compiler = _compiler
-
- def links_to_dynamic(self, ext):
- """Return true if 'ext' links to a dynamic lib in the same package"""
- # XXX this should check to ensure the lib is actually being built
- # XXX as dynamic, and not just using a locally-found version or a
- # XXX static-compiled version
- libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
- pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
- return any(pkg + libname in libnames for libname in ext.libraries)
-
- def get_outputs(self):
- return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
-
- def __get_stubs_outputs(self):
- # assemble the base name for each extension that needs a stub
- ns_ext_bases = (
- os.path.join(self.build_lib, *ext._full_name.split('.'))
- for ext in self.extensions
- if ext._needs_stub
- )
- # pair each base with the extension
- pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
- return list(base + fnext for base, fnext in pairs)
-
- def __get_output_extensions(self):
- yield '.py'
- yield '.pyc'
- if self.get_finalized_command('build_py').optimize:
- yield '.pyo'
-
- def write_stub(self, output_dir, ext, compile=False):
- log.info("writing stub loader for %s to %s", ext._full_name,
- output_dir)
- stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
- '.py')
- if compile and os.path.exists(stub_file):
- raise DistutilsError(stub_file + " already exists! Please delete.")
- if not self.dry_run:
- f = open(stub_file, 'w')
- f.write(
- '\n'.join([
- "def __bootstrap__():",
- " global __bootstrap__, __file__, __loader__",
+
+ if self.include_dirs is not None:
+ compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name, value) in self.define:
+ compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ compiler.set_link_objects(self.link_objects)
+
+ # hack so distutils' build_extension() builds a library instead
+ compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+ def get_export_symbols(self, ext):
+ if isinstance(ext, Library):
+ return ext.export_symbols
+ return _build_ext.get_export_symbols(self, ext)
+
+ def build_extension(self, ext):
+ ext._convert_pyx_sources_to_lang()
+ _compiler = self.compiler
+ try:
+ if isinstance(ext, Library):
+ self.compiler = self.shlib_compiler
+ _build_ext.build_extension(self, ext)
+ if ext._needs_stub:
+ cmd = self.get_finalized_command('build_py').build_lib
+ self.write_stub(cmd, ext)
+ finally:
+ self.compiler = _compiler
+
+ def links_to_dynamic(self, ext):
+ """Return true if 'ext' links to a dynamic lib in the same package"""
+ # XXX this should check to ensure the lib is actually being built
+ # XXX as dynamic, and not just using a locally-found version or a
+ # XXX static-compiled version
+ libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+ pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
+ return any(pkg + libname in libnames for libname in ext.libraries)
+
+ def get_outputs(self):
+ return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
+
+ def __get_stubs_outputs(self):
+ # assemble the base name for each extension that needs a stub
+ ns_ext_bases = (
+ os.path.join(self.build_lib, *ext._full_name.split('.'))
+ for ext in self.extensions
+ if ext._needs_stub
+ )
+ # pair each base with the extension
+ pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+ return list(base + fnext for base, fnext in pairs)
+
+ def __get_output_extensions(self):
+ yield '.py'
+ yield '.pyc'
+ if self.get_finalized_command('build_py').optimize:
+ yield '.pyo'
+
+ def write_stub(self, output_dir, ext, compile=False):
+ log.info("writing stub loader for %s to %s", ext._full_name,
+ output_dir)
+ stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
+ '.py')
+ if compile and os.path.exists(stub_file):
+ raise DistutilsError(stub_file + " already exists! Please delete.")
+ if not self.dry_run:
+ f = open(stub_file, 'w')
+ f.write(
+ '\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, importlib.util" +
if_dl(", dl"),
- " __file__ = pkg_resources.resource_filename"
- "(__name__,%r)"
- % os.path.basename(ext._file_name),
- " del __bootstrap__",
- " if '__loader__' in globals():",
- " del __loader__",
- if_dl(" old_flags = sys.getdlopenflags()"),
- " old_dir = os.getcwd()",
- " try:",
- " os.chdir(os.path.dirname(__file__))",
- if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
+ " __file__ = pkg_resources.resource_filename"
+ "(__name__,%r)"
+ % os.path.basename(ext._file_name),
+ " del __bootstrap__",
+ " if '__loader__' in globals():",
+ " del __loader__",
+ if_dl(" old_flags = sys.getdlopenflags()"),
+ " old_dir = os.getcwd()",
+ " try:",
+ " os.chdir(os.path.dirname(__file__))",
+ if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" spec = importlib.util.spec_from_file_location(",
" __name__, __file__)",
" mod = importlib.util.module_from_spec(spec)",
" spec.loader.exec_module(mod)",
- " finally:",
- if_dl(" sys.setdlopenflags(old_flags)"),
- " os.chdir(old_dir)",
- "__bootstrap__()",
- "" # terminal \n
- ])
- )
- f.close()
- if compile:
- from distutils.util import byte_compile
-
- byte_compile([stub_file], optimize=0,
- force=True, dry_run=self.dry_run)
- optimize = self.get_finalized_command('install_lib').optimize
- if optimize > 0:
- byte_compile([stub_file], optimize=optimize,
- force=True, dry_run=self.dry_run)
- if os.path.exists(stub_file) and not self.dry_run:
- os.unlink(stub_file)
-
-
-if use_stubs or os.name == 'nt':
- # Build shared libraries
- #
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- self.link(
- self.SHARED_LIBRARY, objects, output_libname,
- output_dir, libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug, extra_preargs, extra_postargs,
- build_temp, target_lang
- )
-else:
- # Build static libraries everywhere else
- libtype = 'static'
-
- def link_shared_object(
- self, objects, output_libname, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None, export_symbols=None,
- debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
- target_lang=None):
- # XXX we need to either disallow these attrs on Library instances,
- # or warn/abort here if set, or something...
- # libraries=None, library_dirs=None, runtime_library_dirs=None,
- # export_symbols=None, extra_preargs=None, extra_postargs=None,
- # build_temp=None
-
- assert output_dir is None # distutils build_ext doesn't pass this
- output_dir, filename = os.path.split(output_libname)
- basename, ext = os.path.splitext(filename)
- if self.library_filename("x").startswith('lib'):
- # strip 'lib' prefix; this is kludgy if some platform uses
- # a different prefix
- basename = basename[3:]
-
- self.create_static_lib(
- objects, basename, output_dir, debug, target_lang
- )
+ " finally:",
+ if_dl(" sys.setdlopenflags(old_flags)"),
+ " os.chdir(old_dir)",
+ "__bootstrap__()",
+ "" # terminal \n
+ ])
+ )
+ f.close()
+ if compile:
+ from distutils.util import byte_compile
+
+ byte_compile([stub_file], optimize=0,
+ force=True, dry_run=self.dry_run)
+ optimize = self.get_finalized_command('install_lib').optimize
+ if optimize > 0:
+ byte_compile([stub_file], optimize=optimize,
+ force=True, dry_run=self.dry_run)
+ if os.path.exists(stub_file) and not self.dry_run:
+ os.unlink(stub_file)
+
+
+if use_stubs or os.name == 'nt':
+ # Build shared libraries
+ #
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ self.link(
+ self.SHARED_LIBRARY, objects, output_libname,
+ output_dir, libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug, extra_preargs, extra_postargs,
+ build_temp, target_lang
+ )
+else:
+ # Build static libraries everywhere else
+ libtype = 'static'
+
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ # XXX we need to either disallow these attrs on Library instances,
+ # or warn/abort here if set, or something...
+ # libraries=None, library_dirs=None, runtime_library_dirs=None,
+ # export_symbols=None, extra_preargs=None, extra_postargs=None,
+ # build_temp=None
+
+ assert output_dir is None # distutils build_ext doesn't pass this
+ output_dir, filename = os.path.split(output_libname)
+ basename, ext = os.path.splitext(filename)
+ if self.library_filename("x").startswith('lib'):
+ # strip 'lib' prefix; this is kludgy if some platform uses
+ # a different prefix
+ basename = basename[3:]
+
+ self.create_static_lib(
+ objects, basename, output_dir, debug, target_lang
+ )
diff --git a/contrib/python/setuptools/py3/setuptools/command/build_py.py b/contrib/python/setuptools/py3/setuptools/command/build_py.py
index 1c35af7674..c3fdc0927c 100644
--- a/contrib/python/setuptools/py3/setuptools/command/build_py.py
+++ b/contrib/python/setuptools/py3/setuptools/command/build_py.py
@@ -1,72 +1,72 @@
-from glob import glob
-from distutils.util import convert_path
-import distutils.command.build_py as orig
-import os
-import fnmatch
-import textwrap
-import io
-import distutils.errors
-import itertools
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
import stat
from setuptools.extern.more_itertools import unique_everseen
-
+
def make_writable(target):
os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
class build_py(orig.build_py):
- """Enhanced 'build_py' command that includes data files with packages
-
- The data files are specified via a 'package_data' argument to 'setup()'.
- See 'setuptools.dist.Distribution' for more details.
-
- Also, this version of the 'build_py' command allows you to specify both
- 'py_modules' and 'packages' in the same setup operation.
- """
-
- def finalize_options(self):
- orig.build_py.finalize_options(self)
- self.package_data = self.distribution.package_data
+ """Enhanced 'build_py' command that includes data files with packages
+
+ The data files are specified via a 'package_data' argument to 'setup()'.
+ See 'setuptools.dist.Distribution' for more details.
+
+ Also, this version of the 'build_py' command allows you to specify both
+ 'py_modules' and 'packages' in the same setup operation.
+ """
+
+ def finalize_options(self):
+ orig.build_py.finalize_options(self)
+ self.package_data = self.distribution.package_data
self.exclude_package_data = self.distribution.exclude_package_data or {}
- if 'data_files' in self.__dict__:
- del self.__dict__['data_files']
- self.__updated_files = []
-
- def run(self):
- """Build modules, packages, and copy data files to build directory"""
- if not self.py_modules and not self.packages:
- return
-
- if self.py_modules:
- self.build_modules()
-
- if self.packages:
- self.build_packages()
- self.build_package_data()
-
- # Only compile actual .py files, using our base class' idea of what our
- # output files are.
- self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
-
- def __getattr__(self, attr):
- "lazily compute data files"
- if attr == 'data_files':
- self.data_files = self._get_data_files()
- return self.data_files
- return orig.build_py.__getattr__(self, attr)
-
- def build_module(self, module, module_file, package):
+ if 'data_files' in self.__dict__:
+ del self.__dict__['data_files']
+ self.__updated_files = []
+
+ def run(self):
+ """Build modules, packages, and copy data files to build directory"""
+ if not self.py_modules and not self.packages:
+ return
+
+ if self.py_modules:
+ self.build_modules()
+
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ # Only compile actual .py files, using our base class' idea of what our
+ # output files are.
+ self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+ def __getattr__(self, attr):
+ "lazily compute data files"
+ if attr == 'data_files':
+ self.data_files = self._get_data_files()
+ return self.data_files
+ return orig.build_py.__getattr__(self, attr)
+
+ def build_module(self, module, module_file, package):
outfile, copied = orig.build_py.build_module(self, module, module_file, package)
- if copied:
- self.__updated_files.append(outfile)
- return outfile, copied
-
- def _get_data_files(self):
- """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
- self.analyze_manifest()
- return list(map(self._get_pkg_data_files, self.packages or ()))
-
+ if copied:
+ self.__updated_files.append(outfile)
+ return outfile, copied
+
+ def _get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ self.analyze_manifest()
+ return list(map(self._get_pkg_data_files, self.packages or ()))
+
def get_data_files_without_manifest(self):
"""
Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
@@ -77,22 +77,22 @@ class build_py(orig.build_py):
self.__dict__.setdefault('manifest_files', {})
return list(map(self._get_pkg_data_files, self.packages or ()))
- def _get_pkg_data_files(self, package):
- # Locate package source directory
- src_dir = self.get_package_dir(package)
-
- # Compute package build directory
- build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
- # Strip directory from globbed filenames
- filenames = [
- os.path.relpath(file, src_dir)
- for file in self.find_data_files(package, src_dir)
- ]
- return package, src_dir, build_dir, filenames
-
- def find_data_files(self, package, src_dir):
- """Return filenames for package's data files in 'src_dir'"""
+ def _get_pkg_data_files(self, package):
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Strip directory from globbed filenames
+ filenames = [
+ os.path.relpath(file, src_dir)
+ for file in self.find_data_files(package, src_dir)
+ ]
+ return package, src_dir, build_dir, filenames
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
patterns = self._get_platform_patterns(
self.package_data,
package,
@@ -106,94 +106,94 @@ class build_py(orig.build_py):
self.manifest_files.get(package, []),
glob_files,
)
- return self.exclude_data_files(package, src_dir, files)
-
- def build_package_data(self):
- """Copy data files into build directory"""
- for package, src_dir, build_dir, filenames in self.data_files:
- for filename in filenames:
- target = os.path.join(build_dir, filename)
- self.mkpath(os.path.dirname(target))
- srcfile = os.path.join(src_dir, filename)
- outf, copied = self.copy_file(srcfile, target)
+ return self.exclude_data_files(package, src_dir, files)
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ srcfile = os.path.join(src_dir, filename)
+ outf, copied = self.copy_file(srcfile, target)
make_writable(target)
- srcfile = os.path.abspath(srcfile)
-
- def analyze_manifest(self):
- self.manifest_files = mf = {}
- if not self.distribution.include_package_data:
- return
- src_dirs = {}
- for package in self.packages or ():
- # Locate package source directory
- src_dirs[assert_relative(self.get_package_dir(package))] = package
-
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- for path in ei_cmd.filelist.files:
- d, f = os.path.split(assert_relative(path))
- prev = None
- oldf = f
- while d and d != prev and d not in src_dirs:
- prev = d
- d, df = os.path.split(d)
- f = os.path.join(df, f)
- if d in src_dirs:
- if path.endswith('.py') and f == oldf:
- continue # it's a module, not data
- mf.setdefault(src_dirs[d], []).append(path)
-
- def get_data_files(self):
- pass # Lazily compute data files in _get_data_files() function.
-
- def check_package(self, package, package_dir):
- """Check namespace packages' __init__ for declare_namespace"""
- try:
- return self.packages_checked[package]
- except KeyError:
- pass
-
- init_py = orig.build_py.check_package(self, package, package_dir)
- self.packages_checked[package] = init_py
-
- if not init_py or not self.distribution.namespace_packages:
- return init_py
-
- for pkg in self.distribution.namespace_packages:
- if pkg == package or pkg.startswith(package + '.'):
- break
- else:
- return init_py
-
- with io.open(init_py, 'rb') as f:
- contents = f.read()
- if b'declare_namespace' not in contents:
- raise distutils.errors.DistutilsError(
- "Namespace package problem: %s is a namespace package, but "
- "its\n__init__.py does not call declare_namespace()! Please "
- 'fix it.\n(See the setuptools manual under '
-                '"Namespace Packages" for details.)\n' % (package,)
- )
- return init_py
-
- def initialize_options(self):
- self.packages_checked = {}
- orig.build_py.initialize_options(self)
-
- def get_package_dir(self, package):
- res = orig.build_py.get_package_dir(self, package)
- if self.distribution.src_root is not None:
- return os.path.join(self.distribution.src_root, res)
- return res
-
- def exclude_data_files(self, package, src_dir, files):
- """Filter filenames for package's data files in 'src_dir'"""
+ srcfile = os.path.abspath(srcfile)
+
+ def analyze_manifest(self):
+ self.manifest_files = mf = {}
+ if not self.distribution.include_package_data:
+ return
+ src_dirs = {}
+ for package in self.packages or ():
+ # Locate package source directory
+ src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ for path in ei_cmd.filelist.files:
+ d, f = os.path.split(assert_relative(path))
+ prev = None
+ oldf = f
+ while d and d != prev and d not in src_dirs:
+ prev = d
+ d, df = os.path.split(d)
+ f = os.path.join(df, f)
+ if d in src_dirs:
+ if path.endswith('.py') and f == oldf:
+ continue # it's a module, not data
+ mf.setdefault(src_dirs[d], []).append(path)
+
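The directory-climbing loop in analyze_manifest, in isolation (the sample paths are invented): it walks a manifest entry upward until it hits a known package source dir, accumulating the remainder as the data-file name:

    import os

    src_dirs = {os.path.join('src', 'pkg'): 'pkg'}
    path = os.path.join('src', 'pkg', 'data', 'conf.yml')

    d, f = os.path.split(path)
    prev = None
    while d and d != prev and d not in src_dirs:
        prev = d
        d, df = os.path.split(d)
        f = os.path.join(df, f)
    print(src_dirs.get(d), f)  # pkg data/conf.yml -> recorded as package data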
+ def get_data_files(self):
+ pass # Lazily compute data files in _get_data_files() function.
+
+ def check_package(self, package, package_dir):
+ """Check namespace packages' __init__ for declare_namespace"""
+ try:
+ return self.packages_checked[package]
+ except KeyError:
+ pass
+
+ init_py = orig.build_py.check_package(self, package, package_dir)
+ self.packages_checked[package] = init_py
+
+ if not init_py or not self.distribution.namespace_packages:
+ return init_py
+
+ for pkg in self.distribution.namespace_packages:
+ if pkg == package or pkg.startswith(package + '.'):
+ break
+ else:
+ return init_py
+
+ with io.open(init_py, 'rb') as f:
+ contents = f.read()
+ if b'declare_namespace' not in contents:
+ raise distutils.errors.DistutilsError(
+ "Namespace package problem: %s is a namespace package, but "
+ "its\n__init__.py does not call declare_namespace()! Please "
+ 'fix it.\n(See the setuptools manual under '
+                '"Namespace Packages" for details.)\n' % (package,)
+ )
+ return init_py
+
+ def initialize_options(self):
+ self.packages_checked = {}
+ orig.build_py.initialize_options(self)
+
+ def get_package_dir(self, package):
+ res = orig.build_py.get_package_dir(self, package)
+ if self.distribution.src_root is not None:
+ return os.path.join(self.distribution.src_root, res)
+ return res
+
+ def exclude_data_files(self, package, src_dir, files):
+ """Filter filenames for package's data files in 'src_dir'"""
files = list(files)
patterns = self._get_platform_patterns(
self.exclude_package_data,
package,
src_dir,
- )
+ )
match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
# flatten the groups of matches into an iterable of matches
matches = itertools.chain.from_iterable(match_groups)
@@ -201,7 +201,7 @@ class build_py(orig.build_py):
keepers = (fn for fn in files if fn not in bad)
# ditch dupes
return list(unique_everseen(keepers))
-
+
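exclude_data_files reduced to its core fnmatch/unique_everseen pipeline; patterns and filenames are invented, and the vendored more_itertools import matches the version in this diff:

    import fnmatch, itertools
    from setuptools.extern.more_itertools import unique_everseen

    files = ['a.txt', 'a.txt', 'b.dat', 'notes/readme.txt']
    patterns = ['*.dat', 'notes/*']

    match_groups = (fnmatch.filter(files, p) for p in patterns)
    bad = set(itertools.chain.from_iterable(match_groups))
    keepers = (fn for fn in files if fn not in bad)
    print(list(unique_everseen(keepers)))  # ['a.txt'] -- dupes and matches dropped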
@staticmethod
def _get_platform_patterns(spec, package, src_dir):
"""
@@ -219,24 +219,24 @@ class build_py(orig.build_py):
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
)
-
-def assert_relative(path):
- if not os.path.isabs(path):
- return path
- from distutils.errors import DistutilsSetupError
-
+
+def assert_relative(path):
+ if not os.path.isabs(path):
+ return path
+ from distutils.errors import DistutilsSetupError
+
msg = (
textwrap.dedent(
"""
- Error: setup script specifies an absolute path:
-
- %s
-
- setup() arguments must *always* be /-separated paths relative to the
- setup.py directory, *never* absolute paths.
+ Error: setup script specifies an absolute path:
+
+ %s
+
+ setup() arguments must *always* be /-separated paths relative to the
+ setup.py directory, *never* absolute paths.
"""
).lstrip()
% path
)
- raise DistutilsSetupError(msg)
+ raise DistutilsSetupError(msg)
diff --git a/contrib/python/setuptools/py3/setuptools/command/develop.py b/contrib/python/setuptools/py3/setuptools/command/develop.py
index ec70008518..24fb0a7c81 100644
--- a/contrib/python/setuptools/py3/setuptools/command/develop.py
+++ b/contrib/python/setuptools/py3/setuptools/command/develop.py
@@ -1,83 +1,83 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsError, DistutilsOptionError
-import os
-import glob
-import io
-
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsError, DistutilsOptionError
+import os
+import glob
+import io
+
import pkg_resources
-from setuptools.command.easy_install import easy_install
+from setuptools.command.easy_install import easy_install
from setuptools import namespaces
-import setuptools
-
-
+import setuptools
+
+
class develop(namespaces.DevelopInstaller, easy_install):
- """Set up package for development"""
-
- description = "install package in 'development mode'"
-
- user_options = easy_install.user_options + [
- ("uninstall", "u", "Uninstall this source package"),
- ("egg-path=", None, "Set the path to be used in the .egg-link file"),
- ]
-
- boolean_options = easy_install.boolean_options + ['uninstall']
-
- command_consumes_arguments = False # override base
-
- def run(self):
- if self.uninstall:
- self.multi_version = True
- self.uninstall_link()
+ """Set up package for development"""
+
+ description = "install package in 'development mode'"
+
+ user_options = easy_install.user_options + [
+ ("uninstall", "u", "Uninstall this source package"),
+ ("egg-path=", None, "Set the path to be used in the .egg-link file"),
+ ]
+
+ boolean_options = easy_install.boolean_options + ['uninstall']
+
+ command_consumes_arguments = False # override base
+
+ def run(self):
+ if self.uninstall:
+ self.multi_version = True
+ self.uninstall_link()
self.uninstall_namespaces()
- else:
- self.install_for_development()
- self.warn_deprecated_options()
-
- def initialize_options(self):
- self.uninstall = None
- self.egg_path = None
- easy_install.initialize_options(self)
- self.setup_path = None
- self.always_copy_from = '.' # always copy eggs installed in curdir
-
- def finalize_options(self):
- ei = self.get_finalized_command("egg_info")
- if ei.broken_egg_info:
- template = "Please rename %r to %r before using 'develop'"
- args = ei.egg_info, ei.broken_egg_info
- raise DistutilsError(template % args)
- self.args = [ei.egg_name]
-
- easy_install.finalize_options(self)
- self.expand_basedirs()
- self.expand_dirs()
- # pick up setup-dir .egg files only: no .egg-info
- self.package_index.scan(glob.glob('*.egg'))
-
- egg_link_fn = ei.egg_name + '.egg-link'
- self.egg_link = os.path.join(self.install_dir, egg_link_fn)
- self.egg_base = ei.egg_base
- if self.egg_path is None:
- self.egg_path = os.path.abspath(ei.egg_base)
-
+ else:
+ self.install_for_development()
+ self.warn_deprecated_options()
+
+ def initialize_options(self):
+ self.uninstall = None
+ self.egg_path = None
+ easy_install.initialize_options(self)
+ self.setup_path = None
+ self.always_copy_from = '.' # always copy eggs installed in curdir
+
+ def finalize_options(self):
+ ei = self.get_finalized_command("egg_info")
+ if ei.broken_egg_info:
+ template = "Please rename %r to %r before using 'develop'"
+ args = ei.egg_info, ei.broken_egg_info
+ raise DistutilsError(template % args)
+ self.args = [ei.egg_name]
+
+ easy_install.finalize_options(self)
+ self.expand_basedirs()
+ self.expand_dirs()
+ # pick up setup-dir .egg files only: no .egg-info
+ self.package_index.scan(glob.glob('*.egg'))
+
+ egg_link_fn = ei.egg_name + '.egg-link'
+ self.egg_link = os.path.join(self.install_dir, egg_link_fn)
+ self.egg_base = ei.egg_base
+ if self.egg_path is None:
+ self.egg_path = os.path.abspath(ei.egg_base)
+
target = pkg_resources.normalize_path(self.egg_base)
egg_path = pkg_resources.normalize_path(
os.path.join(self.install_dir, self.egg_path)
)
- if egg_path != target:
- raise DistutilsOptionError(
- "--egg-path must be a relative path from the install"
- " directory to " + target
- )
-
- # Make a distribution for the package's source
+ if egg_path != target:
+ raise DistutilsOptionError(
+ "--egg-path must be a relative path from the install"
+ " directory to " + target
+ )
+
+ # Make a distribution for the package's source
self.dist = pkg_resources.Distribution(
- target,
+ target,
pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name=ei.egg_name,
- )
-
+ )
+
self.setup_path = self._resolve_setup_path(
self.egg_base,
self.install_dir,
@@ -98,96 +98,96 @@ class develop(namespaces.DevelopInstaller, easy_install):
os.path.join(install_dir, egg_path, path_to_setup)
)
if resolved != pkg_resources.normalize_path(os.curdir):
- raise DistutilsOptionError(
- "Can't get a consistent path to setup script from"
+ raise DistutilsOptionError(
+ "Can't get a consistent path to setup script from"
" installation directory",
resolved,
pkg_resources.normalize_path(os.curdir),
)
return path_to_setup
-
- def install_for_development(self):
+
+ def install_for_development(self):
self.run_command('egg_info')
-
+
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
-
- if setuptools.bootstrap_install_from:
- self.easy_install(setuptools.bootstrap_install_from)
- setuptools.bootstrap_install_from = None
-
+
+ if setuptools.bootstrap_install_from:
+ self.easy_install(setuptools.bootstrap_install_from)
+ setuptools.bootstrap_install_from = None
+
self.install_namespaces()
- # create an .egg-link in the installation dir, pointing to our egg
- log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
- if not self.dry_run:
- with open(self.egg_link, "w") as f:
- f.write(self.egg_path + "\n" + self.setup_path)
- # postprocess the installed distro, fixing up .pth, installing scripts,
- # and handling requirements
- self.process_distribution(None, self.dist, not self.no_deps)
-
- def uninstall_link(self):
- if os.path.exists(self.egg_link):
- log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
- egg_link_file = open(self.egg_link)
- contents = [line.rstrip() for line in egg_link_file]
- egg_link_file.close()
+ # create an .egg-link in the installation dir, pointing to our egg
+ log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+ if not self.dry_run:
+ with open(self.egg_link, "w") as f:
+ f.write(self.egg_path + "\n" + self.setup_path)
+ # postprocess the installed distro, fixing up .pth, installing scripts,
+ # and handling requirements
+ self.process_distribution(None, self.dist, not self.no_deps)
+
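# Aside: the .egg-link written by install_for_development above is just two
# lines -- the egg directory, then the relative path back to the setup
# directory. A minimal sketch with illustrative values (not part of this
# patch):
egg_path = "/home/dev/project"   # hypothetical absolute egg_base
setup_path = "."                 # path from the egg back to setup.py
print(egg_path + "\n" + setup_path)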
+ def uninstall_link(self):
+ if os.path.exists(self.egg_link):
+ log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+ egg_link_file = open(self.egg_link)
+ contents = [line.rstrip() for line in egg_link_file]
+ egg_link_file.close()
if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
- log.warn("Link points to %s: uninstall aborted", contents)
- return
- if not self.dry_run:
- os.unlink(self.egg_link)
- if not self.dry_run:
- self.update_pth(self.dist) # remove any .pth link to us
- if self.distribution.scripts:
- # XXX should also check for entry point scripts!
- log.warn("Note: you must uninstall or replace scripts manually!")
-
- def install_egg_scripts(self, dist):
- if dist is not self.dist:
- # Installing a dependency, so fall back to normal behavior
- return easy_install.install_egg_scripts(self, dist)
-
- # create wrapper scripts in the script dir, pointing to dist.scripts
-
- # new-style...
- self.install_wrapper_scripts(dist)
-
- # ...and old-style
- for script_name in self.distribution.scripts or []:
- script_path = os.path.abspath(convert_path(script_name))
- script_name = os.path.basename(script_path)
- with io.open(script_path) as strm:
- script_text = strm.read()
- self.install_script(dist, script_name, script_text, script_path)
-
- def install_wrapper_scripts(self, dist):
- dist = VersionlessRequirement(dist)
- return easy_install.install_wrapper_scripts(self, dist)
-
-
+ log.warn("Link points to %s: uninstall aborted", contents)
+ return
+ if not self.dry_run:
+ os.unlink(self.egg_link)
+ if not self.dry_run:
+ self.update_pth(self.dist) # remove any .pth link to us
+ if self.distribution.scripts:
+ # XXX should also check for entry point scripts!
+ log.warn("Note: you must uninstall or replace scripts manually!")
+
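# Aside: the guard in uninstall_link above only removes the link when its
# parsed contents are exactly [egg_path] or [egg_path, setup_path]. In
# isolation (values illustrative):
contents = ["/home/dev/project", "."]
egg_path, setup_path = "/home/dev/project", "."
print(contents in ([egg_path], [egg_path, setup_path]))  # True -> safe to remove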
+ def install_egg_scripts(self, dist):
+ if dist is not self.dist:
+ # Installing a dependency, so fall back to normal behavior
+ return easy_install.install_egg_scripts(self, dist)
+
+ # create wrapper scripts in the script dir, pointing to dist.scripts
+
+ # new-style...
+ self.install_wrapper_scripts(dist)
+
+ # ...and old-style
+ for script_name in self.distribution.scripts or []:
+ script_path = os.path.abspath(convert_path(script_name))
+ script_name = os.path.basename(script_path)
+ with io.open(script_path) as strm:
+ script_text = strm.read()
+ self.install_script(dist, script_name, script_text, script_path)
+
+ def install_wrapper_scripts(self, dist):
+ dist = VersionlessRequirement(dist)
+ return easy_install.install_wrapper_scripts(self, dist)
+
+
class VersionlessRequirement:
- """
- Adapt a pkg_resources.Distribution to simply return the project
- name as the 'requirement' so that scripts will work across
- multiple versions.
-
+ """
+ Adapt a pkg_resources.Distribution to simply return the project
+ name as the 'requirement' so that scripts will work across
+ multiple versions.
+
>>> from pkg_resources import Distribution
- >>> dist = Distribution(project_name='foo', version='1.0')
- >>> str(dist.as_requirement())
- 'foo==1.0'
- >>> adapted_dist = VersionlessRequirement(dist)
- >>> str(adapted_dist.as_requirement())
- 'foo'
- """
-
- def __init__(self, dist):
- self.__dist = dist
-
- def __getattr__(self, name):
- return getattr(self.__dist, name)
-
- def as_requirement(self):
- return self.project_name
+ >>> dist = Distribution(project_name='foo', version='1.0')
+ >>> str(dist.as_requirement())
+ 'foo==1.0'
+ >>> adapted_dist = VersionlessRequirement(dist)
+ >>> str(adapted_dist.as_requirement())
+ 'foo'
+ """
+
+ def __init__(self, dist):
+ self.__dist = dist
+
+ def __getattr__(self, name):
+ return getattr(self.__dist, name)
+
+ def as_requirement(self):
+ return self.project_name
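
A note on VersionlessRequirement above: it is a plain delegation proxy. __getattr__ forwards every attribute to the wrapped Distribution, while as_requirement is shadowed to return only the project name, as the doctest shows. The same pattern in isolation, with a stand-in class instead of a real pkg_resources.Distribution:

class FakeDist:
    project_name = "foo"
    version = "1.0"

class Versionless:
    def __init__(self, dist):
        self._dist = dist

    def __getattr__(self, name):
        # Called only for attributes not found on Versionless itself,
        # so as_requirement below takes precedence over the wrapped one.
        return getattr(self._dist, name)

    def as_requirement(self):
        return self.project_name  # resolved via __getattr__

print(Versionless(FakeDist()).version)           # 1.0 (forwarded)
print(Versionless(FakeDist()).as_requirement())  # foo (version pin dropped)
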
diff --git a/contrib/python/setuptools/py3/setuptools/command/easy_install.py b/contrib/python/setuptools/py3/setuptools/command/easy_install.py
index c8b87493a5..fc848d0d1c 100644
--- a/contrib/python/setuptools/py3/setuptools/command/easy_install.py
+++ b/contrib/python/setuptools/py3/setuptools/command/easy_install.py
@@ -1,100 +1,100 @@
-"""
-Easy Install
-------------
-
-A tool for doing automatic download/extract/build of distutils-based Python
-packages. For detailed documentation, see the accompanying EasyInstall.txt
-file, or visit the `EasyInstall home page`__.
-
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages. For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
__ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
-
-"""
-
-from glob import glob
-from distutils.util import get_platform
-from distutils.util import convert_path, subst_vars
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
-from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
-from distutils import log, dir_util
-from distutils.command.build_scripts import first_line_re
-from distutils.spawn import find_executable
-import sys
-import os
-import zipimport
-import shutil
-import tempfile
-import zipfile
-import re
-import stat
-import random
-import textwrap
-import warnings
-import site
-import struct
-import contextlib
-import subprocess
-import shlex
-import io
+from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
import configparser
-
-
+
+
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
-from setuptools import Command
-from setuptools.sandbox import run_setup
-from setuptools.command import setopt
-from setuptools.archive_util import unpack_archive
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
-from setuptools.command import bdist_egg, egg_info
+from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
-from pkg_resources import (
- yield_lines, normalize_path, resource_string, ensure_directory,
- get_distribution, find_distributions, Environment, Requirement,
- Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
- VersionConflict, DEVELOP_DIST,
-)
+from pkg_resources import (
+ yield_lines, normalize_path, resource_string, ensure_directory,
+ get_distribution, find_distributions, Environment, Requirement,
+ Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
+ VersionConflict, DEVELOP_DIST,
+)
import pkg_resources
-
-# Turn on PEP440Warnings
-warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
-
-__all__ = [
- 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+ 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'get_exe_prefixes',
-]
-
-
-def is_64bit():
- return struct.calcsize("P") == 8
-
-
-def samefile(p1, p2):
+]
+
+
+def is_64bit():
+ return struct.calcsize("P") == 8
+
+
+def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
- both_exist = os.path.exists(p1) and os.path.exists(p2)
- use_samefile = hasattr(os.path, 'samefile') and both_exist
- if use_samefile:
- return os.path.samefile(p1, p2)
- norm_p1 = os.path.normpath(os.path.normcase(p1))
- norm_p2 = os.path.normpath(os.path.normcase(p2))
- return norm_p1 == norm_p2
-
-
+ both_exist = os.path.exists(p1) and os.path.exists(p2)
+ use_samefile = hasattr(os.path, 'samefile') and both_exist
+ if use_samefile:
+ return os.path.samefile(p1, p2)
+ norm_p1 = os.path.normpath(os.path.normcase(p1))
+ norm_p2 = os.path.normpath(os.path.normcase(p2))
+ return norm_p1 == norm_p2
+
+
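# Usage note for samefile above: when both paths exist it defers to
# os.path.samefile; otherwise it falls back to comparing normalized,
# case-folded strings, e.g.:
import os
print(os.path.normpath(os.path.normcase("pkg/./mod.py")) ==
      os.path.normpath(os.path.normcase("pkg/mod.py")))   # True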
def _to_bytes(s):
return s.encode('utf8')
-
+
def isascii(s):
try:
s.encode('ascii')
@@ -102,294 +102,294 @@ def isascii(s):
except UnicodeError:
return False
-
+
def _one_liner(text):
return textwrap.dedent(text).strip().replace('\n', '; ')
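# Aside: _one_liner above collapses a dedented block into a single
# ';'-joined line, which is how the .pth probe script further down is
# embedded. For example:
import textwrap
snippet = """
    import os
    print(os.getcwd())
"""
print(textwrap.dedent(snippet).strip().replace('\n', '; '))
# -> import os; print(os.getcwd())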
-
-
-class easy_install(Command):
- """Manage a download/build/install process"""
- description = "Find/get/install Python packages"
- command_consumes_arguments = True
-
- user_options = [
- ('prefix=', None, "installation prefix"),
- ("zip-ok", "z", "install package as a zipfile"),
- ("multi-version", "m", "make apps have to require() a version"),
- ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
- ("install-dir=", "d", "install package to DIR"),
- ("script-dir=", "s", "install scripts to DIR"),
- ("exclude-scripts", "x", "Don't install scripts"),
- ("always-copy", "a", "Copy all needed packages to install dir"),
- ("index-url=", "i", "base URL of Python Package Index"),
- ("find-links=", "f", "additional URL(s) to search for packages"),
- ("build-directory=", "b",
- "download/extract/build in DIR; keep the results"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('record=', None,
- "filename in which to record list of installed files"),
- ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
- ('site-dirs=', 'S', "list of directories where .pth files work"),
- ('editable', 'e', "Install specified packages in editable form"),
- ('no-deps', 'N', "don't install dependencies"),
- ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
- ('local-snapshots-ok', 'l',
- "allow building eggs from local checkouts"),
- ('version', None, "print version information and exit"),
- ('no-find-links', None,
+
+
+class easy_install(Command):
+ """Manage a download/build/install process"""
+ description = "Find/get/install Python packages"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('prefix=', None, "installation prefix"),
+ ("zip-ok", "z", "install package as a zipfile"),
+ ("multi-version", "m", "make apps have to require() a version"),
+ ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+ ("install-dir=", "d", "install package to DIR"),
+ ("script-dir=", "s", "install scripts to DIR"),
+ ("exclude-scripts", "x", "Don't install scripts"),
+ ("always-copy", "a", "Copy all needed packages to install dir"),
+ ("index-url=", "i", "base URL of Python Package Index"),
+ ("find-links=", "f", "additional URL(s) to search for packages"),
+ ("build-directory=", "b",
+ "download/extract/build in DIR; keep the results"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+ ('site-dirs=', 'S', "list of directories where .pth files work"),
+ ('editable', 'e', "Install specified packages in editable form"),
+ ('no-deps', 'N', "don't install dependencies"),
+ ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+ ('local-snapshots-ok', 'l',
+ "allow building eggs from local checkouts"),
+ ('version', None, "print version information and exit"),
+ ('no-find-links', None,
"Don't load find-links defined in packages being installed"),
('user', None, "install in user site-package '%s'" % site.USER_SITE)
- ]
- boolean_options = [
- 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
- 'editable',
+ ]
+ boolean_options = [
+ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
+ 'editable',
'no-deps', 'local-snapshots-ok', 'version',
'user'
- ]
-
- negative_opt = {'always-unzip': 'zip-ok'}
- create_index = PackageIndex
-
- def initialize_options(self):
+ ]
+
+ negative_opt = {'always-unzip': 'zip-ok'}
+ create_index = PackageIndex
+
+ def initialize_options(self):
warnings.warn(
"easy_install command is deprecated. "
"Use build and pip and other standards-based tools.",
EasyInstallDeprecationWarning,
)
- # the --user option seems to be an opt-in one,
- # so the default should be False.
- self.user = 0
- self.zip_ok = self.local_snapshots_ok = None
- self.install_dir = self.script_dir = self.exclude_scripts = None
- self.index_url = None
- self.find_links = None
- self.build_directory = None
- self.args = None
- self.optimize = self.record = None
- self.upgrade = self.always_copy = self.multi_version = None
- self.editable = self.no_deps = self.allow_hosts = None
- self.root = self.prefix = self.no_report = None
- self.version = None
- self.install_purelib = None # for pure module distributions
- self.install_platlib = None # non-pure (dists w/ extensions)
- self.install_headers = None # for C/C++ headers
- self.install_lib = None # set to either purelib or platlib
- self.install_scripts = None
- self.install_data = None
- self.install_base = None
- self.install_platbase = None
- if site.ENABLE_USER_SITE:
- self.install_userbase = site.USER_BASE
- self.install_usersite = site.USER_SITE
- else:
- self.install_userbase = None
- self.install_usersite = None
- self.no_find_links = None
-
- # Options not specifiable via command line
- self.package_index = None
- self.pth_file = self.always_copy_from = None
- self.site_dirs = None
- self.installed_projects = {}
- # Always read easy_install options, even if we are subclassed, or have
- # an independent instance created. This ensures that defaults will
- # always come from the standard configuration file(s)' "easy_install"
- # section, even if this is a "develop" or "install" command, or some
- # other embedding.
- self._dry_run = None
- self.verbose = self.distribution.verbose
- self.distribution._set_command_options(
- self, self.distribution.get_option_dict('easy_install')
- )
-
- def delete_blockers(self, blockers):
- extant_blockers = (
- filename for filename in blockers
- if os.path.exists(filename) or os.path.islink(filename)
- )
- list(map(self._delete_path, extant_blockers))
-
- def _delete_path(self, path):
- log.info("Deleting %s", path)
- if self.dry_run:
- return
-
- is_tree = os.path.isdir(path) and not os.path.islink(path)
- remover = rmtree if is_tree else os.unlink
- remover(path)
-
- @staticmethod
- def _render_version():
- """
- Render the Setuptools version and installation details, then exit.
- """
+ # the --user option seems to be an opt-in one,
+ # so the default should be False.
+ self.user = 0
+ self.zip_ok = self.local_snapshots_ok = None
+ self.install_dir = self.script_dir = self.exclude_scripts = None
+ self.index_url = None
+ self.find_links = None
+ self.build_directory = None
+ self.args = None
+ self.optimize = self.record = None
+ self.upgrade = self.always_copy = self.multi_version = None
+ self.editable = self.no_deps = self.allow_hosts = None
+ self.root = self.prefix = self.no_report = None
+ self.version = None
+ self.install_purelib = None # for pure module distributions
+ self.install_platlib = None # non-pure (dists w/ extensions)
+ self.install_headers = None # for C/C++ headers
+ self.install_lib = None # set to either purelib or platlib
+ self.install_scripts = None
+ self.install_data = None
+ self.install_base = None
+ self.install_platbase = None
+ if site.ENABLE_USER_SITE:
+ self.install_userbase = site.USER_BASE
+ self.install_usersite = site.USER_SITE
+ else:
+ self.install_userbase = None
+ self.install_usersite = None
+ self.no_find_links = None
+
+ # Options not specifiable via command line
+ self.package_index = None
+ self.pth_file = self.always_copy_from = None
+ self.site_dirs = None
+ self.installed_projects = {}
+ # Always read easy_install options, even if we are subclassed, or have
+ # an independent instance created. This ensures that defaults will
+ # always come from the standard configuration file(s)' "easy_install"
+ # section, even if this is a "develop" or "install" command, or some
+ # other embedding.
+ self._dry_run = None
+ self.verbose = self.distribution.verbose
+ self.distribution._set_command_options(
+ self, self.distribution.get_option_dict('easy_install')
+ )
+
+ def delete_blockers(self, blockers):
+ extant_blockers = (
+ filename for filename in blockers
+ if os.path.exists(filename) or os.path.islink(filename)
+ )
+ list(map(self._delete_path, extant_blockers))
+
+ def _delete_path(self, path):
+ log.info("Deleting %s", path)
+ if self.dry_run:
+ return
+
+ is_tree = os.path.isdir(path) and not os.path.islink(path)
+ remover = rmtree if is_tree else os.unlink
+ remover(path)
+
+ @staticmethod
+ def _render_version():
+ """
+ Render the Setuptools version and installation details, then exit.
+ """
ver = '{}.{}'.format(*sys.version_info)
- dist = get_distribution('setuptools')
- tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
- print(tmpl.format(**locals()))
- raise SystemExit()
-
+ dist = get_distribution('setuptools')
+ tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+ print(tmpl.format(**locals()))
+ raise SystemExit()
+
def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME
- self.version and self._render_version()
-
- py_version = sys.version.split()[0]
- prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
-
- self.config_vars = {
- 'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
- 'py_version_short': py_version[0:3],
- 'py_version_nodot': py_version[0] + py_version[2],
- 'sys_prefix': prefix,
- 'prefix': prefix,
- 'sys_exec_prefix': exec_prefix,
- 'exec_prefix': exec_prefix,
- # Only python 3.2+ has abiflags
- 'abiflags': getattr(sys, 'abiflags', ''),
- }
-
- if site.ENABLE_USER_SITE:
- self.config_vars['userbase'] = self.install_userbase
- self.config_vars['usersite'] = self.install_usersite
-
+ self.version and self._render_version()
+
+ py_version = sys.version.split()[0]
+ prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
+
+ self.config_vars = {
+ 'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ 'py_version_short': py_version[0:3],
+ 'py_version_nodot': py_version[0] + py_version[2],
+ 'sys_prefix': prefix,
+ 'prefix': prefix,
+ 'sys_exec_prefix': exec_prefix,
+ 'exec_prefix': exec_prefix,
+ # Only python 3.2+ has abiflags
+ 'abiflags': getattr(sys, 'abiflags', ''),
+ }
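# Note on the two derived keys above: for a two-digit minor version such as
# "3.10.4", py_version[0:3] yields "3.1" and py_version[0] + py_version[2]
# yields "31", so these slices only behave as intended for single-digit
# minor versions.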
+
+ if site.ENABLE_USER_SITE:
+ self.config_vars['userbase'] = self.install_userbase
+ self.config_vars['usersite'] = self.install_usersite
+
elif self.user:
log.warn("WARNING: The user site-packages directory is disabled.")
- self._fix_install_dir_for_user_site()
-
- self.expand_basedirs()
- self.expand_dirs()
-
+ self._fix_install_dir_for_user_site()
+
+ self.expand_basedirs()
+ self.expand_dirs()
+
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
- # If a non-default installation directory was specified, default the
- # script directory to match it.
- if self.script_dir is None:
- self.script_dir = self.install_dir
-
- if self.no_find_links is None:
- self.no_find_links = False
-
- # Let install_dir get set by install_lib command, which in turn
- # gets its info from the install command, and takes into account
- # --prefix and --home and all that other crud.
- self.set_undefined_options(
- 'install_lib', ('install_dir', 'install_dir')
- )
- # Likewise, set default script_dir from 'install_scripts.install_dir'
- self.set_undefined_options(
- 'install_scripts', ('install_dir', 'script_dir')
- )
-
- if self.user and self.install_purelib:
- self.install_dir = self.install_purelib
- self.script_dir = self.install_scripts
- # default --record from the install command
- self.set_undefined_options('install', ('record', 'record'))
- # Should this be moved to the if statement below? It's not used
- # elsewhere
- normpath = map(normalize_path, sys.path)
- self.all_site_dirs = get_site_dirs()
- if self.site_dirs is not None:
- site_dirs = [
- os.path.expanduser(s.strip()) for s in
- self.site_dirs.split(',')
- ]
- for d in site_dirs:
- if not os.path.isdir(d):
- log.warn("%s (in --site-dirs) does not exist", d)
- elif normalize_path(d) not in normpath:
- raise DistutilsOptionError(
- d + " (in --site-dirs) is not on sys.path"
- )
- else:
- self.all_site_dirs.append(normalize_path(d))
- if not self.editable:
- self.check_site_dir()
+ # If a non-default installation directory was specified, default the
+ # script directory to match it.
+ if self.script_dir is None:
+ self.script_dir = self.install_dir
+
+ if self.no_find_links is None:
+ self.no_find_links = False
+
+ # Let install_dir get set by install_lib command, which in turn
+ # gets its info from the install command, and takes into account
+ # --prefix and --home and all that other crud.
+ self.set_undefined_options(
+ 'install_lib', ('install_dir', 'install_dir')
+ )
+ # Likewise, set default script_dir from 'install_scripts.install_dir'
+ self.set_undefined_options(
+ 'install_scripts', ('install_dir', 'script_dir')
+ )
+
+ if self.user and self.install_purelib:
+ self.install_dir = self.install_purelib
+ self.script_dir = self.install_scripts
+ # default --record from the install command
+ self.set_undefined_options('install', ('record', 'record'))
+ # Should this be moved to the if statement below? It's not used
+ # elsewhere
+ normpath = map(normalize_path, sys.path)
+ self.all_site_dirs = get_site_dirs()
+ if self.site_dirs is not None:
+ site_dirs = [
+ os.path.expanduser(s.strip()) for s in
+ self.site_dirs.split(',')
+ ]
+ for d in site_dirs:
+ if not os.path.isdir(d):
+ log.warn("%s (in --site-dirs) does not exist", d)
+ elif normalize_path(d) not in normpath:
+ raise DistutilsOptionError(
+ d + " (in --site-dirs) is not on sys.path"
+ )
+ else:
+ self.all_site_dirs.append(normalize_path(d))
+ if not self.editable:
+ self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
- self.shadow_path = self.all_site_dirs[:]
- for path_item in self.install_dir, normalize_path(self.script_dir):
- if path_item not in self.shadow_path:
- self.shadow_path.insert(0, path_item)
-
- if self.allow_hosts is not None:
- hosts = [s.strip() for s in self.allow_hosts.split(',')]
- else:
- hosts = ['*']
- if self.package_index is None:
- self.package_index = self.create_index(
- self.index_url, search_path=self.shadow_path, hosts=hosts,
- )
- self.local_index = Environment(self.shadow_path + sys.path)
-
- if self.find_links is not None:
+ self.shadow_path = self.all_site_dirs[:]
+ for path_item in self.install_dir, normalize_path(self.script_dir):
+ if path_item not in self.shadow_path:
+ self.shadow_path.insert(0, path_item)
+
+ if self.allow_hosts is not None:
+ hosts = [s.strip() for s in self.allow_hosts.split(',')]
+ else:
+ hosts = ['*']
+ if self.package_index is None:
+ self.package_index = self.create_index(
+ self.index_url, search_path=self.shadow_path, hosts=hosts,
+ )
+ self.local_index = Environment(self.shadow_path + sys.path)
+
+ if self.find_links is not None:
if isinstance(self.find_links, str):
- self.find_links = self.find_links.split()
- else:
- self.find_links = []
- if self.local_snapshots_ok:
- self.package_index.scan_egg_links(self.shadow_path + sys.path)
- if not self.no_find_links:
- self.package_index.add_find_links(self.find_links)
- self.set_undefined_options('install_lib', ('optimize', 'optimize'))
- if not isinstance(self.optimize, int):
- try:
- self.optimize = int(self.optimize)
- if not (0 <= self.optimize <= 2):
- raise ValueError
+ self.find_links = self.find_links.split()
+ else:
+ self.find_links = []
+ if self.local_snapshots_ok:
+ self.package_index.scan_egg_links(self.shadow_path + sys.path)
+ if not self.no_find_links:
+ self.package_index.add_find_links(self.find_links)
+ self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ if not (0 <= self.optimize <= 2):
+ raise ValueError
except ValueError as e:
raise DistutilsOptionError(
"--optimize must be 0, 1, or 2"
) from e
-
- if self.editable and not self.build_directory:
- raise DistutilsArgError(
- "Must specify a build directory (-b) when using --editable"
- )
- if not self.args:
- raise DistutilsArgError(
- "No urls, filenames, or requirements specified (see --help)")
-
- self.outputs = []
-
- def _fix_install_dir_for_user_site(self):
- """
- Fix the install_dir if "--user" was used.
- """
- if not self.user or not site.ENABLE_USER_SITE:
- return
-
- self.create_home_path()
- if self.install_userbase is None:
- msg = "User base directory is not specified"
- raise DistutilsPlatformError(msg)
- self.install_base = self.install_platbase = self.install_userbase
- scheme_name = os.name.replace('posix', 'unix') + '_user'
- self.select_scheme(scheme_name)
-
- def _expand_attrs(self, attrs):
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- if os.name == 'posix' or os.name == 'nt':
- val = os.path.expanduser(val)
- val = subst_vars(val, self.config_vars)
- setattr(self, attr, val)
-
- def expand_basedirs(self):
- """Calls `os.path.expanduser` on install_base, install_platbase and
- root."""
- self._expand_attrs(['install_base', 'install_platbase', 'root'])
-
- def expand_dirs(self):
- """Calls `os.path.expanduser` on install dirs."""
+
+ if self.editable and not self.build_directory:
+ raise DistutilsArgError(
+ "Must specify a build directory (-b) when using --editable"
+ )
+ if not self.args:
+ raise DistutilsArgError(
+ "No urls, filenames, or requirements specified (see --help)")
+
+ self.outputs = []
+
+ def _fix_install_dir_for_user_site(self):
+ """
+ Fix the install_dir if "--user" was used.
+ """
+ if not self.user or not site.ENABLE_USER_SITE:
+ return
+
+ self.create_home_path()
+ if self.install_userbase is None:
+ msg = "User base directory is not specified"
+ raise DistutilsPlatformError(msg)
+ self.install_base = self.install_platbase = self.install_userbase
+ scheme_name = os.name.replace('posix', 'unix') + '_user'
+ self.select_scheme(scheme_name)
+
+ def _expand_attrs(self, attrs):
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ if os.name == 'posix' or os.name == 'nt':
+ val = os.path.expanduser(val)
+ val = subst_vars(val, self.config_vars)
+ setattr(self, attr, val)
+
+ def expand_basedirs(self):
+ """Calls `os.path.expanduser` on install_base, install_platbase and
+ root."""
+ self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+ def expand_dirs(self):
+ """Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
@@ -399,7 +399,7 @@ class easy_install(Command):
'install_data',
]
self._expand_attrs(dirs)
-
+
def run(self, show_deprecation=True):
if show_deprecation:
self.announce(
@@ -407,231 +407,231 @@ class easy_install(Command):
"and will be removed in a future version.",
log.WARN,
)
- if self.verbose != self.distribution.verbose:
- log.set_verbosity(self.verbose)
- try:
- for spec in self.args:
- self.easy_install(spec, not self.no_deps)
- if self.record:
- outputs = self.outputs
- if self.root: # strip any package prefix
- root_len = len(self.root)
- for counter in range(len(outputs)):
- outputs[counter] = outputs[counter][root_len:]
- from distutils import file_util
-
- self.execute(
- file_util.write_file, (self.record, outputs),
- "writing list of installed files to '%s'" %
- self.record
- )
- self.warn_deprecated_options()
- finally:
- log.set_verbosity(self.distribution.verbose)
-
- def pseudo_tempname(self):
- """Return a pseudo-tempname base in the install directory.
- This code is intentionally naive; if a malicious party can write to
- the target directory you're already in deep doodoo.
- """
- try:
- pid = os.getpid()
+ if self.verbose != self.distribution.verbose:
+ log.set_verbosity(self.verbose)
+ try:
+ for spec in self.args:
+ self.easy_install(spec, not self.no_deps)
+ if self.record:
+ outputs = self.outputs
+ if self.root: # strip any package prefix
+ root_len = len(self.root)
+ for counter in range(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ from distutils import file_util
+
+ self.execute(
+ file_util.write_file, (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record
+ )
+ self.warn_deprecated_options()
+ finally:
+ log.set_verbosity(self.distribution.verbose)
+
+ def pseudo_tempname(self):
+ """Return a pseudo-tempname base in the install directory.
+ This code is intentionally naive; if a malicious party can write to
+ the target directory you're already in deep doodoo.
+ """
+ try:
+ pid = os.getpid()
except Exception:
- pid = random.randint(0, sys.maxsize)
- return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
-
- def warn_deprecated_options(self):
- pass
-
+ pid = random.randint(0, sys.maxsize)
+ return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
+ def warn_deprecated_options(self):
+ pass
+
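# Aside: pseudo_tempname above derives a predictable per-process scratch
# name from the pid; a standalone sketch (install_dir is illustrative):
import os
install_dir = "/tmp/site-packages"
print(os.path.join(install_dir, "test-easy-install-%s" % os.getpid()))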
def check_site_dir(self): # noqa: C901 # is too complex (12) # FIXME
- """Verify that self.install_dir is .pth-capable dir, if needed"""
-
- instdir = normalize_path(self.install_dir)
- pth_file = os.path.join(instdir, 'easy-install.pth')
-
+ """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+ instdir = normalize_path(self.install_dir)
+ pth_file = os.path.join(instdir, 'easy-install.pth')
+
if not os.path.exists(instdir):
try:
os.makedirs(instdir)
except (OSError, IOError):
self.cant_write_to_target()
- # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
- is_site_dir = instdir in self.all_site_dirs
-
- if not is_site_dir and not self.multi_version:
- # No? Then directly test whether it does .pth file processing
- is_site_dir = self.check_pth_processing()
- else:
- # make sure we can write to target dir
- testfile = self.pseudo_tempname() + '.write-test'
- test_exists = os.path.exists(testfile)
- try:
- if test_exists:
- os.unlink(testfile)
- open(testfile, 'w').close()
- os.unlink(testfile)
- except (OSError, IOError):
- self.cant_write_to_target()
-
- if not is_site_dir and not self.multi_version:
+ # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+ is_site_dir = instdir in self.all_site_dirs
+
+ if not is_site_dir and not self.multi_version:
+ # No? Then directly test whether it does .pth file processing
+ is_site_dir = self.check_pth_processing()
+ else:
+ # make sure we can write to target dir
+ testfile = self.pseudo_tempname() + '.write-test'
+ test_exists = os.path.exists(testfile)
+ try:
+ if test_exists:
+ os.unlink(testfile)
+ open(testfile, 'w').close()
+ os.unlink(testfile)
+ except (OSError, IOError):
+ self.cant_write_to_target()
+
+ if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir with easy_install
pythonpath = os.environ.get('PYTHONPATH', '')
log.warn(self.__no_default_msg, self.install_dir, pythonpath)
-
- if is_site_dir:
- if self.pth_file is None:
- self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
- else:
- self.pth_file = None
-
+
+ if is_site_dir:
+ if self.pth_file is None:
+ self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
+ else:
+ self.pth_file = None
+
if self.multi_version and not os.path.exists(pth_file):
self.pth_file = None # don't create a .pth file
- self.install_dir = instdir
-
- __cant_write_msg = textwrap.dedent("""
- can't create or remove files in install directory
-
- The following error occurred while trying to add or remove files in the
- installation directory:
-
- %s
-
- The installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
+ self.install_dir = instdir
+
+ __cant_write_msg = textwrap.dedent("""
+ can't create or remove files in install directory
+
+ The following error occurred while trying to add or remove files in the
+ installation directory:
+
+ %s
+
+ The installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
""").lstrip() # noqa
-
- __not_exists_id = textwrap.dedent("""
- This directory does not currently exist. Please create it and try again, or
- choose a different installation directory (using the -d or --install-dir
- option).
+
+ __not_exists_id = textwrap.dedent("""
+ This directory does not currently exist. Please create it and try again, or
+ choose a different installation directory (using the -d or --install-dir
+ option).
""").lstrip() # noqa
-
- __access_msg = textwrap.dedent("""
- Perhaps your account does not have write access to this directory? If the
- installation directory is a system-owned directory, you may need to sign in
- as the administrator or "root" account. If you do not have administrative
- access to this machine, you may wish to choose a different installation
- directory, preferably one that is listed in your PYTHONPATH environment
- variable.
-
- For information on other options, you may wish to consult the
- documentation at:
-
+
+ __access_msg = textwrap.dedent("""
+ Perhaps your account does not have write access to this directory? If the
+ installation directory is a system-owned directory, you may need to sign in
+ as the administrator or "root" account. If you do not have administrative
+ access to this machine, you may wish to choose a different installation
+ directory, preferably one that is listed in your PYTHONPATH environment
+ variable.
+
+ For information on other options, you may wish to consult the
+ documentation at:
+
https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
-
- Please make the appropriate changes for your system and try again.
+
+ Please make the appropriate changes for your system and try again.
""").lstrip() # noqa
-
- def cant_write_to_target(self):
- msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
-
- if not os.path.exists(self.install_dir):
- msg += '\n' + self.__not_exists_id
- else:
- msg += '\n' + self.__access_msg
- raise DistutilsError(msg)
-
- def check_pth_processing(self):
- """Empirically verify whether .pth files are supported in inst. dir"""
- instdir = self.install_dir
- log.info("Checking .pth file support in %s", instdir)
- pth_file = self.pseudo_tempname() + ".pth"
- ok_file = pth_file + '.ok'
- ok_exists = os.path.exists(ok_file)
+
+ def cant_write_to_target(self):
+ msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
+
+ if not os.path.exists(self.install_dir):
+ msg += '\n' + self.__not_exists_id
+ else:
+ msg += '\n' + self.__access_msg
+ raise DistutilsError(msg)
+
+ def check_pth_processing(self):
+ """Empirically verify whether .pth files are supported in inst. dir"""
+ instdir = self.install_dir
+ log.info("Checking .pth file support in %s", instdir)
+ pth_file = self.pseudo_tempname() + ".pth"
+ ok_file = pth_file + '.ok'
+ ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
- try:
- if ok_exists:
- os.unlink(ok_file)
- dirname = os.path.dirname(ok_file)
+ try:
+ if ok_exists:
+ os.unlink(ok_file)
+ dirname = os.path.dirname(ok_file)
os.makedirs(dirname, exist_ok=True)
- f = open(pth_file, 'w')
- except (OSError, IOError):
- self.cant_write_to_target()
- else:
- try:
+ f = open(pth_file, 'w')
+ except (OSError, IOError):
+ self.cant_write_to_target()
+ else:
+ try:
f.write(tmpl.format(**locals()))
- f.close()
- f = None
- executable = sys.executable
- if os.name == 'nt':
- dirname, basename = os.path.split(executable)
- alt = os.path.join(dirname, 'pythonw.exe')
+ f.close()
+ f = None
+ executable = sys.executable
+ if os.name == 'nt':
+ dirname, basename = os.path.split(executable)
+ alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
- # use pythonw.exe to avoid opening a console window
- executable = alt
-
- from distutils.spawn import spawn
-
- spawn([executable, '-E', '-c', 'pass'], 0)
-
- if os.path.exists(ok_file):
- log.info(
- "TEST PASSED: %s appears to support .pth files",
- instdir
- )
- return True
- finally:
- if f:
- f.close()
- if os.path.exists(ok_file):
- os.unlink(ok_file)
- if os.path.exists(pth_file):
- os.unlink(pth_file)
- if not self.multi_version:
- log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
- return False
-
- def install_egg_scripts(self, dist):
- """Write all the scripts for `dist`, unless scripts are excluded"""
- if not self.exclude_scripts and dist.metadata_isdir('scripts'):
- for script_name in dist.metadata_listdir('scripts'):
- if dist.metadata_isdir('scripts/' + script_name):
- # The "script" is a directory, likely a Python 3
- # __pycache__ directory, so skip it.
- continue
- self.install_script(
- dist, script_name,
- dist.get_metadata('scripts/' + script_name)
- )
- self.install_wrapper_scripts(dist)
-
- def add_output(self, path):
- if os.path.isdir(path):
- for base, dirs, files in os.walk(path):
- for filename in files:
- self.outputs.append(os.path.join(base, filename))
- else:
- self.outputs.append(path)
-
- def not_editable(self, spec):
- if self.editable:
- raise DistutilsArgError(
- "Invalid argument %r: you can't use filenames or URLs "
- "with --editable (except via the --find-links option)."
- % (spec,)
- )
-
- def check_editable(self, spec):
- if not self.editable:
- return
-
- if os.path.exists(os.path.join(self.build_directory, spec.key)):
- raise DistutilsArgError(
- "%r already exists in %s; can't do a checkout there" %
- (spec.key, self.build_directory)
- )
-
+ # use pythonw.exe to avoid opening a console window
+ executable = alt
+
+ from distutils.spawn import spawn
+
+ spawn([executable, '-E', '-c', 'pass'], 0)
+
+ if os.path.exists(ok_file):
+ log.info(
+ "TEST PASSED: %s appears to support .pth files",
+ instdir
+ )
+ return True
+ finally:
+ if f:
+ f.close()
+ if os.path.exists(ok_file):
+ os.unlink(ok_file)
+ if os.path.exists(pth_file):
+ os.unlink(pth_file)
+ if not self.multi_version:
+ log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+ return False
+
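# Aside: the probe in check_pth_processing above writes a throwaway .pth
# whose single line creates an OK marker, spawns `python -E -c pass`, and
# then looks for that marker. The generated .pth line, reproduced with an
# illustrative path:
import textwrap
ok_file = "/tmp/test-easy-install-1234.pth.ok"  # hypothetical
tmpl = textwrap.dedent("""
    import os
    f = open({ok_file!r}, 'w')
    f.write('OK')
    f.close()
""").strip().replace('\n', '; ') + '\n'
print(tmpl.format(ok_file=ok_file))
# -> import os; f = open('/tmp/test-easy-install-1234.pth.ok', 'w'); ...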
+ def install_egg_scripts(self, dist):
+ """Write all the scripts for `dist`, unless scripts are excluded"""
+ if not self.exclude_scripts and dist.metadata_isdir('scripts'):
+ for script_name in dist.metadata_listdir('scripts'):
+ if dist.metadata_isdir('scripts/' + script_name):
+ # The "script" is a directory, likely a Python 3
+ # __pycache__ directory, so skip it.
+ continue
+ self.install_script(
+ dist, script_name,
+ dist.get_metadata('scripts/' + script_name)
+ )
+ self.install_wrapper_scripts(dist)
+
+ def add_output(self, path):
+ if os.path.isdir(path):
+ for base, dirs, files in os.walk(path):
+ for filename in files:
+ self.outputs.append(os.path.join(base, filename))
+ else:
+ self.outputs.append(path)
+
+ def not_editable(self, spec):
+ if self.editable:
+ raise DistutilsArgError(
+ "Invalid argument %r: you can't use filenames or URLs "
+ "with --editable (except via the --find-links option)."
+ % (spec,)
+ )
+
+ def check_editable(self, spec):
+ if not self.editable:
+ return
+
+ if os.path.exists(os.path.join(self.build_directory, spec.key)):
+ raise DistutilsArgError(
+ "%r already exists in %s; can't do a checkout there" %
+ (spec.key, self.build_directory)
+ )
+
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
@@ -641,211 +641,211 @@ class easy_install(Command):
finally:
os.path.exists(tmpdir) and rmtree(tmpdir)
- def easy_install(self, spec, deps=False):
+ def easy_install(self, spec, deps=False):
with self._tmpdir() as tmpdir:
- if not isinstance(spec, Requirement):
- if URL_SCHEME(spec):
- # It's a url, download it to tmpdir and process
- self.not_editable(spec)
+ if not isinstance(spec, Requirement):
+ if URL_SCHEME(spec):
+ # It's a url, download it to tmpdir and process
+ self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
-
- elif os.path.exists(spec):
- # Existing file or directory, just process it directly
- self.not_editable(spec)
- return self.install_item(None, spec, tmpdir, deps, True)
- else:
- spec = parse_requirement_arg(spec)
-
- self.check_editable(spec)
- dist = self.package_index.fetch_distribution(
- spec, tmpdir, self.upgrade, self.editable,
- not self.always_copy, self.local_index
- )
- if dist is None:
- msg = "Could not find suitable distribution for %r" % spec
- if self.always_copy:
- msg += " (--always-copy skips system and development eggs)"
- raise DistutilsError(msg)
- elif dist.precedence == DEVELOP_DIST:
- # .egg-info dists don't need installing, just process deps
- self.process_distribution(spec, dist, deps, "Using")
- return dist
- else:
- return self.install_item(spec, dist.location, tmpdir, deps)
-
- def install_item(self, spec, download, tmpdir, deps, install_needed=False):
-
- # Installation is also needed if file in tmpdir or is not an egg
- install_needed = install_needed or self.always_copy
- install_needed = install_needed or os.path.dirname(download) == tmpdir
- install_needed = install_needed or not download.endswith('.egg')
- install_needed = install_needed or (
- self.always_copy_from is not None and
- os.path.dirname(normalize_path(download)) ==
- normalize_path(self.always_copy_from)
- )
-
- if spec and not install_needed:
- # at this point, we know it's a local .egg, we just don't know if
- # it's already installed.
- for dist in self.local_index[spec.project_name]:
- if dist.location == download:
- break
- else:
- install_needed = True # it's not in the local index
-
- log.info("Processing %s", os.path.basename(download))
-
- if install_needed:
- dists = self.install_eggs(spec, download, tmpdir)
- for dist in dists:
- self.process_distribution(spec, dist, deps)
- else:
- dists = [self.egg_distribution(download)]
- self.process_distribution(spec, dists[0], deps, "Using")
-
- if spec is not None:
- for dist in dists:
- if dist in spec:
- return dist
-
- def select_scheme(self, name):
- """Sets the install directories by applying the install schemes."""
- # it's the caller's problem if they supply a bad name!
- scheme = INSTALL_SCHEMES[name]
- for key in SCHEME_KEYS:
- attrname = 'install_' + key
- if getattr(self, attrname) is None:
- setattr(self, attrname, scheme[key])
-
+
+ elif os.path.exists(spec):
+ # Existing file or directory, just process it directly
+ self.not_editable(spec)
+ return self.install_item(None, spec, tmpdir, deps, True)
+ else:
+ spec = parse_requirement_arg(spec)
+
+ self.check_editable(spec)
+ dist = self.package_index.fetch_distribution(
+ spec, tmpdir, self.upgrade, self.editable,
+ not self.always_copy, self.local_index
+ )
+ if dist is None:
+ msg = "Could not find suitable distribution for %r" % spec
+ if self.always_copy:
+ msg += " (--always-copy skips system and development eggs)"
+ raise DistutilsError(msg)
+ elif dist.precedence == DEVELOP_DIST:
+ # .egg-info dists don't need installing, just process deps
+ self.process_distribution(spec, dist, deps, "Using")
+ return dist
+ else:
+ return self.install_item(spec, dist.location, tmpdir, deps)
+
+ def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+
+ # Installation is also needed if file in tmpdir or is not an egg
+ install_needed = install_needed or self.always_copy
+ install_needed = install_needed or os.path.dirname(download) == tmpdir
+ install_needed = install_needed or not download.endswith('.egg')
+ install_needed = install_needed or (
+ self.always_copy_from is not None and
+ os.path.dirname(normalize_path(download)) ==
+ normalize_path(self.always_copy_from)
+ )
+
+ if spec and not install_needed:
+ # at this point, we know it's a local .egg, we just don't know if
+ # it's already installed.
+ for dist in self.local_index[spec.project_name]:
+ if dist.location == download:
+ break
+ else:
+ install_needed = True # it's not in the local index
+
+ log.info("Processing %s", os.path.basename(download))
+
+ if install_needed:
+ dists = self.install_eggs(spec, download, tmpdir)
+ for dist in dists:
+ self.process_distribution(spec, dist, deps)
+ else:
+ dists = [self.egg_distribution(download)]
+ self.process_distribution(spec, dists[0], deps, "Using")
+
+ if spec is not None:
+ for dist in dists:
+ if dist in spec:
+ return dist
+
+ def select_scheme(self, name):
+ """Sets the install directories by applying the install schemes."""
+ # it's the caller's problem if they supply a bad name!
+ scheme = INSTALL_SCHEMES[name]
+ for key in SCHEME_KEYS:
+ attrname = 'install_' + key
+ if getattr(self, attrname) is None:
+ setattr(self, attrname, scheme[key])
+
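# Aside: select_scheme above fills only the still-unset install_* attributes
# from the named distutils scheme; pre-set values win. A minimal mirror with
# a made-up scheme table:
SCHEMES = {'unix_user': {'purelib': '$usersite', 'scripts': '$userbase/bin'}}

class FakeCmd:
    install_purelib = None            # unset -> taken from the scheme
    install_scripts = '/custom/bin'   # pre-set -> left alone

def select(cmd, name):
    for key, value in SCHEMES[name].items():
        attrname = 'install_' + key
        if getattr(cmd, attrname) is None:
            setattr(cmd, attrname, value)

cmd = FakeCmd()
select(cmd, 'unix_user')
print(cmd.install_purelib, cmd.install_scripts)  # $usersite /custom/bin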
# FIXME: 'easy_install.process_distribution' is too complex (12)
def process_distribution( # noqa: C901
self, requirement, dist, deps=True, *info,
):
- self.update_pth(dist)
- self.package_index.add(dist)
- if dist in self.local_index[dist.key]:
- self.local_index.remove(dist)
- self.local_index.add(dist)
- self.install_egg_scripts(dist)
- self.installed_projects[dist.key] = dist
- log.info(self.installation_report(requirement, dist, *info))
- if (dist.has_metadata('dependency_links.txt') and
- not self.no_find_links):
- self.package_index.add_find_links(
- dist.get_metadata_lines('dependency_links.txt')
- )
- if not deps and not self.always_copy:
- return
- elif requirement is not None and dist.key != requirement.key:
- log.warn("Skipping dependencies for %s", dist)
- return # XXX this is not the distribution we were looking for
- elif requirement is None or dist not in requirement:
- # if we wound up with a different version, resolve what we've got
- distreq = dist.as_requirement()
+ self.update_pth(dist)
+ self.package_index.add(dist)
+ if dist in self.local_index[dist.key]:
+ self.local_index.remove(dist)
+ self.local_index.add(dist)
+ self.install_egg_scripts(dist)
+ self.installed_projects[dist.key] = dist
+ log.info(self.installation_report(requirement, dist, *info))
+ if (dist.has_metadata('dependency_links.txt') and
+ not self.no_find_links):
+ self.package_index.add_find_links(
+ dist.get_metadata_lines('dependency_links.txt')
+ )
+ if not deps and not self.always_copy:
+ return
+ elif requirement is not None and dist.key != requirement.key:
+ log.warn("Skipping dependencies for %s", dist)
+ return # XXX this is not the distribution we were looking for
+ elif requirement is None or dist not in requirement:
+ # if we wound up with a different version, resolve what we've got
+ distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
- log.info("Processing dependencies for %s", requirement)
- try:
- distros = WorkingSet([]).resolve(
- [requirement], self.local_index, self.easy_install
- )
- except DistributionNotFound as e:
+ log.info("Processing dependencies for %s", requirement)
+ try:
+ distros = WorkingSet([]).resolve(
+ [requirement], self.local_index, self.easy_install
+ )
+ except DistributionNotFound as e:
raise DistutilsError(str(e)) from e
- except VersionConflict as e:
+ except VersionConflict as e:
raise DistutilsError(e.report()) from e
- if self.always_copy or self.always_copy_from:
- # Force all the relevant distros to be copied or activated
- for dist in distros:
- if dist.key not in self.installed_projects:
- self.easy_install(dist.as_requirement())
- log.info("Finished processing dependencies for %s", requirement)
-
- def should_unzip(self, dist):
- if self.zip_ok is not None:
- return not self.zip_ok
- if dist.has_metadata('not-zip-safe'):
- return True
- if not dist.has_metadata('zip-safe'):
- return True
- return False
-
- def maybe_move(self, spec, dist_filename, setup_base):
- dst = os.path.join(self.build_directory, spec.key)
- if os.path.exists(dst):
+ if self.always_copy or self.always_copy_from:
+ # Force all the relevant distros to be copied or activated
+ for dist in distros:
+ if dist.key not in self.installed_projects:
+ self.easy_install(dist.as_requirement())
+ log.info("Finished processing dependencies for %s", requirement)
+
+ def should_unzip(self, dist):
+ if self.zip_ok is not None:
+ return not self.zip_ok
+ if dist.has_metadata('not-zip-safe'):
+ return True
+ if not dist.has_metadata('zip-safe'):
+ return True
+ return False
+
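# Aside: should_unzip above boils down to this decision table -- an explicit
# --zip-ok/--always-unzip wins, otherwise a 'not-zip-safe' flag or a missing
# 'zip-safe' flag forces extraction:
def decide(zip_ok, not_zip_safe, zip_safe):
    if zip_ok is not None:
        return not zip_ok
    return not_zip_safe or not zip_safe

assert decide(None, False, False) is True   # unknown safety -> unzip
assert decide(None, False, True) is False   # declared zip-safe -> keep zipped
assert decide(True, True, False) is False   # explicit --zip-ok wins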
+ def maybe_move(self, spec, dist_filename, setup_base):
+ dst = os.path.join(self.build_directory, spec.key)
+ if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
- log.warn(msg, spec.key, self.build_directory, setup_base)
- return setup_base
- if os.path.isdir(dist_filename):
- setup_base = dist_filename
- else:
- if os.path.dirname(dist_filename) == setup_base:
- os.unlink(dist_filename) # get it out of the tmp dir
- contents = os.listdir(setup_base)
- if len(contents) == 1:
- dist_filename = os.path.join(setup_base, contents[0])
- if os.path.isdir(dist_filename):
- # if the only thing there is a directory, move it instead
- setup_base = dist_filename
- ensure_directory(dst)
- shutil.move(setup_base, dst)
- return dst
-
- def install_wrapper_scripts(self, dist):
- if self.exclude_scripts:
- return
- for args in ScriptWriter.best().get_args(dist):
- self.write_script(*args)
-
- def install_script(self, dist, script_name, script_text, dev_path=None):
- """Generate a legacy script wrapper and install it"""
- spec = str(dist.as_requirement())
- is_script = is_python_script(script_text, script_name)
-
- if is_script:
- body = self._load_template(dev_path) % locals()
- script_text = ScriptWriter.get_header(script_text) + body
+ log.warn(msg, spec.key, self.build_directory, setup_base)
+ return setup_base
+ if os.path.isdir(dist_filename):
+ setup_base = dist_filename
+ else:
+ if os.path.dirname(dist_filename) == setup_base:
+ os.unlink(dist_filename) # get it out of the tmp dir
+ contents = os.listdir(setup_base)
+ if len(contents) == 1:
+ dist_filename = os.path.join(setup_base, contents[0])
+ if os.path.isdir(dist_filename):
+ # if the only thing there is a directory, move it instead
+ setup_base = dist_filename
+ ensure_directory(dst)
+ shutil.move(setup_base, dst)
+ return dst
+
+ def install_wrapper_scripts(self, dist):
+ if self.exclude_scripts:
+ return
+ for args in ScriptWriter.best().get_args(dist):
+ self.write_script(*args)
+
+ def install_script(self, dist, script_name, script_text, dev_path=None):
+ """Generate a legacy script wrapper and install it"""
+ spec = str(dist.as_requirement())
+ is_script = is_python_script(script_text, script_name)
+
+ if is_script:
+ body = self._load_template(dev_path) % locals()
+ script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_bytes(script_text), 'b')
-
- @staticmethod
- def _load_template(dev_path):
- """
- There are a couple of template scripts in the package. This
- function loads one of them and prepares it for use.
- """
+
+ @staticmethod
+ def _load_template(dev_path):
+ """
+ There are a couple of template scripts in the package. This
+ function loads one of them and prepares it for use.
+ """
# See https://github.com/pypa/setuptools/issues/134 for info
- # on script file naming and downstream issues with SVR4
- name = 'script.tmpl'
- if dev_path:
- name = name.replace('.tmpl', ' (dev).tmpl')
-
- raw_bytes = resource_string('setuptools', name)
- return raw_bytes.decode('utf-8')
-
- def write_script(self, script_name, contents, mode="t", blockers=()):
- """Write an executable file to the scripts directory"""
- self.delete_blockers( # clean up old .py/.pyw w/o a script
- [os.path.join(self.script_dir, x) for x in blockers]
- )
- log.info("Installing %s script to %s", script_name, self.script_dir)
- target = os.path.join(self.script_dir, script_name)
- self.add_output(target)
-
+ # on script file naming and downstream issues with SVR4
+ name = 'script.tmpl'
+ if dev_path:
+ name = name.replace('.tmpl', ' (dev).tmpl')
+
+ raw_bytes = resource_string('setuptools', name)
+ return raw_bytes.decode('utf-8')
+
+ def write_script(self, script_name, contents, mode="t", blockers=()):
+ """Write an executable file to the scripts directory"""
+ self.delete_blockers( # clean up old .py/.pyw w/o a script
+ [os.path.join(self.script_dir, x) for x in blockers]
+ )
+ log.info("Installing %s script to %s", script_name, self.script_dir)
+ target = os.path.join(self.script_dir, script_name)
+ self.add_output(target)
+
if self.dry_run:
return
- mask = current_umask()
+ mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
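Editor's note: the executable mode above is derived from the process umask. A minimal standalone sketch of the same arithmetic (read_umask here is a local stand-in for the current_umask helper this file uses):

import os

def read_umask():
    # Query the process umask without permanently changing it
    # (os.umask both sets a new value and returns the previous one).
    previous = os.umask(0o022)
    os.umask(previous)
    return previous

# With a typical umask of 0o022 the script lands as 0o755 (rwxr-xr-x):
assert 0o777 - 0o022 == 0o755
print(oct(0o777 - read_umask()))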
-
- def install_eggs(self, spec, dist_filename, tmpdir):
- # .egg dirs or files are already built, so just return them
+
+ def install_eggs(self, spec, dist_filename, tmpdir):
+ # .egg dirs or files are already built, so just return them
installer_map = {
'.egg': self.install_egg,
'.exe': self.install_exe,
@@ -859,86 +859,86 @@ class easy_install(Command):
pass
else:
return [install_dist(dist_filename, tmpdir)]
-
- # Anything else, try to extract and build
- setup_base = tmpdir
- if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
- unpack_archive(dist_filename, tmpdir, self.unpack_progress)
- elif os.path.isdir(dist_filename):
- setup_base = os.path.abspath(dist_filename)
-
- if (setup_base.startswith(tmpdir) # something we downloaded
- and self.build_directory and spec is not None):
- setup_base = self.maybe_move(spec, dist_filename, setup_base)
-
- # Find the setup.py file
- setup_script = os.path.join(setup_base, 'setup.py')
-
- if not os.path.exists(setup_script):
- setups = glob(os.path.join(setup_base, '*', 'setup.py'))
- if not setups:
- raise DistutilsError(
- "Couldn't find a setup script in %s" %
- os.path.abspath(dist_filename)
- )
- if len(setups) > 1:
- raise DistutilsError(
- "Multiple setup scripts in %s" %
- os.path.abspath(dist_filename)
- )
- setup_script = setups[0]
-
- # Now run it, and return the result
- if self.editable:
- log.info(self.report_editable(spec, setup_script))
- return []
- else:
- return self.build_and_install(setup_script, setup_base)
-
- def egg_distribution(self, egg_path):
- if os.path.isdir(egg_path):
- metadata = PathMetadata(egg_path, os.path.join(egg_path,
- 'EGG-INFO'))
- else:
- metadata = EggMetadata(zipimport.zipimporter(egg_path))
- return Distribution.from_filename(egg_path, metadata=metadata)
-
+
+ # Anything else, try to extract and build
+ setup_base = tmpdir
+ if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+ unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+ elif os.path.isdir(dist_filename):
+ setup_base = os.path.abspath(dist_filename)
+
+ if (setup_base.startswith(tmpdir) # something we downloaded
+ and self.build_directory and spec is not None):
+ setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+ # Find the setup.py file
+ setup_script = os.path.join(setup_base, 'setup.py')
+
+ if not os.path.exists(setup_script):
+ setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+ if not setups:
+ raise DistutilsError(
+ "Couldn't find a setup script in %s" %
+ os.path.abspath(dist_filename)
+ )
+ if len(setups) > 1:
+ raise DistutilsError(
+ "Multiple setup scripts in %s" %
+ os.path.abspath(dist_filename)
+ )
+ setup_script = setups[0]
+
+ # Now run it, and return the result
+ if self.editable:
+ log.info(self.report_editable(spec, setup_script))
+ return []
+ else:
+ return self.build_and_install(setup_script, setup_base)
+
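Editor's note: dispatch in install_eggs is by filename suffix via the installer_map shown above. A small self-contained sketch of that lookup (string method names stand in for the bound installers):

installer_map = {
    '.egg': 'install_egg',
    '.exe': 'install_exe',
    '.whl': 'install_wheel',
}

def pick_installer(dist_filename):
    # Lower-case the name and take the first matching suffix;
    # anything unmatched falls through to extract-and-build.
    lowered = dist_filename.lower()
    for ext, installer in installer_map.items():
        if lowered.endswith(ext):
            return installer
    return None

print(pick_installer('Sample-1.0-py3.8.egg'))  # install_egg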
+ def egg_distribution(self, egg_path):
+ if os.path.isdir(egg_path):
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,
+ 'EGG-INFO'))
+ else:
+ metadata = EggMetadata(zipimport.zipimporter(egg_path))
+ return Distribution.from_filename(egg_path, metadata=metadata)
+
# FIXME: 'easy_install.install_egg' is too complex (11)
def install_egg(self, egg_path, tmpdir): # noqa: C901
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
- destination = os.path.abspath(destination)
- if not self.dry_run:
- ensure_directory(destination)
-
- dist = self.egg_distribution(egg_path)
- if not samefile(egg_path, destination):
- if os.path.isdir(destination) and not os.path.islink(destination):
- dir_util.remove_tree(destination, dry_run=self.dry_run)
- elif os.path.exists(destination):
+ destination = os.path.abspath(destination)
+ if not self.dry_run:
+ ensure_directory(destination)
+
+ dist = self.egg_distribution(egg_path)
+ if not samefile(egg_path, destination):
+ if os.path.isdir(destination) and not os.path.islink(destination):
+ dir_util.remove_tree(destination, dry_run=self.dry_run)
+ elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
- try:
- new_dist_is_zipped = False
- if os.path.isdir(egg_path):
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copytree, "Copying"
- elif self.should_unzip(dist):
- self.mkpath(destination)
- f, m = self.unpack_and_compile, "Extracting"
- else:
- new_dist_is_zipped = True
- if egg_path.startswith(tmpdir):
- f, m = shutil.move, "Moving"
- else:
- f, m = shutil.copy2, "Copying"
+ try:
+ new_dist_is_zipped = False
+ if os.path.isdir(egg_path):
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copytree, "Copying"
+ elif self.should_unzip(dist):
+ self.mkpath(destination)
+ f, m = self.unpack_and_compile, "Extracting"
+ else:
+ new_dist_is_zipped = True
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
@@ -952,111 +952,111 @@ class easy_install(Command):
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
- update_dist_caches(destination, fix_zipimporter_caches=False)
- raise
-
- self.add_output(destination)
- return self.egg_distribution(destination)
-
- def install_exe(self, dist_filename, tmpdir):
- # See if it's valid, get data
- cfg = extract_wininst_cfg(dist_filename)
- if cfg is None:
- raise DistutilsError(
- "%s is not a valid distutils Windows .exe" % dist_filename
- )
- # Create a dummy distribution object until we build the real distro
- dist = Distribution(
- None,
- project_name=cfg.get('metadata', 'name'),
- version=cfg.get('metadata', 'version'), platform=get_platform(),
- )
-
- # Convert the .exe to an unpacked egg
+ update_dist_caches(destination, fix_zipimporter_caches=False)
+ raise
+
+ self.add_output(destination)
+ return self.egg_distribution(destination)
+
+ def install_exe(self, dist_filename, tmpdir):
+ # See if it's valid, get data
+ cfg = extract_wininst_cfg(dist_filename)
+ if cfg is None:
+ raise DistutilsError(
+ "%s is not a valid distutils Windows .exe" % dist_filename
+ )
+ # Create a dummy distribution object until we build the real distro
+ dist = Distribution(
+ None,
+ project_name=cfg.get('metadata', 'name'),
+ version=cfg.get('metadata', 'version'), platform=get_platform(),
+ )
+
+ # Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
- egg_tmp = egg_path + '.tmp'
- _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
- pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
- ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
- dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
- self.exe_to_egg(dist_filename, egg_tmp)
-
- # Write EGG-INFO/PKG-INFO
- if not os.path.exists(pkg_inf):
- f = open(pkg_inf, 'w')
- f.write('Metadata-Version: 1.0\n')
- for k, v in cfg.items('metadata'):
- if k != 'target_version':
- f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
- f.close()
- script_dir = os.path.join(_egg_info, 'scripts')
- # delete entry-point scripts to avoid duping
+ egg_tmp = egg_path + '.tmp'
+ _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+ pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
+ ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
+ dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
+ self.exe_to_egg(dist_filename, egg_tmp)
+
+ # Write EGG-INFO/PKG-INFO
+ if not os.path.exists(pkg_inf):
+ f = open(pkg_inf, 'w')
+ f.write('Metadata-Version: 1.0\n')
+ for k, v in cfg.items('metadata'):
+ if k != 'target_version':
+ f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
+ f.close()
+ script_dir = os.path.join(_egg_info, 'scripts')
+ # delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
- # Build .egg file from tmpdir
- bdist_egg.make_zipfile(
+ # Build .egg file from tmpdir
+ bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
- )
- # install the .egg
- return self.install_egg(egg_path, tmpdir)
-
+ )
+ # install the .egg
+ return self.install_egg(egg_path, tmpdir)
+
# FIXME: 'easy_install.exe_to_egg' is too complex (12)
def exe_to_egg(self, dist_filename, egg_tmp): # noqa: C901
- """Extract a bdist_wininst to the directories an egg would use"""
- # Check for .pth file and set up prefix translations
- prefixes = get_exe_prefixes(dist_filename)
- to_compile = []
- native_libs = []
- top_level = {}
-
- def process(src, dst):
- s = src.lower()
- for old, new in prefixes:
- if s.startswith(old):
- src = new + src[len(old):]
- parts = src.split('/')
- dst = os.path.join(egg_tmp, *parts)
- dl = dst.lower()
- if dl.endswith('.pyd') or dl.endswith('.dll'):
- parts[-1] = bdist_egg.strip_module(parts[-1])
- top_level[os.path.splitext(parts[0])[0]] = 1
- native_libs.append(src)
- elif dl.endswith('.py') and old != 'SCRIPTS/':
- top_level[os.path.splitext(parts[0])[0]] = 1
- to_compile.append(dst)
- return dst
- if not src.endswith('.pth'):
- log.warn("WARNING: can't process %s", src)
- return None
-
- # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
- unpack_archive(dist_filename, egg_tmp, process)
- stubs = []
- for res in native_libs:
- if res.lower().endswith('.pyd'): # create stubs for .pyd's
- parts = res.split('/')
- resource = parts[-1]
- parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
- pyfile = os.path.join(egg_tmp, *parts)
- to_compile.append(pyfile)
- stubs.append(pyfile)
- bdist_egg.write_stub(resource, pyfile)
- self.byte_compile(to_compile) # compile .py's
- bdist_egg.write_safety_flag(
- os.path.join(egg_tmp, 'EGG-INFO'),
- bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
-
- for name in 'top_level', 'native_libs':
- if locals()[name]:
- txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
- if not os.path.exists(txt):
- f = open(txt, 'w')
- f.write('\n'.join(locals()[name]) + '\n')
- f.close()
-
+ """Extract a bdist_wininst to the directories an egg would use"""
+ # Check for .pth file and set up prefix translations
+ prefixes = get_exe_prefixes(dist_filename)
+ to_compile = []
+ native_libs = []
+ top_level = {}
+
+ def process(src, dst):
+ s = src.lower()
+ for old, new in prefixes:
+ if s.startswith(old):
+ src = new + src[len(old):]
+ parts = src.split('/')
+ dst = os.path.join(egg_tmp, *parts)
+ dl = dst.lower()
+ if dl.endswith('.pyd') or dl.endswith('.dll'):
+ parts[-1] = bdist_egg.strip_module(parts[-1])
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ native_libs.append(src)
+ elif dl.endswith('.py') and old != 'SCRIPTS/':
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ to_compile.append(dst)
+ return dst
+ if not src.endswith('.pth'):
+ log.warn("WARNING: can't process %s", src)
+ return None
+
+ # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+ unpack_archive(dist_filename, egg_tmp, process)
+ stubs = []
+ for res in native_libs:
+ if res.lower().endswith('.pyd'): # create stubs for .pyd's
+ parts = res.split('/')
+ resource = parts[-1]
+ parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
+ pyfile = os.path.join(egg_tmp, *parts)
+ to_compile.append(pyfile)
+ stubs.append(pyfile)
+ bdist_egg.write_stub(resource, pyfile)
+ self.byte_compile(to_compile) # compile .py's
+ bdist_egg.write_safety_flag(
+ os.path.join(egg_tmp, 'EGG-INFO'),
+ bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
+
+ for name in 'top_level', 'native_libs':
+ if locals()[name]:
+ txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
+ if not os.path.exists(txt):
+ f = open(txt, 'w')
+ f.write('\n'.join(locals()[name]) + '\n')
+ f.close()
+
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
@@ -1086,155 +1086,155 @@ class easy_install(Command):
self.add_output(destination)
return self.egg_distribution(destination)
- __mv_warning = textwrap.dedent("""
- Because this distribution was installed --multi-version, before you can
- import modules from this package in an application, you will need to
- 'import pkg_resources' and then use a 'require()' call similar to one of
- these examples, in order to select the desired version:
-
- pkg_resources.require("%(name)s") # latest installed version
- pkg_resources.require("%(name)s==%(version)s") # this exact version
- pkg_resources.require("%(name)s>=%(version)s") # this version or higher
+ __mv_warning = textwrap.dedent("""
+ Because this distribution was installed --multi-version, before you can
+ import modules from this package in an application, you will need to
+ 'import pkg_resources' and then use a 'require()' call similar to one of
+ these examples, in order to select the desired version:
+
+ pkg_resources.require("%(name)s") # latest installed version
+ pkg_resources.require("%(name)s==%(version)s") # this exact version
+ pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip() # noqa
-
- __id_warning = textwrap.dedent("""
- Note also that the installation directory must be on sys.path at runtime for
- this to work. (e.g. by being the application's script directory, by being on
- PYTHONPATH, or by being added to sys.path by your code.)
+
+ __id_warning = textwrap.dedent("""
+ Note also that the installation directory must be on sys.path at runtime for
+ this to work. (e.g. by being the application's script directory, by being on
+ PYTHONPATH, or by being added to sys.path by your code.)
""") # noqa
-
- def installation_report(self, req, dist, what="Installed"):
- """Helpful installation message for display to package users"""
- msg = "\n%(what)s %(eggloc)s%(extras)s"
- if self.multi_version and not self.no_report:
- msg += '\n' + self.__mv_warning
- if self.install_dir not in map(normalize_path, sys.path):
- msg += '\n' + self.__id_warning
-
- eggloc = dist.location
- name = dist.project_name
- version = dist.version
- extras = '' # TODO: self.report_extras(req, dist)
- return msg % locals()
-
- __editable_msg = textwrap.dedent("""
- Extracted editable version of %(spec)s to %(dirname)s
-
- If it uses setuptools in its setup script, you can activate it in
- "development" mode by going to that directory and running::
-
- %(python)s setup.py develop
-
- See the setuptools documentation for the "develop" command for more info.
+
+ def installation_report(self, req, dist, what="Installed"):
+ """Helpful installation message for display to package users"""
+ msg = "\n%(what)s %(eggloc)s%(extras)s"
+ if self.multi_version and not self.no_report:
+ msg += '\n' + self.__mv_warning
+ if self.install_dir not in map(normalize_path, sys.path):
+ msg += '\n' + self.__id_warning
+
+ eggloc = dist.location
+ name = dist.project_name
+ version = dist.version
+ extras = '' # TODO: self.report_extras(req, dist)
+ return msg % locals()
+
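Editor's note: the report templates above are filled by name from the method's local variables via '%(name)s' interpolation. A tiny sketch with illustrative values:

msg = "\n%(what)s %(eggloc)s%(extras)s"
what, eggloc, extras = 'Installed', '/tmp/Sample-1.0-py3.8.egg', ''
print(msg % locals())  # "\nInstalled /tmp/Sample-1.0-py3.8.egg"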
+ __editable_msg = textwrap.dedent("""
+ Extracted editable version of %(spec)s to %(dirname)s
+
+ If it uses setuptools in its setup script, you can activate it in
+ "development" mode by going to that directory and running::
+
+ %(python)s setup.py develop
+
+ See the setuptools documentation for the "develop" command for more info.
""").lstrip() # noqa
-
- def report_editable(self, spec, setup_script):
- dirname = os.path.dirname(setup_script)
- python = sys.executable
- return '\n' + self.__editable_msg % locals()
-
- def run_setup(self, setup_script, setup_base, args):
- sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
- sys.modules.setdefault('distutils.command.egg_info', egg_info)
-
- args = list(args)
- if self.verbose > 2:
- v = 'v' * (self.verbose - 1)
- args.insert(0, '-' + v)
- elif self.verbose < 2:
- args.insert(0, '-q')
- if self.dry_run:
- args.insert(0, '-n')
- log.info(
- "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
- )
- try:
- run_setup(setup_script, args)
- except SystemExit as v:
+
+ def report_editable(self, spec, setup_script):
+ dirname = os.path.dirname(setup_script)
+ python = sys.executable
+ return '\n' + self.__editable_msg % locals()
+
+ def run_setup(self, setup_script, setup_base, args):
+ sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+ sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+ args = list(args)
+ if self.verbose > 2:
+ v = 'v' * (self.verbose - 1)
+ args.insert(0, '-' + v)
+ elif self.verbose < 2:
+ args.insert(0, '-q')
+ if self.dry_run:
+ args.insert(0, '-n')
+ log.info(
+ "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
+ )
+ try:
+ run_setup(setup_script, args)
+ except SystemExit as v:
raise DistutilsError(
"Setup script exited with %s" % (v.args[0],)
) from v
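Editor's note: run_setup's argument prelude turns verbosity and dry-run settings into distutils flags, inserted at the front of the argument list. A standalone sketch with illustrative values:

args = ['bdist_egg']
verbose, dry_run = 3, True
if verbose > 2:
    args.insert(0, '-' + 'v' * (verbose - 1))  # '-vv' for verbose == 3
elif verbose < 2:
    args.insert(0, '-q')
if dry_run:
    args.insert(0, '-n')
print(args)  # ['-n', '-vv', 'bdist_egg']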
-
- def build_and_install(self, setup_script, setup_base):
- args = ['bdist_egg', '--dist-dir']
-
- dist_dir = tempfile.mkdtemp(
- prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
- )
- try:
- self._set_fetcher_options(os.path.dirname(setup_script))
- args.append(dist_dir)
-
- self.run_setup(setup_script, setup_base, args)
- all_eggs = Environment([dist_dir])
- eggs = []
- for key in all_eggs:
- for dist in all_eggs[key]:
- eggs.append(self.install_egg(dist.location, setup_base))
- if not eggs and not self.dry_run:
- log.warn("No eggs found in %s (setup script problem?)",
- dist_dir)
- return eggs
- finally:
- rmtree(dist_dir)
- log.set_verbosity(self.verbose) # restore our log verbosity
-
- def _set_fetcher_options(self, base):
- """
- When easy_install is about to run bdist_egg on a source dist, that
- source dist might have 'setup_requires' directives, requiring
- additional fetching. Ensure the fetcher options given to easy_install
- are available to that command as well.
- """
- # find the fetch options from easy_install and write them out
- # to the setup.cfg file.
- ei_opts = self.distribution.get_option_dict('easy_install').copy()
- fetch_directives = (
+
+ def build_and_install(self, setup_script, setup_base):
+ args = ['bdist_egg', '--dist-dir']
+
+ dist_dir = tempfile.mkdtemp(
+ prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+ )
+ try:
+ self._set_fetcher_options(os.path.dirname(setup_script))
+ args.append(dist_dir)
+
+ self.run_setup(setup_script, setup_base, args)
+ all_eggs = Environment([dist_dir])
+ eggs = []
+ for key in all_eggs:
+ for dist in all_eggs[key]:
+ eggs.append(self.install_egg(dist.location, setup_base))
+ if not eggs and not self.dry_run:
+ log.warn("No eggs found in %s (setup script problem?)",
+ dist_dir)
+ return eggs
+ finally:
+ rmtree(dist_dir)
+ log.set_verbosity(self.verbose) # restore our log verbosity
+
+ def _set_fetcher_options(self, base):
+ """
+ When easy_install is about to run bdist_egg on a source dist, that
+ source dist might have 'setup_requires' directives, requiring
+ additional fetching. Ensure the fetcher options given to easy_install
+ are available to that command as well.
+ """
+ # find the fetch options from easy_install and write them out
+ # to the setup.cfg file.
+ ei_opts = self.distribution.get_option_dict('easy_install').copy()
+ fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
- )
- fetch_options = {}
- for key, val in ei_opts.items():
- if key not in fetch_directives:
- continue
+ )
+ fetch_options = {}
+ for key, val in ei_opts.items():
+ if key not in fetch_directives:
+ continue
fetch_options[key] = val[1]
- # create a settings dictionary suitable for `edit_config`
- settings = dict(easy_install=fetch_options)
- cfg_filename = os.path.join(base, 'setup.cfg')
- setopt.edit_config(cfg_filename, settings)
-
+ # create a settings dictionary suitable for `edit_config`
+ settings = dict(easy_install=fetch_options)
+ cfg_filename = os.path.join(base, 'setup.cfg')
+ setopt.edit_config(cfg_filename, settings)
+
def update_pth(self, dist): # noqa: C901 # is too complex (11) # FIXME
- if self.pth_file is None:
- return
-
- for d in self.pth_file[dist.key]: # drop old entries
+ if self.pth_file is None:
+ return
+
+ for d in self.pth_file[dist.key]: # drop old entries
if not self.multi_version and d.location == dist.location:
continue
-
+
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
- if not self.multi_version:
- if dist.location in self.pth_file.paths:
- log.info(
- "%s is already the active version in easy-install.pth",
+ if not self.multi_version:
+ if dist.location in self.pth_file.paths:
+ log.info(
+ "%s is already the active version in easy-install.pth",
dist,
- )
- else:
- log.info("Adding %s to easy-install.pth file", dist)
- self.pth_file.add(dist) # add new entry
- if dist.location not in self.shadow_path:
- self.shadow_path.append(dist.location)
-
+ )
+ else:
+ log.info("Adding %s to easy-install.pth file", dist)
+ self.pth_file.add(dist) # add new entry
+ if dist.location not in self.shadow_path:
+ self.shadow_path.append(dist.location)
+
if self.dry_run:
return
-
+
self.pth_file.save()
-
+
if dist.key != 'setuptools':
return
-
+
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
@@ -1243,132 +1243,132 @@ class easy_install(Command):
with open(filename, 'wt') as f:
f.write(self.pth_file.make_relative(dist.location) + '\n')
- def unpack_progress(self, src, dst):
- # Progress filter for unpacking
- log.debug("Unpacking %s to %s", src, dst)
- return dst # only unpack-and-compile skips files for dry run
-
- def unpack_and_compile(self, egg_path, destination):
- to_compile = []
- to_chmod = []
-
- def pf(src, dst):
- if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
- to_compile.append(dst)
- elif dst.endswith('.dll') or dst.endswith('.so'):
- to_chmod.append(dst)
- self.unpack_progress(src, dst)
- return not self.dry_run and dst or None
-
- unpack_archive(egg_path, destination, pf)
- self.byte_compile(to_compile)
- if not self.dry_run:
- for f in to_chmod:
- mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
- chmod(f, mode)
-
- def byte_compile(self, to_compile):
- if sys.dont_write_bytecode:
- return
-
- from distutils.util import byte_compile
-
- try:
- # try to make the byte compile messages quieter
- log.set_verbosity(self.verbose - 1)
-
- byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
- if self.optimize:
- byte_compile(
- to_compile, optimize=self.optimize, force=1,
+ def unpack_progress(self, src, dst):
+ # Progress filter for unpacking
+ log.debug("Unpacking %s to %s", src, dst)
+ return dst # only unpack-and-compile skips files for dry run
+
+ def unpack_and_compile(self, egg_path, destination):
+ to_compile = []
+ to_chmod = []
+
+ def pf(src, dst):
+ if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+ to_compile.append(dst)
+ elif dst.endswith('.dll') or dst.endswith('.so'):
+ to_chmod.append(dst)
+ self.unpack_progress(src, dst)
+ return not self.dry_run and dst or None
+
+ unpack_archive(egg_path, destination, pf)
+ self.byte_compile(to_compile)
+ if not self.dry_run:
+ for f in to_chmod:
+ mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
+ chmod(f, mode)
+
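Editor's note: the mode fix-up applied to extracted shared libraries above guarantees read/execute for everyone while stripping group/other write bits. Worked out by hand:

# OR in 0o555 (r-xr-xr-x), then AND with 0o7755 to drop g+w and o+w:
for start in (0o600, 0o666, 0o640):
    print(oct((start | 0o555) & 0o7755))
# all three normalize to 0o755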
+ def byte_compile(self, to_compile):
+ if sys.dont_write_bytecode:
+ return
+
+ from distutils.util import byte_compile
+
+ try:
+ # try to make the byte compile messages quieter
+ log.set_verbosity(self.verbose - 1)
+
+ byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+ if self.optimize:
+ byte_compile(
+ to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
- )
- finally:
- log.set_verbosity(self.verbose) # restore original verbosity
-
- __no_default_msg = textwrap.dedent("""
- bad install directory or PYTHONPATH
-
- You are attempting to install a package to a directory that is not
- on PYTHONPATH and which Python does not read ".pth" files from. The
- installation directory you specified (via --install-dir, --prefix, or
- the distutils default setting) was:
-
- %s
-
- and your PYTHONPATH environment variable currently contains:
-
- %r
-
- Here are some of your options for correcting the problem:
-
- * You can choose a different installation directory, i.e., one that is
- on PYTHONPATH or supports .pth files
-
- * You can add the installation directory to the PYTHONPATH environment
- variable. (It must then also be on PYTHONPATH whenever you run
- Python and want to use the package(s) you are installing.)
-
- * You can set up the installation directory to support ".pth" files by
- using one of the approaches described here:
-
+ )
+ finally:
+ log.set_verbosity(self.verbose) # restore original verbosity
+
+ __no_default_msg = textwrap.dedent("""
+ bad install directory or PYTHONPATH
+
+ You are attempting to install a package to a directory that is not
+ on PYTHONPATH and which Python does not read ".pth" files from. The
+ installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
+
+ and your PYTHONPATH environment variable currently contains:
+
+ %r
+
+ Here are some of your options for correcting the problem:
+
+ * You can choose a different installation directory, i.e., one that is
+ on PYTHONPATH or supports .pth files
+
+ * You can add the installation directory to the PYTHONPATH environment
+ variable. (It must then also be on PYTHONPATH whenever you run
+ Python and want to use the package(s) you are installing.)
+
+ * You can set up the installation directory to support ".pth" files by
+ using one of the approaches described here:
+
https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations
-
+
Please make the appropriate changes for your system and try again.
""").strip()
-
- def create_home_path(self):
- """Create directories under ~."""
- if not self.user:
- return
- home = convert_path(os.path.expanduser("~"))
+
+ def create_home_path(self):
+ """Create directories under ~."""
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.items():
- if path.startswith(home) and not os.path.isdir(path):
- self.debug_print("os.makedirs('%s', 0o700)" % path)
- os.makedirs(path, 0o700)
-
- INSTALL_SCHEMES = dict(
- posix=dict(
- install_dir='$base/lib/python$py_version_short/site-packages',
- script_dir='$base/bin',
- ),
- )
-
- DEFAULT_SCHEME = dict(
- install_dir='$base/Lib/site-packages',
- script_dir='$base/Scripts',
- )
-
- def _expand(self, *attrs):
- config_vars = self.get_finalized_command('install').config_vars
-
- if self.prefix:
- # Set default install_dir/scripts from --prefix
- config_vars = config_vars.copy()
- config_vars['base'] = self.prefix
- scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
- for attr, val in scheme.items():
- if getattr(self, attr, None) is None:
- setattr(self, attr, val)
-
- from distutils.util import subst_vars
-
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- val = subst_vars(val, config_vars)
- if os.name == 'posix':
- val = os.path.expanduser(val)
- setattr(self, attr, val)
-
-
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0o700)" % path)
+ os.makedirs(path, 0o700)
+
+ INSTALL_SCHEMES = dict(
+ posix=dict(
+ install_dir='$base/lib/python$py_version_short/site-packages',
+ script_dir='$base/bin',
+ ),
+ )
+
+ DEFAULT_SCHEME = dict(
+ install_dir='$base/Lib/site-packages',
+ script_dir='$base/Scripts',
+ )
+
+ def _expand(self, *attrs):
+ config_vars = self.get_finalized_command('install').config_vars
+
+ if self.prefix:
+ # Set default install_dir/scripts from --prefix
+ config_vars = config_vars.copy()
+ config_vars['base'] = self.prefix
+ scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
+ for attr, val in scheme.items():
+ if getattr(self, attr, None) is None:
+ setattr(self, attr, val)
+
+ from distutils.util import subst_vars
+
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ val = subst_vars(val, config_vars)
+ if os.name == 'posix':
+ val = os.path.expanduser(val)
+ setattr(self, attr, val)
+
+
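Editor's note: the scheme paths above are ordinary distutils templates expanded against config_vars by subst_vars. A short sketch (paths illustrative):

from distutils.util import subst_vars

config_vars = {'base': '/opt/venv', 'py_version_short': '3.8'}
tmpl = '$base/lib/python$py_version_short/site-packages'
print(subst_vars(tmpl, config_vars))  # /opt/venv/lib/python3.8/site-packages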
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
-def get_site_dirs():
+def get_site_dirs():
"""
Return a list of 'site' dirs
"""
@@ -1378,10 +1378,10 @@ def get_site_dirs():
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
- prefixes = [sys.prefix]
- if sys.exec_prefix != sys.prefix:
- prefixes.append(sys.exec_prefix)
- for prefix in prefixes:
+ prefixes = [sys.prefix]
+ if sys.exec_prefix != sys.prefix:
+ prefixes.append(sys.exec_prefix)
+ for prefix in prefixes:
if not prefix:
continue
@@ -1423,60 +1423,60 @@ def get_site_dirs():
'site-packages',
)
sitedirs.append(home_sp)
- lib_paths = get_path('purelib'), get_path('platlib')
-
+ lib_paths = get_path('purelib'), get_path('platlib')
+
sitedirs.extend(s for s in lib_paths if s not in sitedirs)
- if site.ENABLE_USER_SITE:
- sitedirs.append(site.USER_SITE)
-
+ if site.ENABLE_USER_SITE:
+ sitedirs.append(site.USER_SITE)
+
with contextlib.suppress(AttributeError):
sitedirs.extend(site.getsitepackages())
- sitedirs = list(map(normalize_path, sitedirs))
-
- return sitedirs
-
-
+ sitedirs = list(map(normalize_path, sitedirs))
+
+ return sitedirs
+
+
def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME
- """Yield sys.path directories that might contain "old-style" packages"""
-
- seen = {}
-
- for dirname in inputs:
- dirname = normalize_path(dirname)
- if dirname in seen:
- continue
-
- seen[dirname] = 1
- if not os.path.isdir(dirname):
- continue
-
- files = os.listdir(dirname)
- yield dirname, files
-
- for name in files:
- if not name.endswith('.pth'):
- # We only care about the .pth files
- continue
- if name in ('easy-install.pth', 'setuptools.pth'):
- # Ignore .pth files that we control
- continue
-
- # Read the .pth file
- f = open(os.path.join(dirname, name))
- lines = list(yield_lines(f))
- f.close()
-
- # Yield existing non-dupe, non-import directory lines from it
- for line in lines:
+ """Yield sys.path directories that might contain "old-style" packages"""
+
+ seen = {}
+
+ for dirname in inputs:
+ dirname = normalize_path(dirname)
+ if dirname in seen:
+ continue
+
+ seen[dirname] = 1
+ if not os.path.isdir(dirname):
+ continue
+
+ files = os.listdir(dirname)
+ yield dirname, files
+
+ for name in files:
+ if not name.endswith('.pth'):
+ # We only care about the .pth files
+ continue
+ if name in ('easy-install.pth', 'setuptools.pth'):
+ # Ignore .pth files that we control
+ continue
+
+ # Read the .pth file
+ f = open(os.path.join(dirname, name))
+ lines = list(yield_lines(f))
+ f.close()
+
+ # Yield existing non-dupe, non-import directory lines from it
+ for line in lines:
if line.startswith("import"):
continue
-
+
line = normalize_path(line.rstrip())
if line in seen:
continue
-
+
seen[line] = 1
if not os.path.isdir(line):
continue
@@ -1484,542 +1484,542 @@ def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME
yield line, os.listdir(line)
-def extract_wininst_cfg(dist_filename):
- """Extract configuration data from a bdist_wininst .exe
-
- Returns a configparser.RawConfigParser, or None
- """
- f = open(dist_filename, 'rb')
- try:
- endrec = zipfile._EndRecData(f)
- if endrec is None:
- return None
-
- prepended = (endrec[9] - endrec[5]) - endrec[6]
- if prepended < 12: # no wininst data here
- return None
- f.seek(prepended - 12)
-
- tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
- if tag not in (0x1234567A, 0x1234567B):
- return None # not a valid tag
-
- f.seek(prepended - (12 + cfglen))
+def extract_wininst_cfg(dist_filename):
+ """Extract configuration data from a bdist_wininst .exe
+
+ Returns a configparser.RawConfigParser, or None
+ """
+ f = open(dist_filename, 'rb')
+ try:
+ endrec = zipfile._EndRecData(f)
+ if endrec is None:
+ return None
+
+ prepended = (endrec[9] - endrec[5]) - endrec[6]
+ if prepended < 12: # no wininst data here
+ return None
+ f.seek(prepended - 12)
+
+ tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
+ if tag not in (0x1234567A, 0x1234567B):
+ return None # not a valid tag
+
+ f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
- try:
- part = f.read(cfglen)
- # Read up to the first null byte.
- config = part.split(b'\0', 1)[0]
- # Now the config is in bytes, but for RawConfigParser, it should
- # be text, so decode it.
- config = config.decode(sys.getfilesystemencoding())
+ try:
+ part = f.read(cfglen)
+ # Read up to the first null byte.
+ config = part.split(b'\0', 1)[0]
+ # Now the config is in bytes, but for RawConfigParser, it should
+ # be text, so decode it.
+ config = config.decode(sys.getfilesystemencoding())
cfg.read_file(io.StringIO(config))
- except configparser.Error:
- return None
- if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
- return None
- return cfg
-
- finally:
- f.close()
-
-
-def get_exe_prefixes(exe_filename):
- """Get exe->egg path translations for a given .exe file"""
-
- prefixes = [
+ except configparser.Error:
+ return None
+ if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+ return None
+ return cfg
+
+ finally:
+ f.close()
+
+
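Editor's note: the 12-byte trailer extract_wininst_cfg seeks to sits just before the zip end-of-central-directory record: three little-endian int32s (tag, config length, bitmap length). A round-trip sketch of that layout:

import struct

trailer = struct.pack('<iii', 0x1234567A, 512, 0)
tag, cfglen, bmlen = struct.unpack('<iii', trailer)
assert tag in (0x1234567A, 0x1234567B) and (cfglen, bmlen) == (512, 0)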
+def get_exe_prefixes(exe_filename):
+ """Get exe->egg path translations for a given .exe file"""
+
+ prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
- ('PLATLIB/', ''),
- ('SCRIPTS/', 'EGG-INFO/scripts/'),
- ('DATA/lib/site-packages', ''),
- ]
- z = zipfile.ZipFile(exe_filename)
- try:
- for info in z.infolist():
- name = info.filename
- parts = name.split('/')
- if len(parts) == 3 and parts[2] == 'PKG-INFO':
- if parts[1].endswith('.egg-info'):
- prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
- break
- if len(parts) != 2 or not name.endswith('.pth'):
- continue
- if name.endswith('-nspkg.pth'):
- continue
- if parts[0].upper() in ('PURELIB', 'PLATLIB'):
+ ('PLATLIB/', ''),
+ ('SCRIPTS/', 'EGG-INFO/scripts/'),
+ ('DATA/lib/site-packages', ''),
+ ]
+ z = zipfile.ZipFile(exe_filename)
+ try:
+ for info in z.infolist():
+ name = info.filename
+ parts = name.split('/')
+ if len(parts) == 3 and parts[2] == 'PKG-INFO':
+ if parts[1].endswith('.egg-info'):
+ prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
+ break
+ if len(parts) != 2 or not name.endswith('.pth'):
+ continue
+ if name.endswith('-nspkg.pth'):
+ continue
+ if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name).decode()
- for pth in yield_lines(contents):
- pth = pth.strip().replace('\\', '/')
- if not pth.startswith('import'):
- prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
- finally:
- z.close()
- prefixes = [(x.lower(), y) for x, y in prefixes]
- prefixes.sort()
- prefixes.reverse()
- return prefixes
-
-
-class PthDistributions(Environment):
- """A .pth file with Distribution paths in it"""
-
- dirty = False
-
- def __init__(self, filename, sitedirs=()):
- self.filename = filename
- self.sitedirs = list(map(normalize_path, sitedirs))
- self.basedir = normalize_path(os.path.dirname(self.filename))
- self._load()
- Environment.__init__(self, [], None, None)
- for path in yield_lines(self.paths):
- list(map(self.add, find_distributions(path, True)))
-
- def _load(self):
- self.paths = []
- saw_import = False
- seen = dict.fromkeys(self.sitedirs)
- if os.path.isfile(self.filename):
- f = open(self.filename, 'rt')
- for line in f:
- if line.startswith('import'):
- saw_import = True
- continue
- path = line.rstrip()
- self.paths.append(path)
- if not path.strip() or path.strip().startswith('#'):
- continue
- # skip non-existent paths, in case somebody deleted a package
- # manually, and duplicate paths as well
- path = self.paths[-1] = normalize_path(
- os.path.join(self.basedir, path)
- )
- if not os.path.exists(path) or path in seen:
- self.paths.pop() # skip it
- self.dirty = True # we cleaned up, so we're dirty now :)
- continue
- seen[path] = 1
- f.close()
-
- if self.paths and not saw_import:
- self.dirty = True # ensure anything we touch has import wrappers
- while self.paths and not self.paths[-1].strip():
- self.paths.pop()
-
- def save(self):
- """Write changed .pth file back to disk"""
- if not self.dirty:
- return
-
- rel_paths = list(map(self.make_relative, self.paths))
- if rel_paths:
- log.debug("Saving %s", self.filename)
- lines = self._wrap_lines(rel_paths)
- data = '\n'.join(lines) + '\n'
-
- if os.path.islink(self.filename):
- os.unlink(self.filename)
- with open(self.filename, 'wt') as f:
- f.write(data)
-
- elif os.path.exists(self.filename):
- log.debug("Deleting empty %s", self.filename)
- os.unlink(self.filename)
-
- self.dirty = False
-
- @staticmethod
- def _wrap_lines(lines):
- return lines
-
- def add(self, dist):
- """Add `dist` to the distribution map"""
- new_path = (
- dist.location not in self.paths and (
- dist.location not in self.sitedirs or
- # account for '.' being in PYTHONPATH
- dist.location == os.getcwd()
- )
- )
- if new_path:
- self.paths.append(dist.location)
- self.dirty = True
- Environment.add(self, dist)
-
- def remove(self, dist):
- """Remove `dist` from the distribution map"""
- while dist.location in self.paths:
- self.paths.remove(dist.location)
- self.dirty = True
- Environment.remove(self, dist)
-
- def make_relative(self, path):
- npath, last = os.path.split(normalize_path(path))
- baselen = len(self.basedir)
- parts = [last]
- sep = os.altsep == '/' and '/' or os.sep
- while len(npath) >= baselen:
- if npath == self.basedir:
- parts.append(os.curdir)
- parts.reverse()
- return sep.join(parts)
- npath, last = os.path.split(npath)
- parts.append(last)
- else:
- return path
-
-
-class RewritePthDistributions(PthDistributions):
- @classmethod
- def _wrap_lines(cls, lines):
- yield cls.prelude
- for line in lines:
- yield line
- yield cls.postlude
-
+ for pth in yield_lines(contents):
+ pth = pth.strip().replace('\\', '/')
+ if not pth.startswith('import'):
+ prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
+ finally:
+ z.close()
+ prefixes = [(x.lower(), y) for x, y in prefixes]
+ prefixes.sort()
+ prefixes.reverse()
+ return prefixes
+
+
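Editor's note: a prefix table like the one returned above rewrites archive member paths into egg layout. A minimal application of it (entries illustrative, already lower-cased with more specific entries first, as get_exe_prefixes arranges):

prefixes = [('scripts/', 'EGG-INFO/scripts/'), ('purelib/', '')]

def translate(src, prefixes):
    s = src.lower()
    for old, new in prefixes:
        if s.startswith(old):
            return new + src[len(old):]
    return src

print(translate('PURELIB/pkg/__init__.py', prefixes))  # pkg/__init__.py
print(translate('SCRIPTS/tool.py', prefixes))          # EGG-INFO/scripts/tool.py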
+class PthDistributions(Environment):
+ """A .pth file with Distribution paths in it"""
+
+ dirty = False
+
+ def __init__(self, filename, sitedirs=()):
+ self.filename = filename
+ self.sitedirs = list(map(normalize_path, sitedirs))
+ self.basedir = normalize_path(os.path.dirname(self.filename))
+ self._load()
+ Environment.__init__(self, [], None, None)
+ for path in yield_lines(self.paths):
+ list(map(self.add, find_distributions(path, True)))
+
+ def _load(self):
+ self.paths = []
+ saw_import = False
+ seen = dict.fromkeys(self.sitedirs)
+ if os.path.isfile(self.filename):
+ f = open(self.filename, 'rt')
+ for line in f:
+ if line.startswith('import'):
+ saw_import = True
+ continue
+ path = line.rstrip()
+ self.paths.append(path)
+ if not path.strip() or path.strip().startswith('#'):
+ continue
+ # skip non-existent paths, in case somebody deleted a package
+ # manually, and duplicate paths as well
+ path = self.paths[-1] = normalize_path(
+ os.path.join(self.basedir, path)
+ )
+ if not os.path.exists(path) or path in seen:
+ self.paths.pop() # skip it
+ self.dirty = True # we cleaned up, so we're dirty now :)
+ continue
+ seen[path] = 1
+ f.close()
+
+ if self.paths and not saw_import:
+ self.dirty = True # ensure anything we touch has import wrappers
+ while self.paths and not self.paths[-1].strip():
+ self.paths.pop()
+
+ def save(self):
+ """Write changed .pth file back to disk"""
+ if not self.dirty:
+ return
+
+ rel_paths = list(map(self.make_relative, self.paths))
+ if rel_paths:
+ log.debug("Saving %s", self.filename)
+ lines = self._wrap_lines(rel_paths)
+ data = '\n'.join(lines) + '\n'
+
+ if os.path.islink(self.filename):
+ os.unlink(self.filename)
+ with open(self.filename, 'wt') as f:
+ f.write(data)
+
+ elif os.path.exists(self.filename):
+ log.debug("Deleting empty %s", self.filename)
+ os.unlink(self.filename)
+
+ self.dirty = False
+
+ @staticmethod
+ def _wrap_lines(lines):
+ return lines
+
+ def add(self, dist):
+ """Add `dist` to the distribution map"""
+ new_path = (
+ dist.location not in self.paths and (
+ dist.location not in self.sitedirs or
+ # account for '.' being in PYTHONPATH
+ dist.location == os.getcwd()
+ )
+ )
+ if new_path:
+ self.paths.append(dist.location)
+ self.dirty = True
+ Environment.add(self, dist)
+
+ def remove(self, dist):
+ """Remove `dist` from the distribution map"""
+ while dist.location in self.paths:
+ self.paths.remove(dist.location)
+ self.dirty = True
+ Environment.remove(self, dist)
+
+ def make_relative(self, path):
+ npath, last = os.path.split(normalize_path(path))
+ baselen = len(self.basedir)
+ parts = [last]
+ sep = os.altsep == '/' and '/' or os.sep
+ while len(npath) >= baselen:
+ if npath == self.basedir:
+ parts.append(os.curdir)
+ parts.reverse()
+ return sep.join(parts)
+ npath, last = os.path.split(npath)
+ parts.append(last)
+ else:
+ return path
+
+
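Editor's note: make_relative climbs from the target path toward the .pth file's own directory, so an egg placed directly beside the .pth file becomes a './'-anchored entry. Equivalent in spirit (illustrative posix paths):

import os

base = os.path.normpath('/srv/site-packages')
target = os.path.normpath('/srv/site-packages/Sample-1.0.egg')
# Paths outside base are kept absolute by the real method; inside it,
# the result is relative and prefixed with os.curdir:
print(os.path.join(os.curdir, os.path.relpath(target, base)))  # ./Sample-1.0.egg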
+class RewritePthDistributions(PthDistributions):
+ @classmethod
+ def _wrap_lines(cls, lines):
+ yield cls.prelude
+ for line in lines:
+ yield line
+ yield cls.postlude
+
prelude = _one_liner("""
- import sys
- sys.__plen = len(sys.path)
- """)
+ import sys
+ sys.__plen = len(sys.path)
+ """)
postlude = _one_liner("""
- import sys
- new = sys.path[sys.__plen:]
- del sys.path[sys.__plen:]
- p = getattr(sys, '__egginsert', 0)
- sys.path[p:p] = new
- sys.__egginsert = p + len(new)
- """)
-
-
+ import sys
+ new = sys.path[sys.__plen:]
+ del sys.path[sys.__plen:]
+ p = getattr(sys, '__egginsert', 0)
+ sys.path[p:p] = new
+ sys.__egginsert = p + len(new)
+ """)
+
+
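Editor's note: what the prelude/postlude pair does when Python's site machinery executes the saved .pth file, written out long-hand (sketch; the egg path is illustrative):

import sys

sys.__plen = len(sys.path)                # prelude: remember where the tail starts
sys.path.append('/tmp/Sample-1.0.egg')    # the plain path lines in between
new = sys.path[sys.__plen:]               # postlude: cut the new tail out...
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new                       # ...and splice it in near the front,
sys.__egginsert = p + len(new)            # so eggs shadow later sys.path entries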
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
- PthDistributions = RewritePthDistributions
-
-
-def _first_line_re():
- """
- Return a regular expression based on first_line_re suitable for matching
- strings.
- """
- if isinstance(first_line_re.pattern, str):
- return first_line_re
-
- # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
- return re.compile(first_line_re.pattern.decode())
-
-
-def auto_chmod(func, arg, exc):
+ PthDistributions = RewritePthDistributions
+
+
+def _first_line_re():
+ """
+ Return a regular expression based on first_line_re suitable for matching
+ strings.
+ """
+ if isinstance(first_line_re.pattern, str):
+ return first_line_re
+
+ # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
+ return re.compile(first_line_re.pattern.decode())
+
+
+def auto_chmod(func, arg, exc):
if func in [os.unlink, os.remove] and os.name == 'nt':
- chmod(arg, stat.S_IWRITE)
- return func(arg)
- et, ev, _ = sys.exc_info()
+ chmod(arg, stat.S_IWRITE)
+ return func(arg)
+ et, ev, _ = sys.exc_info()
# TODO: This code doesn't make sense. What is it trying to do?
raise (ev[0], ev[1] + (" %s %s" % (func, arg)))
-
-
-def update_dist_caches(dist_path, fix_zipimporter_caches):
- """
- Fix any globally cached `dist_path` related data
-
- `dist_path` should be a path of a newly installed egg distribution (zipped
- or unzipped).
-
- sys.path_importer_cache contains finder objects that have been cached when
- importing data from the original distribution. Any such finders need to be
- cleared since the replacement distribution might be packaged differently,
- e.g. a zipped egg distribution might get replaced with an unzipped egg
- folder or vice versa. Having the old finders cached may then cause Python
- to attempt loading modules from the replacement distribution using an
- incorrect loader.
-
- zipimport.zipimporter objects are Python loaders charged with importing
- data packaged inside zip archives. If stale loaders referencing the
- original distribution are left behind, they can fail to load modules from
- the replacement distribution. E.g. if an old zipimport.zipimporter instance
- is used to load data from a new zipped egg archive, it may cause the
- operation to attempt to locate the requested data in the wrong location -
- one indicated by the original distribution's zip archive directory
- information. Such an operation may then fail outright, e.g. report having
- read a 'bad local file header', or even worse, it may fail silently &
- return invalid data.
-
- zipimport._zip_directory_cache contains cached zip archive directory
- information for all existing zipimport.zipimporter instances and all such
- instances connected to the same archive share the same cached directory
- information.
-
- If asked, and the underlying Python implementation allows it, we can fix
- all existing zipimport.zipimporter instances instead of having to track
- them down and remove them one by one, by updating their shared cached zip
- archive directory information. This, of course, assumes that the
- replacement distribution is packaged as a zipped egg.
-
- If not asked to fix existing zipimport.zipimporter instances, we still do
- our best to clear any remaining zipimport.zipimporter related cached data
- that might somehow later get used when attempting to load data from the new
- distribution and thus cause such load operations to fail. Note that when
- tracking down such remaining stale data, we cannot catch every conceivable
- usage from here, and we clear only those that we know of and have found to
- cause problems if left alive. Any remaining caches should be updated by
- whoever is in charge of maintaining them, i.e. they should be ready to
- handle us replacing their zip archives with new distributions at runtime.
-
- """
- # There are several other known sources of stale zipimport.zipimporter
- # instances that we do not clear here, but might if ever given a reason to
- # do so:
- # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
- # set') may contain distributions which may in turn contain their
- # zipimport.zipimporter loaders.
- # * Several zipimport.zipimporter loaders held by local variables further
- # up the function call stack when running the setuptools installation.
- # * Already loaded modules may have their __loader__ attribute set to the
- # exact loader instance used when importing them. Python 3.4 docs state
- # that this information is intended mostly for introspection and so is
- # not expected to cause us problems.
- normalized_path = normalize_path(dist_path)
- _uncache(normalized_path, sys.path_importer_cache)
- if fix_zipimporter_caches:
- _replace_zip_directory_cache_data(normalized_path)
- else:
- # Here, even though we do not want to fix existing and now stale
- # zipimporter cache information, we still want to remove it. Related to
- # Python's zip archive directory information cache, we clear each of
- # its stale entries in two phases:
- # 1. Clear the entry so attempting to access zip archive information
- # via any existing stale zipimport.zipimporter instances fails.
- # 2. Remove the entry from the cache so any newly constructed
- # zipimport.zipimporter instances do not end up using old stale
- # zip archive directory information.
- # This whole stale data removal step does not seem strictly necessary,
- # but has been left in because it was done before we started replacing
- # the zip archive directory information cache content if possible, and
- # there are no relevant unit tests that we can depend on to tell us if
- # this is really needed.
- _remove_and_clear_zip_directory_cache_data(normalized_path)
-
-
-def _collect_zipimporter_cache_entries(normalized_path, cache):
- """
- Return zipimporter cache entry keys related to a given normalized path.
-
- Alternative path spellings (e.g. those using different character case or
- those using alternative path separators) related to the same path are
- included. Any sub-path entries are included as well, i.e. those
- corresponding to zip archives embedded in other zip archives.
-
- """
- result = []
- prefix_len = len(normalized_path)
- for p in cache:
- np = normalize_path(p)
- if (np.startswith(normalized_path) and
- np[prefix_len:prefix_len + 1] in (os.sep, '')):
- result.append(p)
- return result
-
-
-def _update_zipimporter_cache(normalized_path, cache, updater=None):
- """
- Update zipimporter cache data for a given normalized path.
-
- Any sub-path entries are processed as well, i.e. those corresponding to zip
- archives embedded in other zip archives.
-
- The given updater is a callable taking a cache entry key and the original
- entry (already removed from the cache by this point), and is expected to
- update the entry and possibly return a new one to be inserted in its place.
- Returning None indicates that the entry should not be replaced with a new
- one. If no updater is given, the cache entries are simply removed without
- any additional processing, the same as if the updater simply returned None.
-
- """
- for p in _collect_zipimporter_cache_entries(normalized_path, cache):
- # N.B. pypy's custom zipimport._zip_directory_cache implementation does
- # not support the complete dict interface:
- # * Does not support item assignment, thus not allowing this function
- # to be used only for removing existing cache entries.
- # * Does not support the dict.pop() method, forcing us to use the
- # get/del patterns instead. For more detailed information see the
- # following links:
+
+
+def update_dist_caches(dist_path, fix_zipimporter_caches):
+ """
+ Fix any globally cached `dist_path` related data
+
+ `dist_path` should be a path of a newly installed egg distribution (zipped
+ or unzipped).
+
+ sys.path_importer_cache contains finder objects that have been cached when
+ importing data from the original distribution. Any such finders need to be
+ cleared since the replacement distribution might be packaged differently,
+ e.g. a zipped egg distribution might get replaced with an unzipped egg
+ folder or vice versa. Having the old finders cached may then cause Python
+ to attempt loading modules from the replacement distribution using an
+ incorrect loader.
+
+ zipimport.zipimporter objects are Python loaders charged with importing
+ data packaged inside zip archives. If stale loaders referencing the
+ original distribution are left behind, they can fail to load modules from
+ the replacement distribution. E.g. if an old zipimport.zipimporter instance
+ is used to load data from a new zipped egg archive, it may cause the
+ operation to attempt to locate the requested data in the wrong location -
+ one indicated by the original distribution's zip archive directory
+ information. Such an operation may then fail outright, e.g. report having
+ read a 'bad local file header', or even worse, it may fail silently &
+ return invalid data.
+
+ zipimport._zip_directory_cache contains cached zip archive directory
+ information for all existing zipimport.zipimporter instances and all such
+ instances connected to the same archive share the same cached directory
+ information.
+
+ If asked, and the underlying Python implementation allows it, we can fix
+ all existing zipimport.zipimporter instances instead of having to track
+ them down and remove them one by one, by updating their shared cached zip
+ archive directory information. This, of course, assumes that the
+ replacement distribution is packaged as a zipped egg.
+
+ If not asked to fix existing zipimport.zipimporter instances, we still do
+ our best to clear any remaining zipimport.zipimporter related cached data
+ that might somehow later get used when attempting to load data from the new
+ distribution and thus cause such load operations to fail. Note that when
+ tracking down such remaining stale data, we cannot catch every conceivable
+ usage from here, and we clear only those that we know of and have found to
+ cause problems if left alive. Any remaining caches should be updated by
+ whoever is in charge of maintaining them, i.e. they should be ready to
+ handle us replacing their zip archives with new distributions at runtime.
+
+ """
+ # There are several other known sources of stale zipimport.zipimporter
+ # instances that we do not clear here, but might if ever given a reason to
+ # do so:
+ # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
+ # set') may contain distributions which may in turn contain their
+ # zipimport.zipimporter loaders.
+ # * Several zipimport.zipimporter loaders held by local variables further
+ # up the function call stack when running the setuptools installation.
+ # * Already loaded modules may have their __loader__ attribute set to the
+ # exact loader instance used when importing them. Python 3.4 docs state
+ # that this information is intended mostly for introspection and so is
+ # not expected to cause us problems.
+ normalized_path = normalize_path(dist_path)
+ _uncache(normalized_path, sys.path_importer_cache)
+ if fix_zipimporter_caches:
+ _replace_zip_directory_cache_data(normalized_path)
+ else:
+ # Here, even though we do not want to fix existing and now stale
+ # zipimporter cache information, we still want to remove it. Related to
+ # Python's zip archive directory information cache, we clear each of
+ # its stale entries in two phases:
+ # 1. Clear the entry so attempting to access zip archive information
+ # via any existing stale zipimport.zipimporter instances fails.
+ # 2. Remove the entry from the cache so any newly constructed
+ # zipimport.zipimporter instances do not end up using old stale
+ # zip archive directory information.
+ # This whole stale data removal step does not seem strictly necessary,
+ # but has been left in because it was done before we started replacing
+ # the zip archive directory information cache content if possible, and
+ # there are no relevant unit tests that we can depend on to tell us if
+ # this is really needed.
+ _remove_and_clear_zip_directory_cache_data(normalized_path)
+
+
+def _collect_zipimporter_cache_entries(normalized_path, cache):
+ """
+ Return zipimporter cache entry keys related to a given normalized path.
+
+ Alternative path spellings (e.g. those using different character case or
+ those using alternative path separators) related to the same path are
+ included. Any sub-path entries are included as well, i.e. those
+ corresponding to zip archives embedded in other zip archives.
+
+ """
+ result = []
+ prefix_len = len(normalized_path)
+ for p in cache:
+ np = normalize_path(p)
+ if (np.startswith(normalized_path) and
+ np[prefix_len:prefix_len + 1] in (os.sep, '')):
+ result.append(p)
+ return result
+
+
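Editor's note: which cache keys count as "related" here: the path itself and anything nested under it, but not mere string-prefix siblings. A sketch with hypothetical keys (posix separators; the real code normalizes paths first):

import os

def related(key, normalized_path):
    nxt = key[len(normalized_path):len(normalized_path) + 1]
    return key.startswith(normalized_path) and nxt in (os.sep, '')

for key in ('/srv/sample.egg', '/srv/sample.egg/inner.zip', '/srv/sample.egg-info'):
    print(key, related(key, '/srv/sample.egg'))  # True, True, False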
+def _update_zipimporter_cache(normalized_path, cache, updater=None):
+ """
+ Update zipimporter cache data for a given normalized path.
+
+ Any sub-path entries are processed as well, i.e. those corresponding to zip
+ archives embedded in other zip archives.
+
+ The given updater is a callable taking a cache entry key and the original
+ entry (already removed from the cache by this point), and is expected to
+ update the entry and possibly return a new one to be inserted in its place.
+ Returning None indicates that the entry should not be replaced with a new
+ one. If no updater is given, the cache entries are simply removed without
+ any additional processing, the same as if the updater simply returned None.
+
+ """
+ for p in _collect_zipimporter_cache_entries(normalized_path, cache):
+ # N.B. pypy's custom zipimport._zip_directory_cache implementation does
+ # not support the complete dict interface:
+ # * Does not support item assignment, thus not allowing this function
+ # to be used only for removing existing cache entries.
+ # * Does not support the dict.pop() method, forcing us to use the
+ # get/del patterns instead. For more detailed information see the
+ # following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
- old_entry = cache[p]
- del cache[p]
- new_entry = updater and updater(p, old_entry)
- if new_entry is not None:
- cache[p] = new_entry
-
-
-def _uncache(normalized_path, cache):
- _update_zipimporter_cache(normalized_path, cache)
-
-
-def _remove_and_clear_zip_directory_cache_data(normalized_path):
- def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
- old_entry.clear()
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=clear_and_remove_cached_zip_archive_directory_data)
-
-
-# PyPy Python implementation does not allow directly writing to the
-# zipimport._zip_directory_cache and so prevents us from attempting to correct
-# its content. The best we can do there is clear the problematic cache content
-# and have PyPy repopulate it as needed. The downside is that if there are any
-# stale zipimport.zipimporter instances lying around, attempting to use them
-# will fail due to not having their zip archive directory information
-# available, instead of being automatically corrected to use the new correct
-# zip archive directory information.
-if '__pypy__' in sys.builtin_module_names:
- _replace_zip_directory_cache_data = \
- _remove_and_clear_zip_directory_cache_data
-else:
-
- def _replace_zip_directory_cache_data(normalized_path):
- def replace_cached_zip_archive_directory_data(path, old_entry):
- # N.B. In theory, we could load the zip directory information just
- # once for all updated path spellings, and then copy it locally and
- # update its contained path strings to contain the correct
- # spelling, but that seems like a way too invasive move (this cache
- # structure is not officially documented anywhere and could in
- # theory change with new Python releases) for no significant
- # benefit.
- old_entry.clear()
- zipimport.zipimporter(path)
- old_entry.update(zipimport._zip_directory_cache[path])
- return old_entry
-
- _update_zipimporter_cache(
- normalized_path, zipimport._zip_directory_cache,
- updater=replace_cached_zip_archive_directory_data)
-
-
-def is_python(text, filename='<string>'):
- "Is this string a valid Python script?"
- try:
- compile(text, filename, 'exec')
- except (SyntaxError, TypeError):
- return False
- else:
- return True
-
-
-def is_sh(executable):
- """Determine if the specified executable is a .sh (contains a #! line)"""
- try:
- with io.open(executable, encoding='latin-1') as fp:
- magic = fp.read(2)
- except (OSError, IOError):
- return executable
- return magic == '#!'
-
-
-def nt_quote_arg(arg):
- """Quote a command line argument according to Windows parsing rules"""
- return subprocess.list2cmdline([arg])
-
-
-def is_python_script(script_text, filename):
-    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.)
- """
- if filename.endswith('.py') or filename.endswith('.pyw'):
- return True # extension says it's Python
- if is_python(script_text, filename):
- return True # it's syntactically valid Python
- if script_text.startswith('#!'):
- # It begins with a '#!' line, so check if 'python' is in it somewhere
- return 'python' in script_text.splitlines()[0].lower()
-
- return False # Not any Python I can recognize
-
-
-try:
- from os import chmod as _chmod
-except ImportError:
- # Jython compatibility
- def _chmod(*args):
- pass
-
-
-def chmod(path, mode):
- log.debug("changing mode of %s to %o", path, mode)
- try:
- _chmod(path, mode)
- except os.error as e:
- log.debug("chmod failed: %s", e)
-
-
-class CommandSpec(list):
- """
- A command spec for a #! header, specified as a list of arguments akin to
- those passed to Popen.
- """
-
- options = []
- split_args = dict()
-
- @classmethod
- def best(cls):
- """
- Choose the best CommandSpec class based on environmental conditions.
- """
- return cls
-
- @classmethod
- def _sys_executable(cls):
- _default = os.path.normpath(sys.executable)
- return os.environ.get('__PYVENV_LAUNCHER__', _default)
-
- @classmethod
- def from_param(cls, param):
- """
- Construct a CommandSpec from a parameter to build_scripts, which may
- be None.
- """
- if isinstance(param, cls):
- return param
- if isinstance(param, list):
- return cls(param)
- if param is None:
- return cls.from_environment()
- # otherwise, assume it's a string.
- return cls.from_string(param)
-
- @classmethod
- def from_environment(cls):
- return cls([cls._sys_executable()])
-
- @classmethod
- def from_string(cls, string):
- """
- Construct a command spec from a simple string representing a command
- line parseable by shlex.split.
- """
- items = shlex.split(string, **cls.split_args)
- return cls(items)
-
- def install_options(self, script_text):
- self.options = shlex.split(self._extract_options(script_text))
- cmdline = subprocess.list2cmdline(self)
- if not isascii(cmdline):
- self.options[:0] = ['-x']
-
- @staticmethod
- def _extract_options(orig_script):
- """
- Extract any options from the first line of the script.
- """
- first = (orig_script + '\n').splitlines()[0]
- match = _first_line_re().match(first)
- options = match.group(1) or '' if match else ''
- return options.strip()
-
- def as_header(self):
- return self._render(self + list(self.options))
-
- @staticmethod
+ old_entry = cache[p]
+ del cache[p]
+ new_entry = updater and updater(p, old_entry)
+ if new_entry is not None:
+ cache[p] = new_entry
+
+
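+# An illustrative updater for _update_zipimporter_cache (invented for this
+# sketch): it reinserts entries whose key ends with '.whl' and lets every
+# other entry stay removed, matching the contract described above.
+def _example_keep_wheels_updater(path, old_entry):
+    # Returning the old entry reinserts it; returning None keeps it removed.
+    return old_entry if path.endswith('.whl') else None
+
+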
+def _uncache(normalized_path, cache):
+ _update_zipimporter_cache(normalized_path, cache)
+
+
+def _remove_and_clear_zip_directory_cache_data(normalized_path):
+ def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
+ old_entry.clear()
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=clear_and_remove_cached_zip_archive_directory_data)
+
+
+# PyPy Python implementation does not allow directly writing to the
+# zipimport._zip_directory_cache and so prevents us from attempting to correct
+# its content. The best we can do there is clear the problematic cache content
+# and have PyPy repopulate it as needed. The downside is that if there are any
+# stale zipimport.zipimporter instances lying around, attempting to use them
+# will fail due to not having their zip archive directory information
+# available, instead of being automatically corrected to use the new correct
+# zip archive directory information.
+if '__pypy__' in sys.builtin_module_names:
+ _replace_zip_directory_cache_data = \
+ _remove_and_clear_zip_directory_cache_data
+else:
+
+ def _replace_zip_directory_cache_data(normalized_path):
+ def replace_cached_zip_archive_directory_data(path, old_entry):
+ # N.B. In theory, we could load the zip directory information just
+ # once for all updated path spellings, and then copy it locally and
+ # update its contained path strings to contain the correct
+ # spelling, but that seems like a way too invasive move (this cache
+ # structure is not officially documented anywhere and could in
+ # theory change with new Python releases) for no significant
+ # benefit.
+ old_entry.clear()
+ zipimport.zipimporter(path)
+ old_entry.update(zipimport._zip_directory_cache[path])
+ return old_entry
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=replace_cached_zip_archive_directory_data)
+
+
+def is_python(text, filename='<string>'):
+ "Is this string a valid Python script?"
+ try:
+ compile(text, filename, 'exec')
+ except (SyntaxError, TypeError):
+ return False
+ else:
+ return True
+
+
+def is_sh(executable):
+ """Determine if the specified executable is a .sh (contains a #! line)"""
+ try:
+ with io.open(executable, encoding='latin-1') as fp:
+ magic = fp.read(2)
+ except (OSError, IOError):
+ return executable
+ return magic == '#!'
+
+
+def nt_quote_arg(arg):
+ """Quote a command line argument according to Windows parsing rules"""
+ return subprocess.list2cmdline([arg])
+
+
+def is_python_script(script_text, filename):
+    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.)
+ """
+ if filename.endswith('.py') or filename.endswith('.pyw'):
+ return True # extension says it's Python
+ if is_python(script_text, filename):
+ return True # it's syntactically valid Python
+ if script_text.startswith('#!'):
+ # It begins with a '#!' line, so check if 'python' is in it somewhere
+ return 'python' in script_text.splitlines()[0].lower()
+
+ return False # Not any Python I can recognize
+
+
+try:
+ from os import chmod as _chmod
+except ImportError:
+ # Jython compatibility
+ def _chmod(*args):
+ pass
+
+
+def chmod(path, mode):
+ log.debug("changing mode of %s to %o", path, mode)
+ try:
+ _chmod(path, mode)
+ except os.error as e:
+ log.debug("chmod failed: %s", e)
+
+
+class CommandSpec(list):
+ """
+ A command spec for a #! header, specified as a list of arguments akin to
+ those passed to Popen.
+ """
+
+ options = []
+ split_args = dict()
+
+ @classmethod
+ def best(cls):
+ """
+ Choose the best CommandSpec class based on environmental conditions.
+ """
+ return cls
+
+ @classmethod
+ def _sys_executable(cls):
+ _default = os.path.normpath(sys.executable)
+ return os.environ.get('__PYVENV_LAUNCHER__', _default)
+
+ @classmethod
+ def from_param(cls, param):
+ """
+ Construct a CommandSpec from a parameter to build_scripts, which may
+ be None.
+ """
+ if isinstance(param, cls):
+ return param
+ if isinstance(param, list):
+ return cls(param)
+ if param is None:
+ return cls.from_environment()
+ # otherwise, assume it's a string.
+ return cls.from_string(param)
+
+ @classmethod
+ def from_environment(cls):
+ return cls([cls._sys_executable()])
+
+ @classmethod
+ def from_string(cls, string):
+ """
+ Construct a command spec from a simple string representing a command
+ line parseable by shlex.split.
+ """
+ items = shlex.split(string, **cls.split_args)
+ return cls(items)
+
+ def install_options(self, script_text):
+ self.options = shlex.split(self._extract_options(script_text))
+ cmdline = subprocess.list2cmdline(self)
+ if not isascii(cmdline):
+ self.options[:0] = ['-x']
+
+ @staticmethod
+ def _extract_options(orig_script):
+ """
+ Extract any options from the first line of the script.
+ """
+ first = (orig_script + '\n').splitlines()[0]
+ match = _first_line_re().match(first)
+ options = match.group(1) or '' if match else ''
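+        # e.g. (hypothetical input): a first line of '#!/usr/bin/python -x'
+        # yields '-x' here, while a script without a python shebang yields ''.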
+ return options.strip()
+
+ def as_header(self):
+ return self._render(self + list(self.options))
+
+ @staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
@@ -2028,31 +2028,31 @@ class CommandSpec(list):
return item
@staticmethod
- def _render(items):
+ def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
- return '#!' + cmdline + '\n'
-
-
-# For pbr compat; will be removed in a future version.
-sys_executable = CommandSpec._sys_executable()
-
-
-class WindowsCommandSpec(CommandSpec):
- split_args = dict(posix=False)
-
-
+ return '#!' + cmdline + '\n'
+
+
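+# A short, hedged usage sketch for CommandSpec (the rendered header is
+# illustrative and depends on the interpreter in use):
+def _command_spec_demo():
+    spec = CommandSpec.from_param(None)  # falls back to sys.executable
+    return spec.as_header()              # e.g. '#!/usr/bin/python3\n'
+
+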
+# For pbr compat; will be removed in a future version.
+sys_executable = CommandSpec._sys_executable()
+
+
+class WindowsCommandSpec(CommandSpec):
+ split_args = dict(posix=False)
+
+
class ScriptWriter:
- """
- Encapsulates behavior around writing entry point scripts for console and
- gui apps.
- """
-
+ """
+ Encapsulates behavior around writing entry point scripts for console and
+ gui apps.
+ """
+
template = textwrap.dedent(r"""
- # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
+ # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
import re
- import sys
-
+ import sys
+
# for compatibility with easy_install; see #2198
__requires__ = %(spec)r
@@ -2078,221 +2078,221 @@ class ScriptWriter:
globals().setdefault('load_entry_point', importlib_load_entry_point)
- if __name__ == '__main__':
+ if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
""").lstrip()
-
- command_spec_class = CommandSpec
-
- @classmethod
- def get_script_args(cls, dist, executable=None, wininst=False):
- # for backward compatibility
+
+ command_spec_class = CommandSpec
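+    # Rendered for a hypothetical entry point 'mytool = mypkg.cli:main' in a
+    # distribution 'mypkg 1.0', the template above begins roughly:
+    #   # EASY-INSTALL-ENTRY-SCRIPT: 'mypkg==1.0','console_scripts','mytool'
+    #   __requires__ = 'mypkg==1.0'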
+
+ @classmethod
+ def get_script_args(cls, dist, executable=None, wininst=False):
+ # for backward compatibility
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
- writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
- header = cls.get_script_header("", executable, wininst)
- return writer.get_args(dist, header)
-
- @classmethod
- def get_script_header(cls, script_text, executable=None, wininst=False):
- # for backward compatibility
+ writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
+ header = cls.get_script_header("", executable, wininst)
+ return writer.get_args(dist, header)
+
+ @classmethod
+ def get_script_header(cls, script_text, executable=None, wininst=False):
+ # for backward compatibility
warnings.warn(
"Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
- if wininst:
- executable = "python.exe"
+ if wininst:
+ executable = "python.exe"
return cls.get_header(script_text, executable)
-
- @classmethod
- def get_args(cls, dist, header=None):
- """
- Yield write_script() argument tuples for a distribution's
- console_scripts and gui_scripts entry points.
- """
- if header is None:
- header = cls.get_header()
- spec = str(dist.as_requirement())
- for type_ in 'console', 'gui':
- group = type_ + '_scripts'
- for name, ep in dist.get_entry_map(group).items():
- cls._ensure_safe_name(name)
- script_text = cls.template % locals()
- args = cls._get_script_args(type_, name, header, script_text)
- for res in args:
- yield res
-
- @staticmethod
- def _ensure_safe_name(name):
- """
- Prevent paths in *_scripts entry point names.
- """
- has_path_sep = re.search(r'[\\/]', name)
- if has_path_sep:
- raise ValueError("Path separators not allowed in script names")
-
- @classmethod
- def get_writer(cls, force_windows):
- # for backward compatibility
+
+ @classmethod
+ def get_args(cls, dist, header=None):
+ """
+ Yield write_script() argument tuples for a distribution's
+ console_scripts and gui_scripts entry points.
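+
+        For a hypothetical distribution 'mypkg 1.0' with a console script
+        'mytool', this yields ('mytool', header + script_text) tuples; the
+        Windows writers below add launcher and manifest tuples as well.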
+ """
+ if header is None:
+ header = cls.get_header()
+ spec = str(dist.as_requirement())
+ for type_ in 'console', 'gui':
+ group = type_ + '_scripts'
+ for name, ep in dist.get_entry_map(group).items():
+ cls._ensure_safe_name(name)
+ script_text = cls.template % locals()
+ args = cls._get_script_args(type_, name, header, script_text)
+ for res in args:
+ yield res
+
+ @staticmethod
+ def _ensure_safe_name(name):
+ """
+ Prevent paths in *_scripts entry point names.
+ """
+ has_path_sep = re.search(r'[\\/]', name)
+ if has_path_sep:
+ raise ValueError("Path separators not allowed in script names")
+
+ @classmethod
+ def get_writer(cls, force_windows):
+ # for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
- return WindowsScriptWriter.best() if force_windows else cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter for this environment.
- """
- if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
- return WindowsScriptWriter.best()
- else:
- return cls
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- # Simply write the stub with no extension.
- yield (name, header + script_text)
-
- @classmethod
- def get_header(cls, script_text="", executable=None):
- """Create a #! line, getting options (if any) from script_text"""
- cmd = cls.command_spec_class.best().from_param(executable)
- cmd.install_options(script_text)
- return cmd.as_header()
-
-
-class WindowsScriptWriter(ScriptWriter):
- command_spec_class = WindowsCommandSpec
-
- @classmethod
- def get_writer(cls):
- # for backward compatibility
+ return WindowsScriptWriter.best() if force_windows else cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter for this environment.
+ """
+ if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
+ return WindowsScriptWriter.best()
+ else:
+ return cls
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ # Simply write the stub with no extension.
+ yield (name, header + script_text)
+
+ @classmethod
+ def get_header(cls, script_text="", executable=None):
+ """Create a #! line, getting options (if any) from script_text"""
+ cmd = cls.command_spec_class.best().from_param(executable)
+ cmd.install_options(script_text)
+ return cmd.as_header()
+
+
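+# A hedged sketch of how the writer classes above are typically driven;
+# 'dist' is assumed to be a pkg_resources.Distribution and 'write_script'
+# stands in for whatever helper persists the generated files.
+def _script_writer_demo(dist, write_script):
+    writer = ScriptWriter.best()
+    header = writer.get_header()         # '#!<python>\n'
+    for args in writer.get_args(dist, header):
+        write_script(*args)              # (name, contents[, mode, blockers])
+
+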
+class WindowsScriptWriter(ScriptWriter):
+ command_spec_class = WindowsCommandSpec
+
+ @classmethod
+ def get_writer(cls):
+ # for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
- return cls.best()
-
- @classmethod
- def best(cls):
- """
- Select the best ScriptWriter suitable for Windows
- """
- writer_lookup = dict(
- executable=WindowsExecutableLauncherWriter,
- natural=cls,
- )
- # for compatibility, use the executable launcher by default
- launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
- return writer_lookup[launcher]
-
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- "For Windows, add a .py extension"
- ext = dict(console='.pya', gui='.pyw')[type_]
- if ext not in os.environ['PATHEXT'].lower().split(';'):
+ return cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter suitable for Windows
+ """
+ writer_lookup = dict(
+ executable=WindowsExecutableLauncherWriter,
+ natural=cls,
+ )
+ # for compatibility, use the executable launcher by default
+ launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
+ return writer_lookup[launcher]
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ "For Windows, add a .py extension"
+ ext = dict(console='.pya', gui='.pyw')[type_]
+ if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
- old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
- old.remove(ext)
- header = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield name + ext, header + script_text, 't', blockers
-
- @classmethod
- def _adjust_header(cls, type_, orig_header):
- """
+ old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
+ old.remove(ext)
+ header = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield name + ext, header + script_text, 't', blockers
+
+ @classmethod
+ def _adjust_header(cls, type_, orig_header):
+ """
Make sure 'pythonw' is used for gui and 'python' is used for
- console (regardless of what sys.executable is).
- """
- pattern = 'pythonw.exe'
- repl = 'python.exe'
- if type_ == 'gui':
- pattern, repl = repl, pattern
- pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
- new_header = pattern_ob.sub(string=orig_header, repl=repl)
- return new_header if cls._use_header(new_header) else orig_header
-
- @staticmethod
- def _use_header(new_header):
- """
- Should _adjust_header use the replaced header?
-
- On non-windows systems, always use. On
- Windows systems, only use the replaced header if it resolves
- to an executable on the system.
- """
- clean_header = new_header[2:-1].strip('"')
- return sys.platform != 'win32' or find_executable(clean_header)
-
-
-class WindowsExecutableLauncherWriter(WindowsScriptWriter):
- @classmethod
- def _get_script_args(cls, type_, name, header, script_text):
- """
- For Windows, add a .py extension and an .exe launcher
- """
- if type_ == 'gui':
- launcher_type = 'gui'
- ext = '-script.pyw'
- old = ['.pyw']
- else:
- launcher_type = 'cli'
- ext = '-script.py'
- old = ['.py', '.pyc', '.pyo']
- hdr = cls._adjust_header(type_, header)
- blockers = [name + x for x in old]
- yield (name + ext, hdr + script_text, 't', blockers)
- yield (
- name + '.exe', get_win_launcher(launcher_type),
- 'b' # write in binary mode
- )
- if not is_64bit():
- # install a manifest for the launcher to prevent Windows
- # from detecting it as an installer (which it will for
- # launchers like easy_install.exe). Consider only
- # adding a manifest for launchers detected as installers.
- # See Distribute #143 for details.
- m_name = name + '.exe.manifest'
- yield (m_name, load_launcher_manifest(name), 't')
-
-
-# for backward-compatibility
-get_script_args = ScriptWriter.get_script_args
-get_script_header = ScriptWriter.get_script_header
-
-
-def get_win_launcher(type):
- """
- Load the Windows launcher (executable) suitable for launching a script.
-
- `type` should be either 'cli' or 'gui'
-
- Returns the executable as a byte string.
- """
- launcher_fn = '%s.exe' % type
- if is_64bit():
+ console (regardless of what sys.executable is).
+ """
+ pattern = 'pythonw.exe'
+ repl = 'python.exe'
+ if type_ == 'gui':
+ pattern, repl = repl, pattern
+ pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
+ new_header = pattern_ob.sub(string=orig_header, repl=repl)
+ return new_header if cls._use_header(new_header) else orig_header
+
+ @staticmethod
+ def _use_header(new_header):
+ """
+ Should _adjust_header use the replaced header?
+
+ On non-windows systems, always use. On
+ Windows systems, only use the replaced header if it resolves
+ to an executable on the system.
+ """
+ clean_header = new_header[2:-1].strip('"')
+ return sys.platform != 'win32' or find_executable(clean_header)
+
+
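+# Illustrative behaviour of the header adjustment above (paths invented; the
+# swap only sticks when _use_header accepts the replacement):
+#   _adjust_header('gui', '#!C:\\Py\\python.exe\n') -> '#!C:\\Py\\pythonw.exe\n'
+#   _adjust_header('console', '#!C:\\Py\\pythonw.exe\n') -> '#!C:\\Py\\python.exe\n'
+
+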
+class WindowsExecutableLauncherWriter(WindowsScriptWriter):
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ """
+ For Windows, add a .py extension and an .exe launcher
+ """
+ if type_ == 'gui':
+ launcher_type = 'gui'
+ ext = '-script.pyw'
+ old = ['.pyw']
+ else:
+ launcher_type = 'cli'
+ ext = '-script.py'
+ old = ['.py', '.pyc', '.pyo']
+ hdr = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield (name + ext, hdr + script_text, 't', blockers)
+ yield (
+ name + '.exe', get_win_launcher(launcher_type),
+ 'b' # write in binary mode
+ )
+ if not is_64bit():
+ # install a manifest for the launcher to prevent Windows
+ # from detecting it as an installer (which it will for
+ # launchers like easy_install.exe). Consider only
+ # adding a manifest for launchers detected as installers.
+ # See Distribute #143 for details.
+ m_name = name + '.exe.manifest'
+ yield (m_name, load_launcher_manifest(name), 't')
+
+
+# for backward-compatibility
+get_script_args = ScriptWriter.get_script_args
+get_script_header = ScriptWriter.get_script_header
+
+
+def get_win_launcher(type):
+ """
+ Load the Windows launcher (executable) suitable for launching a script.
+
+ `type` should be either 'cli' or 'gui'
+
+ Returns the executable as a byte string.
+ """
+ launcher_fn = '%s.exe' % type
+ if is_64bit():
if get_platform() == "win-arm64":
launcher_fn = launcher_fn.replace(".", "-arm64.")
else:
launcher_fn = launcher_fn.replace(".", "-64.")
- else:
- launcher_fn = launcher_fn.replace(".", "-32.")
- return resource_string('setuptools', launcher_fn)
-
-
-def load_launcher_manifest(name):
- manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
+ else:
+ launcher_fn = launcher_fn.replace(".", "-32.")
+ return resource_string('setuptools', launcher_fn)
+
+
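+# Per the suffix logic above, get_win_launcher('cli') resolves to the
+# 'cli-64.exe' resource on 64-bit Windows, 'cli-arm64.exe' on win-arm64,
+# and 'cli-32.exe' on 32-bit interpreters.
+
+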
+def load_launcher_manifest(name):
+ manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
return manifest.decode('utf-8') % vars()
-
-
-def rmtree(path, ignore_errors=False, onerror=auto_chmod):
+
+
+def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
-
-
-def current_umask():
- tmp = os.umask(0o022)
- os.umask(tmp)
- return tmp
-
-
+
+
+def current_umask():
+ tmp = os.umask(0o022)
+ os.umask(tmp)
+ return tmp
+
+
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
"""
Warning for EasyInstall deprecations, bypassing suppression.
diff --git a/contrib/python/setuptools/py3/setuptools/command/egg_info.py b/contrib/python/setuptools/py3/setuptools/command/egg_info.py
index 2f2f768fef..f2210292e3 100644
--- a/contrib/python/setuptools/py3/setuptools/command/egg_info.py
+++ b/contrib/python/setuptools/py3/setuptools/command/egg_info.py
@@ -1,36 +1,36 @@
-"""setuptools.command.egg_info
-
-Create a distribution's .egg-info directory and contents"""
-
-from distutils.filelist import FileList as _FileList
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
-from distutils.util import convert_path
-from distutils import log
-import distutils.errors
-import distutils.filelist
+from distutils.util import convert_path
+from distutils import log
+import distutils.errors
+import distutils.filelist
import functools
-import os
-import re
-import sys
-import io
-import warnings
-import time
+import os
+import re
+import sys
+import io
+import warnings
+import time
import collections
-
-from setuptools import Command
-from setuptools.command.sdist import sdist
-from setuptools.command.sdist import walk_revctrl
-from setuptools.command.setopt import edit_config
-from setuptools.command import bdist_egg
-from pkg_resources import (
- parse_requirements, safe_name, parse_version,
- safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
-import setuptools.unicode_utils as unicode_utils
+
+from setuptools import Command
+from setuptools.command.sdist import sdist
+from setuptools.command.sdist import walk_revctrl
+from setuptools.command.setopt import edit_config
+from setuptools.command import bdist_egg
+from pkg_resources import (
+ parse_requirements, safe_name, parse_version,
+ safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
+import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
-
+
from setuptools.extern import packaging
from setuptools import SetuptoolsDeprecationWarning
-
+
def translate_pattern(glob): # noqa: C901 # is too complex (14) # FIXME
"""
@@ -40,7 +40,7 @@ def translate_pattern(glob): # noqa: C901 # is too complex (14) # FIXME
directories.
"""
pat = ''
-
+
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
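+    # Illustratively (patterns invented): 'src/**/*.py' translates to a
+    # regex matching 'src/a.py' and 'src/pkg/b.py' but not 'docs/c.txt';
+    # '**' crosses directory boundaries while a single '*' does not.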
@@ -147,28 +147,28 @@ class InfoCommon:
class egg_info(InfoCommon, Command):
- description = "create a distribution's .egg-info directory"
-
- user_options = [
- ('egg-base=', 'e', "directory containing .egg-info directories"
- " (default: top of the source tree)"),
- ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
- ('tag-build=', 'b', "Specify explicit tag to add to version number"),
- ('no-date', 'D', "Don't include date stamp [default]"),
- ]
-
+ description = "create a distribution's .egg-info directory"
+
+ user_options = [
+ ('egg-base=', 'e', "directory containing .egg-info directories"
+ " (default: top of the source tree)"),
+ ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+ ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+ ('no-date', 'D', "Don't include date stamp [default]"),
+ ]
+
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
-
- def initialize_options(self):
+
+ def initialize_options(self):
self.egg_base = None
- self.egg_name = None
+ self.egg_name = None
self.egg_info = None
- self.egg_version = None
- self.broken_egg_info = False
-
+ self.egg_version = None
+ self.broken_egg_info = False
+
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@@ -181,7 +181,7 @@ class egg_info(InfoCommon, Command):
pass
####################################
- def save_version_info(self, filename):
+ def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
@@ -193,138 +193,138 @@ class egg_info(InfoCommon, Command):
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
-
- def finalize_options(self):
+
+ def finalize_options(self):
# Note: we need to capture the current value returned
# by `self.tagged_version()`, so we can later update
# `self.distribution.metadata.version` without
# repercussions.
self.egg_name = self.name
- self.egg_version = self.tagged_version()
- parsed_version = parse_version(self.egg_version)
-
- try:
- is_version = isinstance(parsed_version, packaging.version.Version)
- spec = (
- "%s==%s" if is_version else "%s===%s"
- )
- list(
- parse_requirements(spec % (self.egg_name, self.egg_version))
- )
+ self.egg_version = self.tagged_version()
+ parsed_version = parse_version(self.egg_version)
+
+ try:
+ is_version = isinstance(parsed_version, packaging.version.Version)
+ spec = (
+ "%s==%s" if is_version else "%s===%s"
+ )
+ list(
+ parse_requirements(spec % (self.egg_name, self.egg_version))
+ )
except ValueError as e:
- raise distutils.errors.DistutilsOptionError(
- "Invalid distribution name or version syntax: %s-%s" %
- (self.egg_name, self.egg_version)
+ raise distutils.errors.DistutilsOptionError(
+ "Invalid distribution name or version syntax: %s-%s" %
+ (self.egg_name, self.egg_version)
) from e
-
- if self.egg_base is None:
- dirs = self.distribution.package_dir
- self.egg_base = (dirs or {}).get('', os.curdir)
-
- self.ensure_dirname('egg_base')
- self.egg_info = to_filename(self.egg_name) + '.egg-info'
- if self.egg_base != os.curdir:
- self.egg_info = os.path.join(self.egg_base, self.egg_info)
- if '-' in self.egg_name:
- self.check_broken_egg_info()
-
- # Set package version for the benefit of dumber commands
- # (e.g. sdist, bdist_wininst, etc.)
- #
- self.distribution.metadata.version = self.egg_version
-
- # If we bootstrapped around the lack of a PKG-INFO, as might be the
- # case in a fresh checkout, make sure that any special tags get added
- # to the version info
- #
- pd = self.distribution._patched_dist
- if pd is not None and pd.key == self.egg_name.lower():
- pd._version = self.egg_version
- pd._parsed_version = parse_version(self.egg_version)
- self.distribution._patched_dist = None
-
- def write_or_delete_file(self, what, filename, data, force=False):
- """Write `data` to `filename` or delete if empty
-
- If `data` is non-empty, this routine is the same as ``write_file()``.
-        If `data` is empty but not ``None``, this is the same as calling
-        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
-        unless `filename` exists, in which case a warning is issued about the
-        orphaned file (if `force` is false), or the file is deleted (if
-        `force` is true).
- """
- if data:
- self.write_file(what, filename, data)
- elif os.path.exists(filename):
- if data is None and not force:
- log.warn(
- "%s not set in setup(), but %s exists", what, filename
- )
- return
- else:
- self.delete_file(filename)
-
- def write_file(self, what, filename, data):
- """Write `data` to `filename` (if not a dry run) after announcing it
-
- `what` is used in a log message to identify what is being written
- to the file.
- """
- log.info("writing %s to %s", what, filename)
+
+ if self.egg_base is None:
+ dirs = self.distribution.package_dir
+ self.egg_base = (dirs or {}).get('', os.curdir)
+
+ self.ensure_dirname('egg_base')
+ self.egg_info = to_filename(self.egg_name) + '.egg-info'
+ if self.egg_base != os.curdir:
+ self.egg_info = os.path.join(self.egg_base, self.egg_info)
+ if '-' in self.egg_name:
+ self.check_broken_egg_info()
+
+ # Set package version for the benefit of dumber commands
+ # (e.g. sdist, bdist_wininst, etc.)
+ #
+ self.distribution.metadata.version = self.egg_version
+
+ # If we bootstrapped around the lack of a PKG-INFO, as might be the
+ # case in a fresh checkout, make sure that any special tags get added
+ # to the version info
+ #
+ pd = self.distribution._patched_dist
+ if pd is not None and pd.key == self.egg_name.lower():
+ pd._version = self.egg_version
+ pd._parsed_version = parse_version(self.egg_version)
+ self.distribution._patched_dist = None
+
+ def write_or_delete_file(self, what, filename, data, force=False):
+ """Write `data` to `filename` or delete if empty
+
+ If `data` is non-empty, this routine is the same as ``write_file()``.
+        If `data` is empty but not ``None``, this is the same as calling
+        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
+        unless `filename` exists, in which case a warning is issued about the
+        orphaned file (if `force` is false), or the file is deleted (if
+        `force` is true).
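+
+        For instance (invented values): passing data='' deletes an existing
+        stale file, while data=None merely warns about it unless `force` is
+        true.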
+ """
+ if data:
+ self.write_file(what, filename, data)
+ elif os.path.exists(filename):
+ if data is None and not force:
+ log.warn(
+ "%s not set in setup(), but %s exists", what, filename
+ )
+ return
+ else:
+ self.delete_file(filename)
+
+ def write_file(self, what, filename, data):
+ """Write `data` to `filename` (if not a dry run) after announcing it
+
+ `what` is used in a log message to identify what is being written
+ to the file.
+ """
+ log.info("writing %s to %s", what, filename)
data = data.encode("utf-8")
- if not self.dry_run:
- f = open(filename, 'wb')
- f.write(data)
- f.close()
-
- def delete_file(self, filename):
- """Delete `filename` (if not a dry run) after announcing it"""
- log.info("deleting %s", filename)
- if not self.dry_run:
- os.unlink(filename)
-
- def run(self):
- self.mkpath(self.egg_info)
+ if not self.dry_run:
+ f = open(filename, 'wb')
+ f.write(data)
+ f.close()
+
+ def delete_file(self, filename):
+ """Delete `filename` (if not a dry run) after announcing it"""
+ log.info("deleting %s", filename)
+ if not self.dry_run:
+ os.unlink(filename)
+
+ def run(self):
+ self.mkpath(self.egg_info)
os.utime(self.egg_info, None)
- installer = self.distribution.fetch_build_egg
- for ep in iter_entry_points('egg_info.writers'):
- ep.require(installer=installer)
- writer = ep.resolve()
- writer(self, ep.name, os.path.join(self.egg_info, ep.name))
-
- # Get rid of native_libs.txt if it was put there by older bdist_egg
- nl = os.path.join(self.egg_info, "native_libs.txt")
- if os.path.exists(nl):
- self.delete_file(nl)
-
- self.find_sources()
-
- def find_sources(self):
- """Generate SOURCES.txt manifest file"""
- manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
- mm = manifest_maker(self.distribution)
- mm.manifest = manifest_filename
- mm.run()
- self.filelist = mm.filelist
-
- def check_broken_egg_info(self):
- bei = self.egg_name + '.egg-info'
- if self.egg_base != os.curdir:
- bei = os.path.join(self.egg_base, bei)
- if os.path.exists(bei):
- log.warn(
- "-" * 78 + '\n'
- "Note: Your current .egg-info directory has a '-' in its name;"
- '\nthis will not work correctly with "setup.py develop".\n\n'
- 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
- bei, self.egg_info
- )
- self.broken_egg_info = self.egg_info
- self.egg_info = bei # make it work for now
-
-
-class FileList(_FileList):
+ installer = self.distribution.fetch_build_egg
+ for ep in iter_entry_points('egg_info.writers'):
+ ep.require(installer=installer)
+ writer = ep.resolve()
+ writer(self, ep.name, os.path.join(self.egg_info, ep.name))
+
+ # Get rid of native_libs.txt if it was put there by older bdist_egg
+ nl = os.path.join(self.egg_info, "native_libs.txt")
+ if os.path.exists(nl):
+ self.delete_file(nl)
+
+ self.find_sources()
+
+ def find_sources(self):
+ """Generate SOURCES.txt manifest file"""
+ manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
+ mm = manifest_maker(self.distribution)
+ mm.manifest = manifest_filename
+ mm.run()
+ self.filelist = mm.filelist
+
+ def check_broken_egg_info(self):
+ bei = self.egg_name + '.egg-info'
+ if self.egg_base != os.curdir:
+ bei = os.path.join(self.egg_base, bei)
+ if os.path.exists(bei):
+ log.warn(
+ "-" * 78 + '\n'
+ "Note: Your current .egg-info directory has a '-' in its name;"
+ '\nthis will not work correctly with "setup.py develop".\n\n'
+ 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
+ bei, self.egg_info
+ )
+ self.broken_egg_info = self.egg_info
+ self.egg_info = bei # make it work for now
+
+
+class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
-
+
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
@@ -477,96 +477,96 @@ class FileList(_FileList):
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
- def append(self, item):
- if item.endswith('\r'): # Fix older sdists built on Windows
- item = item[:-1]
- path = convert_path(item)
-
- if self._safe_path(path):
- self.files.append(path)
-
- def extend(self, paths):
- self.files.extend(filter(self._safe_path, paths))
-
- def _repair(self):
- """
- Replace self.files with only safe paths
-
- Because some owners of FileList manipulate the underlying
- ``files`` attribute directly, this method must be called to
- repair those paths.
- """
- self.files = list(filter(self._safe_path, self.files))
-
- def _safe_path(self, path):
- enc_warn = "'%s' not %s encodable -- skipping"
-
-        # To avoid accidental transcoding errors, first decode to unicode
- u_path = unicode_utils.filesys_decode(path)
- if u_path is None:
- log.warn("'%s' in unexpected encoding -- skipping" % path)
- return False
-
- # Must ensure utf-8 encodability
- utf8_path = unicode_utils.try_encode(u_path, "utf-8")
- if utf8_path is None:
- log.warn(enc_warn, path, 'utf-8')
- return False
-
- try:
-            # accept if either way checks out
- if os.path.exists(u_path) or os.path.exists(utf8_path):
- return True
-        # this will catch any encode errors raised while encoding u_path
- except UnicodeEncodeError:
- log.warn(enc_warn, path, sys.getfilesystemencoding())
-
-
-class manifest_maker(sdist):
- template = "MANIFEST.in"
-
- def initialize_options(self):
- self.use_defaults = 1
- self.prune = 1
- self.manifest_only = 1
- self.force_manifest = 1
-
- def finalize_options(self):
- pass
-
- def run(self):
- self.filelist = FileList()
- if not os.path.exists(self.manifest):
- self.write_manifest() # it must exist so it'll get in the list
- self.add_defaults()
- if os.path.exists(self.template):
- self.read_template()
+ def append(self, item):
+ if item.endswith('\r'): # Fix older sdists built on Windows
+ item = item[:-1]
+ path = convert_path(item)
+
+ if self._safe_path(path):
+ self.files.append(path)
+
+ def extend(self, paths):
+ self.files.extend(filter(self._safe_path, paths))
+
+ def _repair(self):
+ """
+ Replace self.files with only safe paths
+
+ Because some owners of FileList manipulate the underlying
+ ``files`` attribute directly, this method must be called to
+ repair those paths.
+ """
+ self.files = list(filter(self._safe_path, self.files))
+
+ def _safe_path(self, path):
+ enc_warn = "'%s' not %s encodable -- skipping"
+
+        # To avoid accidental transcoding errors, first decode to unicode
+ u_path = unicode_utils.filesys_decode(path)
+ if u_path is None:
+ log.warn("'%s' in unexpected encoding -- skipping" % path)
+ return False
+
+ # Must ensure utf-8 encodability
+ utf8_path = unicode_utils.try_encode(u_path, "utf-8")
+ if utf8_path is None:
+ log.warn(enc_warn, path, 'utf-8')
+ return False
+
+ try:
+            # accept if either way checks out
+ if os.path.exists(u_path) or os.path.exists(utf8_path):
+ return True
+        # this will catch any encode errors raised while encoding u_path
+ except UnicodeEncodeError:
+ log.warn(enc_warn, path, sys.getfilesystemencoding())
+
+
+class manifest_maker(sdist):
+ template = "MANIFEST.in"
+
+ def initialize_options(self):
+ self.use_defaults = 1
+ self.prune = 1
+ self.manifest_only = 1
+ self.force_manifest = 1
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ self.filelist = FileList()
+ if not os.path.exists(self.manifest):
+ self.write_manifest() # it must exist so it'll get in the list
+ self.add_defaults()
+ if os.path.exists(self.template):
+ self.read_template()
self.add_license_files()
- self.prune_file_list()
- self.filelist.sort()
- self.filelist.remove_duplicates()
- self.write_manifest()
-
- def _manifest_normalize(self, path):
- path = unicode_utils.filesys_decode(path)
- return path.replace(os.sep, '/')
-
- def write_manifest(self):
- """
- Write the file list in 'self.filelist' to the manifest file
- named by 'self.manifest'.
- """
- self.filelist._repair()
-
-        # _repair has ensured encodability, but not decoded the paths to unicode
- files = [self._manifest_normalize(f) for f in self.filelist.files]
- msg = "writing manifest file '%s'" % self.manifest
- self.execute(write_file, (self.manifest, files), msg)
-
+ self.prune_file_list()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def _manifest_normalize(self, path):
+ path = unicode_utils.filesys_decode(path)
+ return path.replace(os.sep, '/')
+
+ def write_manifest(self):
+ """
+ Write the file list in 'self.filelist' to the manifest file
+ named by 'self.manifest'.
+ """
+ self.filelist._repair()
+
+        # _repair has ensured encodability, but not decoded the paths to unicode
+ files = [self._manifest_normalize(f) for f in self.filelist.files]
+ msg = "writing manifest file '%s'" % self.manifest
+ self.execute(write_file, (self.manifest, files), msg)
+
def warn(self, msg):
if not self._should_suppress_warning(msg):
- sdist.warn(self, msg)
-
+ sdist.warn(self, msg)
+
@staticmethod
def _should_suppress_warning(msg):
"""
@@ -574,24 +574,24 @@ class manifest_maker(sdist):
"""
return re.match(r"standard file .*not found", msg)
- def add_defaults(self):
- sdist.add_defaults(self)
- self.filelist.append(self.template)
- self.filelist.append(self.manifest)
- rcfiles = list(walk_revctrl())
- if rcfiles:
- self.filelist.extend(rcfiles)
- elif os.path.exists(self.manifest):
- self.read_manifest()
+ def add_defaults(self):
+ sdist.add_defaults(self)
+ self.filelist.append(self.template)
+ self.filelist.append(self.manifest)
+ rcfiles = list(walk_revctrl())
+ if rcfiles:
+ self.filelist.extend(rcfiles)
+ elif os.path.exists(self.manifest):
+ self.read_manifest()
if os.path.exists("setup.py"):
# setup.py should be included by default, even if it's not
# the script called to create the sdist
self.filelist.append("setup.py")
- ei_cmd = self.get_finalized_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
-
+
def add_license_files(self):
license_files = self.distribution.metadata.license_files or []
for lf in license_files:
@@ -599,22 +599,22 @@ class manifest_maker(sdist):
pass
self.filelist.extend(license_files)
- def prune_file_list(self):
- build = self.get_finalized_command('build')
- base_dir = self.distribution.get_fullname()
+ def prune_file_list(self):
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
- sep = re.escape(os.sep)
- self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
- is_regex=1)
-
+ sep = re.escape(os.sep)
+ self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
+ is_regex=1)
+
def _safe_data_files(self, build_py):
"""
The parent class implementation of this method
(``sdist``) will try to include data files, which
might cause recursion problems when
``include_package_data=True``.
-
+
Therefore, avoid triggering any attempt of
analyzing/building the manifest again.
"""
@@ -630,125 +630,125 @@ class manifest_maker(sdist):
return build_py.get_data_files()
-def write_file(filename, contents):
- """Create a file with the specified name and write 'contents' (a
- sequence of strings without line terminators) to it.
- """
- contents = "\n".join(contents)
-
- # assuming the contents has been vetted for utf-8 encoding
- contents = contents.encode("utf-8")
-
- with open(filename, "wb") as f: # always write POSIX-style manifest
- f.write(contents)
-
-
-def write_pkg_info(cmd, basename, filename):
- log.info("writing %s", filename)
- if not cmd.dry_run:
- metadata = cmd.distribution.metadata
- metadata.version, oldver = cmd.egg_version, metadata.version
- metadata.name, oldname = cmd.egg_name, metadata.name
-
- try:
- # write unescaped data to PKG-INFO, so older pkg_resources
- # can still parse it
- metadata.write_pkg_info(cmd.egg_info)
- finally:
- metadata.name, metadata.version = oldname, oldver
-
- safe = getattr(cmd.distribution, 'zip_safe', None)
-
- bdist_egg.write_safety_flag(cmd.egg_info, safe)
-
-
-def warn_depends_obsolete(cmd, basename, filename):
- if os.path.exists(filename):
- log.warn(
- "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
- "Use the install_requires/extras_require setup() args instead."
- )
-
-
-def _write_requirements(stream, reqs):
- lines = yield_lines(reqs or ())
+def write_file(filename, contents):
+ """Create a file with the specified name and write 'contents' (a
+ sequence of strings without line terminators) to it.
+ """
+ contents = "\n".join(contents)
+
+ # assuming the contents has been vetted for utf-8 encoding
+ contents = contents.encode("utf-8")
+
+ with open(filename, "wb") as f: # always write POSIX-style manifest
+ f.write(contents)
+
+
+def write_pkg_info(cmd, basename, filename):
+ log.info("writing %s", filename)
+ if not cmd.dry_run:
+ metadata = cmd.distribution.metadata
+ metadata.version, oldver = cmd.egg_version, metadata.version
+ metadata.name, oldname = cmd.egg_name, metadata.name
+
+ try:
+ # write unescaped data to PKG-INFO, so older pkg_resources
+ # can still parse it
+ metadata.write_pkg_info(cmd.egg_info)
+ finally:
+ metadata.name, metadata.version = oldname, oldver
+
+ safe = getattr(cmd.distribution, 'zip_safe', None)
+
+ bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+
+def warn_depends_obsolete(cmd, basename, filename):
+ if os.path.exists(filename):
+ log.warn(
+ "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+
+def _write_requirements(stream, reqs):
+ lines = yield_lines(reqs or ())
def append_cr(line):
return line + '\n'
- lines = map(append_cr, lines)
- stream.writelines(lines)
-
-
-def write_requirements(cmd, basename, filename):
- dist = cmd.distribution
+ lines = map(append_cr, lines)
+ stream.writelines(lines)
+
+
+def write_requirements(cmd, basename, filename):
+ dist = cmd.distribution
data = io.StringIO()
- _write_requirements(data, dist.install_requires)
- extras_require = dist.extras_require or {}
- for extra in sorted(extras_require):
- data.write('\n[{extra}]\n'.format(**vars()))
- _write_requirements(data, extras_require[extra])
- cmd.write_or_delete_file("requirements", filename, data.getvalue())
-
-
-def write_setup_requirements(cmd, basename, filename):
+ _write_requirements(data, dist.install_requires)
+ extras_require = dist.extras_require or {}
+ for extra in sorted(extras_require):
+ data.write('\n[{extra}]\n'.format(**vars()))
+ _write_requirements(data, extras_require[extra])
+ cmd.write_or_delete_file("requirements", filename, data.getvalue())
+
+
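+# Illustrative requires.txt content produced above for a distribution with
+# install_requires=['requests'] and extras_require={'dev': ['pytest']}
+# (both values invented):
+#
+#     requests
+#
+#     [dev]
+#     pytest
+
+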
+def write_setup_requirements(cmd, basename, filename):
data = io.StringIO()
- _write_requirements(data, cmd.distribution.setup_requires)
- cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
-
-
-def write_toplevel_names(cmd, basename, filename):
- pkgs = dict.fromkeys(
- [
- k.split('.', 1)[0]
- for k in cmd.distribution.iter_distribution_names()
- ]
- )
- cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
-
-
-def overwrite_arg(cmd, basename, filename):
- write_arg(cmd, basename, filename, True)
-
-
-def write_arg(cmd, basename, filename, force=False):
- argname = os.path.splitext(basename)[0]
- value = getattr(cmd.distribution, argname, None)
- if value is not None:
- value = '\n'.join(value) + '\n'
- cmd.write_or_delete_file(argname, filename, value, force)
-
-
-def write_entries(cmd, basename, filename):
- ep = cmd.distribution.entry_points
-
+ _write_requirements(data, cmd.distribution.setup_requires)
+ cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
+
+
+def write_toplevel_names(cmd, basename, filename):
+ pkgs = dict.fromkeys(
+ [
+ k.split('.', 1)[0]
+ for k in cmd.distribution.iter_distribution_names()
+ ]
+ )
+ cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
+
+
+def overwrite_arg(cmd, basename, filename):
+ write_arg(cmd, basename, filename, True)
+
+
+def write_arg(cmd, basename, filename, force=False):
+ argname = os.path.splitext(basename)[0]
+ value = getattr(cmd.distribution, argname, None)
+ if value is not None:
+ value = '\n'.join(value) + '\n'
+ cmd.write_or_delete_file(argname, filename, value, force)
+
+
+def write_entries(cmd, basename, filename):
+ ep = cmd.distribution.entry_points
+
if isinstance(ep, str) or ep is None:
- data = ep
-    else:
- data = []
- for section, contents in sorted(ep.items()):
+ data = ep
+    else:
+ data = []
+ for section, contents in sorted(ep.items()):
if not isinstance(contents, str):
- contents = EntryPoint.parse_group(section, contents)
- contents = '\n'.join(sorted(map(str, contents.values())))
- data.append('[%s]\n%s\n\n' % (section, contents))
- data = ''.join(data)
-
- cmd.write_or_delete_file('entry points', filename, data, True)
-
-
-def get_pkg_info_revision():
- """
- Get a -r### off of PKG-INFO Version in case this is an sdist of
- a subversion revision.
- """
+ contents = EntryPoint.parse_group(section, contents)
+ contents = '\n'.join(sorted(map(str, contents.values())))
+ data.append('[%s]\n%s\n\n' % (section, contents))
+ data = ''.join(data)
+
+ cmd.write_or_delete_file('entry points', filename, data, True)
+
+
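+# Illustrative rendering by write_entries (invented data): entry_points of
+# {'console_scripts': ['mytool = mypkg.cli:main']} is written out as
+#
+#     [console_scripts]
+#     mytool = mypkg.cli:main
+
+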
+def get_pkg_info_revision():
+ """
+ Get a -r### off of PKG-INFO Version in case this is an sdist of
+ a subversion revision.
+ """
warnings.warn(
"get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
- if os.path.exists('PKG-INFO'):
- with io.open('PKG-INFO') as f:
- for line in f:
- match = re.match(r"Version:.*-r(\d+)\s*$", line)
- if match:
- return int(match.group(1))
- return 0
+ if os.path.exists('PKG-INFO'):
+ with io.open('PKG-INFO') as f:
+ for line in f:
+ match = re.match(r"Version:.*-r(\d+)\s*$", line)
+ if match:
+ return int(match.group(1))
+ return 0
class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
diff --git a/contrib/python/setuptools/py3/setuptools/command/install.py b/contrib/python/setuptools/py3/setuptools/command/install.py
index 0997eb482f..35e54d2043 100644
--- a/contrib/python/setuptools/py3/setuptools/command/install.py
+++ b/contrib/python/setuptools/py3/setuptools/command/install.py
@@ -1,35 +1,35 @@
-from distutils.errors import DistutilsArgError
-import inspect
-import glob
-import warnings
-import platform
-import distutils.command.install as orig
-
-import setuptools
-
-# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
+from distutils.errors import DistutilsArgError
+import inspect
+import glob
+import warnings
+import platform
+import distutils.command.install as orig
+
+import setuptools
+
+# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
-_install = orig.install
-
-
-class install(orig.install):
-    """Use easy_install to install the package, with dependencies"""
-
- user_options = orig.install.user_options + [
- ('old-and-unmanageable', None, "Try not to use this!"),
- ('single-version-externally-managed', None,
- "used by system package builders to create 'flat' eggs"),
- ]
- boolean_options = orig.install.boolean_options + [
- 'old-and-unmanageable', 'single-version-externally-managed',
- ]
- new_commands = [
- ('install_egg_info', lambda self: True),
- ('install_scripts', lambda self: True),
- ]
- _nc = dict(new_commands)
-
- def initialize_options(self):
+_install = orig.install
+
+
+class install(orig.install):
+    """Use easy_install to install the package, with dependencies"""
+
+ user_options = orig.install.user_options + [
+ ('old-and-unmanageable', None, "Try not to use this!"),
+ ('single-version-externally-managed', None,
+ "used by system package builders to create 'flat' eggs"),
+ ]
+ boolean_options = orig.install.boolean_options + [
+ 'old-and-unmanageable', 'single-version-externally-managed',
+ ]
+ new_commands = [
+ ('install_egg_info', lambda self: True),
+ ('install_scripts', lambda self: True),
+ ]
+ _nc = dict(new_commands)
+
+ def initialize_options(self):
warnings.warn(
"setup.py install is deprecated. "
@@ -37,96 +37,96 @@ class install(orig.install):
setuptools.SetuptoolsDeprecationWarning,
)
- orig.install.initialize_options(self)
- self.old_and_unmanageable = None
- self.single_version_externally_managed = None
-
- def finalize_options(self):
- orig.install.finalize_options(self)
- if self.root:
- self.single_version_externally_managed = True
- elif self.single_version_externally_managed:
- if not self.root and not self.record:
- raise DistutilsArgError(
- "You must specify --record or --root when building system"
- " packages"
- )
-
- def handle_extra_path(self):
- if self.root or self.single_version_externally_managed:
- # explicit backward-compatibility mode, allow extra_path to work
- return orig.install.handle_extra_path(self)
-
- # Ignore extra_path when installing an egg (or being run by another
-        # command without --root or --single-version-externally-managed)
- self.path_file = None
- self.extra_dirs = ''
-
- def run(self):
- # Explicit request for old-style install? Just do it
- if self.old_and_unmanageable or self.single_version_externally_managed:
- return orig.install.run(self)
-
- if not self._called_from_setup(inspect.currentframe()):
- # Run in backward-compatibility mode to support bdist_* commands.
- orig.install.run(self)
- else:
- self.do_egg_install()
-
- @staticmethod
- def _called_from_setup(run_frame):
- """
- Attempt to detect whether run() was called from setup() or by another
- command. If called by setup(), the parent caller will be the
- 'run_command' method in 'distutils.dist', and *its* caller will be
- the 'run_commands' method. If called any other way, the
- immediate caller *might* be 'run_command', but it won't have been
- called by 'run_commands'. Return True in that case or if a call stack
- is unavailable. Return False otherwise.
- """
- if run_frame is None:
- msg = "Call stack not available. bdist_* commands may fail."
- warnings.warn(msg)
- if platform.python_implementation() == 'IronPython':
- msg = "For best results, pass -X:Frames to enable call stack."
- warnings.warn(msg)
- return True
- res = inspect.getouterframes(run_frame)[2]
- caller, = res[:1]
- info = inspect.getframeinfo(caller)
- caller_module = caller.f_globals.get('__name__', '')
- return (
- caller_module == 'distutils.dist'
- and info.function == 'run_commands'
- )
-
- def do_egg_install(self):
-
- easy_install = self.distribution.get_command_class('easy_install')
-
- cmd = easy_install(
- self.distribution, args="x", root=self.root, record=self.record,
- )
- cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
- cmd.always_copy_from = '.' # make sure local-dir eggs get installed
-
- # pick up setup-dir .egg files only: no .egg-info
- cmd.package_index.scan(glob.glob('*.egg'))
-
- self.run_command('bdist_egg')
- args = [self.distribution.get_command_obj('bdist_egg').egg_output]
-
- if setuptools.bootstrap_install_from:
- # Bootstrap self-installation of setuptools
- args.insert(0, setuptools.bootstrap_install_from)
-
- cmd.args = args
+ orig.install.initialize_options(self)
+ self.old_and_unmanageable = None
+ self.single_version_externally_managed = None
+
+ def finalize_options(self):
+ orig.install.finalize_options(self)
+ if self.root:
+ self.single_version_externally_managed = True
+ elif self.single_version_externally_managed:
+ if not self.root and not self.record:
+ raise DistutilsArgError(
+ "You must specify --record or --root when building system"
+ " packages"
+ )
+
+ def handle_extra_path(self):
+ if self.root or self.single_version_externally_managed:
+ # explicit backward-compatibility mode, allow extra_path to work
+ return orig.install.handle_extra_path(self)
+
+ # Ignore extra_path when installing an egg (or being run by another
+        # command without --root or --single-version-externally-managed)
+ self.path_file = None
+ self.extra_dirs = ''
+
+ def run(self):
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return orig.install.run(self)
+
+ if not self._called_from_setup(inspect.currentframe()):
+ # Run in backward-compatibility mode to support bdist_* commands.
+ orig.install.run(self)
+ else:
+ self.do_egg_install()
+
+ @staticmethod
+ def _called_from_setup(run_frame):
+ """
+ Attempt to detect whether run() was called from setup() or by another
+ command. If called by setup(), the parent caller will be the
+ 'run_command' method in 'distutils.dist', and *its* caller will be
+ the 'run_commands' method. If called any other way, the
+ immediate caller *might* be 'run_command', but it won't have been
+ called by 'run_commands'. Return True in that case or if a call stack
+ is unavailable. Return False otherwise.
+ """
+ if run_frame is None:
+ msg = "Call stack not available. bdist_* commands may fail."
+ warnings.warn(msg)
+ if platform.python_implementation() == 'IronPython':
+ msg = "For best results, pass -X:Frames to enable call stack."
+ warnings.warn(msg)
+ return True
+ res = inspect.getouterframes(run_frame)[2]
+ caller, = res[:1]
+ info = inspect.getframeinfo(caller)
+ caller_module = caller.f_globals.get('__name__', '')
+ return (
+ caller_module == 'distutils.dist'
+ and info.function == 'run_commands'
+ )
+
+ def do_egg_install(self):
+
+ easy_install = self.distribution.get_command_class('easy_install')
+
+ cmd = easy_install(
+ self.distribution, args="x", root=self.root, record=self.record,
+ )
+ cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
+ cmd.always_copy_from = '.' # make sure local-dir eggs get installed
+
+ # pick up setup-dir .egg files only: no .egg-info
+ cmd.package_index.scan(glob.glob('*.egg'))
+
+ self.run_command('bdist_egg')
+ args = [self.distribution.get_command_obj('bdist_egg').egg_output]
+
+ if setuptools.bootstrap_install_from:
+ # Bootstrap self-installation of setuptools
+ args.insert(0, setuptools.bootstrap_install_from)
+
+ cmd.args = args
cmd.run(show_deprecation=False)
- setuptools.bootstrap_install_from = None
-
-
-# XXX Python 3.1 doesn't see _nc if this is inside the class
-install.sub_commands = (
- [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
- install.new_commands
-)
+ setuptools.bootstrap_install_from = None
+
+
+# XXX Python 3.1 doesn't see _nc if this is inside the class
+install.sub_commands = (
+ [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
+ install.new_commands
+)
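The run() path above hinges on _called_from_setup's frame walk: in the list returned by inspect.getouterframes, index 0 is the inspected frame itself, index 1 its caller, and index 2 the caller's caller, which for a real setup() invocation is 'run_commands' in distutils.dist. A standalone sketch of that walk (the function names here are made up; only the inspect calls mirror the code above):

    import inspect

    def probe():
        frame = inspect.currentframe()
        if frame is None:  # some implementations provide no frame support
            return None
        frames = inspect.getouterframes(frame)
        if len(frames) < 3:  # call stack too shallow to have a grandparent
            return None
        grandparent = frames[2]  # FrameInfo(frame, filename, lineno, function, ...)
        module = grandparent.frame.f_globals.get('__name__', '')
        return module, grandparent.function

    def middle():
        return probe()

    def outer():
        return middle()

    print(outer())  # ('__main__', 'outer') when run as a script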
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py b/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py
index 10bdb832a9..edc4718b68 100644
--- a/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py
+++ b/contrib/python/setuptools/py3/setuptools/command/install_egg_info.py
@@ -1,62 +1,62 @@
-from distutils import log, dir_util
-import os
-
-from setuptools import Command
+from distutils import log, dir_util
+import os
+
+from setuptools import Command
from setuptools import namespaces
-from setuptools.archive_util import unpack_archive
-import pkg_resources
-
-
+from setuptools.archive_util import unpack_archive
+import pkg_resources
+
+
class install_egg_info(namespaces.Installer, Command):
- """Install an .egg-info directory for the package"""
-
- description = "Install an .egg-info directory for the package"
-
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ]
-
- def initialize_options(self):
- self.install_dir = None
-
- def finalize_options(self):
- self.set_undefined_options('install_lib',
- ('install_dir', 'install_dir'))
- ei_cmd = self.get_finalized_command("egg_info")
- basename = pkg_resources.Distribution(
- None, None, ei_cmd.egg_name, ei_cmd.egg_version
- ).egg_name() + '.egg-info'
- self.source = ei_cmd.egg_info
- self.target = os.path.join(self.install_dir, basename)
+ """Install an .egg-info directory for the package"""
+
+ description = "Install an .egg-info directory for the package"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',
+ ('install_dir', 'install_dir'))
+ ei_cmd = self.get_finalized_command("egg_info")
+ basename = pkg_resources.Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version
+ ).egg_name() + '.egg-info'
+ self.source = ei_cmd.egg_info
+ self.target = os.path.join(self.install_dir, basename)
self.outputs = []
-
- def run(self):
- self.run_command('egg_info')
- if os.path.isdir(self.target) and not os.path.islink(self.target):
- dir_util.remove_tree(self.target, dry_run=self.dry_run)
- elif os.path.exists(self.target):
- self.execute(os.unlink, (self.target,), "Removing " + self.target)
- if not self.dry_run:
- pkg_resources.ensure_directory(self.target)
- self.execute(
- self.copytree, (), "Copying %s to %s" % (self.source, self.target)
- )
- self.install_namespaces()
-
- def get_outputs(self):
- return self.outputs
-
- def copytree(self):
- # Copy the .egg-info tree to site-packages
- def skimmer(src, dst):
- # filter out source-control directories; note that 'src' is always
- # a '/'-separated path, regardless of platform. 'dst' is a
- # platform-specific path.
- for skip in '.svn/', 'CVS/':
- if src.startswith(skip) or '/' + skip in src:
- return None
- self.outputs.append(dst)
- log.debug("Copying %s to %s", src, dst)
- return dst
-
- unpack_archive(self.source, self.target, skimmer)
+
+ def run(self):
+ self.run_command('egg_info')
+ if os.path.isdir(self.target) and not os.path.islink(self.target):
+ dir_util.remove_tree(self.target, dry_run=self.dry_run)
+ elif os.path.exists(self.target):
+ self.execute(os.unlink, (self.target,), "Removing " + self.target)
+ if not self.dry_run:
+ pkg_resources.ensure_directory(self.target)
+ self.execute(
+ self.copytree, (), "Copying %s to %s" % (self.source, self.target)
+ )
+ self.install_namespaces()
+
+ def get_outputs(self):
+ return self.outputs
+
+ def copytree(self):
+ # Copy the .egg-info tree to site-packages
+ def skimmer(src, dst):
+ # filter out source-control directories; note that 'src' is always
+ # a '/'-separated path, regardless of platform. 'dst' is a
+ # platform-specific path.
+ for skip in '.svn/', 'CVS/':
+ if src.startswith(skip) or '/' + skip in src:
+ return None
+ self.outputs.append(dst)
+ log.debug("Copying %s to %s", src, dst)
+ return dst
+
+ unpack_archive(self.source, self.target, skimmer)
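copytree above delegates the actual copying to unpack_archive with a progress filter: the filter returns the destination path to copy an entry, or None to skip it. The same logic isolated as a plain function, runnable without an archive (the paths are made up):

    def skimmer(src, dst):
        # skip source-control metadata; 'src' is always '/'-separated
        for skip in ('.svn/', 'CVS/'):
            if src.startswith(skip) or '/' + skip in src:
                return None
        return dst

    print(skimmer('pkg/__init__.py', '/site-packages/pkg/__init__.py'))  # kept
    print(skimmer('.svn/entries', '/site-packages/.svn/entries'))        # None, skipped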
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_lib.py b/contrib/python/setuptools/py3/setuptools/command/install_lib.py
index 6b0cfa46d3..2e9d8757a5 100644
--- a/contrib/python/setuptools/py3/setuptools/command/install_lib.py
+++ b/contrib/python/setuptools/py3/setuptools/command/install_lib.py
@@ -1,122 +1,122 @@
-import os
+import os
import sys
-from itertools import product, starmap
-import distutils.command.install_lib as orig
-
-
-class install_lib(orig.install_lib):
- """Don't add compiled flags to filenames of non-Python files"""
-
- def run(self):
- self.build()
- outfiles = self.install()
- if outfiles is not None:
- # always compile, in case we have any extension stubs to deal with
- self.byte_compile(outfiles)
-
- def get_exclusions(self):
- """
- Return a collections.Sized collections.Container of paths to be
- excluded for single_version_externally_managed installations.
- """
- all_packages = (
- pkg
- for ns_pkg in self._get_SVEM_NSPs()
- for pkg in self._all_packages(ns_pkg)
- )
-
- excl_specs = product(all_packages, self._gen_exclusion_paths())
- return set(starmap(self._exclude_pkg_path, excl_specs))
-
- def _exclude_pkg_path(self, pkg, exclusion_path):
- """
- Given a package name and exclusion path within that package,
- compute the full exclusion path.
- """
- parts = pkg.split('.') + [exclusion_path]
- return os.path.join(self.install_dir, *parts)
-
- @staticmethod
- def _all_packages(pkg_name):
- """
- >>> list(install_lib._all_packages('foo.bar.baz'))
- ['foo.bar.baz', 'foo.bar', 'foo']
- """
- while pkg_name:
- yield pkg_name
- pkg_name, sep, child = pkg_name.rpartition('.')
-
- def _get_SVEM_NSPs(self):
- """
- Get namespace packages (list) but only for
- single_version_externally_managed installations and empty otherwise.
- """
- # TODO: is it necessary to short-circuit here? i.e. what's the cost
- # if get_finalized_command is called even when namespace_packages is
- # False?
- if not self.distribution.namespace_packages:
- return []
-
- install_cmd = self.get_finalized_command('install')
- svem = install_cmd.single_version_externally_managed
-
- return self.distribution.namespace_packages if svem else []
-
- @staticmethod
- def _gen_exclusion_paths():
- """
- Generate file paths to be excluded for namespace packages (bytecode
- cache files).
- """
- # always exclude the package module itself
- yield '__init__.py'
-
- yield '__init__.pyc'
- yield '__init__.pyo'
-
+from itertools import product, starmap
+import distutils.command.install_lib as orig
+
+
+class install_lib(orig.install_lib):
+ """Don't add compiled flags to filenames of non-Python files"""
+
+ def run(self):
+ self.build()
+ outfiles = self.install()
+ if outfiles is not None:
+ # always compile, in case we have any extension stubs to deal with
+ self.byte_compile(outfiles)
+
+ def get_exclusions(self):
+ """
+ Return a collections.Sized collections.Container of paths to be
+ excluded for single_version_externally_managed installations.
+ """
+ all_packages = (
+ pkg
+ for ns_pkg in self._get_SVEM_NSPs()
+ for pkg in self._all_packages(ns_pkg)
+ )
+
+ excl_specs = product(all_packages, self._gen_exclusion_paths())
+ return set(starmap(self._exclude_pkg_path, excl_specs))
+
+ def _exclude_pkg_path(self, pkg, exclusion_path):
+ """
+ Given a package name and exclusion path within that package,
+ compute the full exclusion path.
+ """
+ parts = pkg.split('.') + [exclusion_path]
+ return os.path.join(self.install_dir, *parts)
+
+ @staticmethod
+ def _all_packages(pkg_name):
+ """
+ >>> list(install_lib._all_packages('foo.bar.baz'))
+ ['foo.bar.baz', 'foo.bar', 'foo']
+ """
+ while pkg_name:
+ yield pkg_name
+ pkg_name, sep, child = pkg_name.rpartition('.')
+
+ def _get_SVEM_NSPs(self):
+ """
+ Get namespace packages (list) but only for
+ single_version_externally_managed installations and empty otherwise.
+ """
+ # TODO: is it necessary to short-circuit here? i.e. what's the cost
+ # if get_finalized_command is called even when namespace_packages is
+ # False?
+ if not self.distribution.namespace_packages:
+ return []
+
+ install_cmd = self.get_finalized_command('install')
+ svem = install_cmd.single_version_externally_managed
+
+ return self.distribution.namespace_packages if svem else []
+
+ @staticmethod
+ def _gen_exclusion_paths():
+ """
+ Generate file paths to be excluded for namespace packages (bytecode
+ cache files).
+ """
+ # always exclude the package module itself
+ yield '__init__.py'
+
+ yield '__init__.pyc'
+ yield '__init__.pyo'
+
if not hasattr(sys, 'implementation'):
- return
-
+ return
+
base = os.path.join(
'__pycache__', '__init__.' + sys.implementation.cache_tag)
- yield base + '.pyc'
- yield base + '.pyo'
- yield base + '.opt-1.pyc'
- yield base + '.opt-2.pyc'
-
- def copy_tree(
- self, infile, outfile,
- preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
- ):
- assert preserve_mode and preserve_times and not preserve_symlinks
- exclude = self.get_exclusions()
-
- if not exclude:
- return orig.install_lib.copy_tree(self, infile, outfile)
-
- # Exclude namespace package __init__.py* files from the output
-
- from setuptools.archive_util import unpack_directory
- from distutils import log
-
- outfiles = []
-
- def pf(src, dst):
- if dst in exclude:
- log.warn("Skipping installation of %s (namespace package)",
- dst)
- return False
-
- log.info("copying %s -> %s", src, os.path.dirname(dst))
- outfiles.append(dst)
- return dst
-
- unpack_directory(infile, outfile, pf)
- return outfiles
-
- def get_outputs(self):
- outputs = orig.install_lib.get_outputs(self)
- exclude = self.get_exclusions()
- if exclude:
- return [f for f in outputs if f not in exclude]
- return outputs
+ yield base + '.pyc'
+ yield base + '.pyo'
+ yield base + '.opt-1.pyc'
+ yield base + '.opt-2.pyc'
+
+ def copy_tree(
+ self, infile, outfile,
+ preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+ ):
+ assert preserve_mode and preserve_times and not preserve_symlinks
+ exclude = self.get_exclusions()
+
+ if not exclude:
+ return orig.install_lib.copy_tree(self, infile, outfile)
+
+ # Exclude namespace package __init__.py* files from the output
+
+ from setuptools.archive_util import unpack_directory
+ from distutils import log
+
+ outfiles = []
+
+ def pf(src, dst):
+ if dst in exclude:
+ log.warn("Skipping installation of %s (namespace package)",
+ dst)
+ return False
+
+ log.info("copying %s -> %s", src, os.path.dirname(dst))
+ outfiles.append(dst)
+ return dst
+
+ unpack_directory(infile, outfile, pf)
+ return outfiles
+
+ def get_outputs(self):
+ outputs = orig.install_lib.get_outputs(self)
+ exclude = self.get_exclusions()
+ if exclude:
+ return [f for f in outputs if f not in exclude]
+ return outputs
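get_exclusions above is a cross product: every parent package of every namespace package, paired with every generated exclusion filename, joined under the install dir. A runnable miniature with made-up inputs and an abbreviated exclusion list:

    import os
    from itertools import product, starmap

    def all_packages(pkg_name):
        # 'foo.bar.baz' -> 'foo.bar.baz', 'foo.bar', 'foo', as in _all_packages
        while pkg_name:
            yield pkg_name
            pkg_name, _, _ = pkg_name.rpartition('.')

    install_dir = '/tmp/site-packages'        # hypothetical install root
    packages = (p for ns in ['foo.bar'] for p in all_packages(ns))
    names = ['__init__.py', '__init__.pyc']   # abbreviated exclusion names
    exclude = set(starmap(
        lambda pkg, name: os.path.join(install_dir, *(pkg.split('.') + [name])),
        product(packages, names)))
    print(sorted(exclude))  # four paths, under foo/ and foo/bar/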
diff --git a/contrib/python/setuptools/py3/setuptools/command/install_scripts.py b/contrib/python/setuptools/py3/setuptools/command/install_scripts.py
index 5f83b5e019..9cd8eb0627 100644
--- a/contrib/python/setuptools/py3/setuptools/command/install_scripts.py
+++ b/contrib/python/setuptools/py3/setuptools/command/install_scripts.py
@@ -1,69 +1,69 @@
-from distutils import log
-import distutils.command.install_scripts as orig
+from distutils import log
+import distutils.command.install_scripts as orig
from distutils.errors import DistutilsModuleError
-import os
+import os
import sys
-
-from pkg_resources import Distribution, PathMetadata, ensure_directory
-
-
-class install_scripts(orig.install_scripts):
- """Do normal script install, plus any egg_info wrapper scripts"""
-
- def initialize_options(self):
- orig.install_scripts.initialize_options(self)
- self.no_ep = False
-
- def run(self):
- import setuptools.command.easy_install as ei
-
- self.run_command("egg_info")
- if self.distribution.scripts:
- orig.install_scripts.run(self) # run first to set up self.outfiles
- else:
- self.outfiles = []
- if self.no_ep:
- # don't install entry point scripts into .egg file!
- return
-
- ei_cmd = self.get_finalized_command("egg_info")
- dist = Distribution(
- ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
- ei_cmd.egg_name, ei_cmd.egg_version,
- )
- bs_cmd = self.get_finalized_command('build_scripts')
- exec_param = getattr(bs_cmd, 'executable', None)
+
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+
+
+class install_scripts(orig.install_scripts):
+ """Do normal script install, plus any egg_info wrapper scripts"""
+
+ def initialize_options(self):
+ orig.install_scripts.initialize_options(self)
+ self.no_ep = False
+
+ def run(self):
+ import setuptools.command.easy_install as ei
+
+ self.run_command("egg_info")
+ if self.distribution.scripts:
+ orig.install_scripts.run(self) # run first to set up self.outfiles
+ else:
+ self.outfiles = []
+ if self.no_ep:
+ # don't install entry point scripts into .egg file!
+ return
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ dist = Distribution(
+ ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+ ei_cmd.egg_name, ei_cmd.egg_version,
+ )
+ bs_cmd = self.get_finalized_command('build_scripts')
+ exec_param = getattr(bs_cmd, 'executable', None)
try:
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
except (ImportError, DistutilsModuleError):
is_wininst = False
- writer = ei.ScriptWriter
- if is_wininst:
- exec_param = "python.exe"
- writer = ei.WindowsScriptWriter
+ writer = ei.ScriptWriter
+ if is_wininst:
+ exec_param = "python.exe"
+ writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
- # resolve the writer to the environment
- writer = writer.best()
- cmd = writer.command_spec_class.best().from_param(exec_param)
- for args in writer.get_args(dist, cmd.as_header()):
- self.write_script(*args)
-
- def write_script(self, script_name, contents, mode="t", *ignored):
- """Write an executable file to the scripts directory"""
- from setuptools.command.easy_install import chmod, current_umask
-
- log.info("Installing %s script to %s", script_name, self.install_dir)
- target = os.path.join(self.install_dir, script_name)
- self.outfiles.append(target)
-
- mask = current_umask()
- if not self.dry_run:
- ensure_directory(target)
- f = open(target, "w" + mode)
- f.write(contents)
- f.close()
- chmod(target, 0o777 - mask)
+ # resolve the writer to the environment
+ writer = writer.best()
+ cmd = writer.command_spec_class.best().from_param(exec_param)
+ for args in writer.get_args(dist, cmd.as_header()):
+ self.write_script(*args)
+
+ def write_script(self, script_name, contents, mode="t", *ignored):
+ """Write an executable file to the scripts directory"""
+ from setuptools.command.easy_install import chmod, current_umask
+
+ log.info("Installing %s script to %s", script_name, self.install_dir)
+ target = os.path.join(self.install_dir, script_name)
+ self.outfiles.append(target)
+
+ mask = current_umask()
+ if not self.dry_run:
+ ensure_directory(target)
+ f = open(target, "w" + mode)
+ f.write(contents)
+ f.close()
+ chmod(target, 0o777 - mask)
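write_script above chmods each installed script to 0o777 minus the process umask. current_umask is imported from easy_install; since POSIX offers no read-only umask query, such a helper has to set a throwaway value and restore the old one. A sketch of that dance (the helper body is an assumption, not copied from easy_install):

    import os

    def current_umask():
        tmp = os.umask(0o022)  # set a throwaway mask to learn the current one
        os.umask(tmp)          # restore it immediately
        return tmp

    print(oct(0o777 - current_umask()))  # 0o755 under the common 022 umask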
diff --git a/contrib/python/setuptools/py3/setuptools/command/register.py b/contrib/python/setuptools/py3/setuptools/command/register.py
index dbe5e619f7..b8266b9a60 100644
--- a/contrib/python/setuptools/py3/setuptools/command/register.py
+++ b/contrib/python/setuptools/py3/setuptools/command/register.py
@@ -1,13 +1,13 @@
from distutils import log
-import distutils.command.register as orig
-
+import distutils.command.register as orig
+
from setuptools.errors import RemovedCommandError
-
-class register(orig.register):
+
+class register(orig.register):
"""Formerly used to register packages on PyPI."""
-
- def run(self):
+
+ def run(self):
msg = (
"The register command has been removed, use twine to upload "
+ "instead (https://pypi.org/p/twine)"
diff --git a/contrib/python/setuptools/py3/setuptools/command/rotate.py b/contrib/python/setuptools/py3/setuptools/command/rotate.py
index dd51ae188c..74795ba922 100644
--- a/contrib/python/setuptools/py3/setuptools/command/rotate.py
+++ b/contrib/python/setuptools/py3/setuptools/command/rotate.py
@@ -1,63 +1,63 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
import shutil
-
-from setuptools import Command
-
-
-class rotate(Command):
- """Delete older distributions"""
-
- description = "delete older distributions, keeping N newest files"
- user_options = [
- ('match=', 'm', "patterns to match (required)"),
- ('dist-dir=', 'd', "directory where the distributions are"),
- ('keep=', 'k', "number of matching distributions to keep"),
- ]
-
- boolean_options = []
-
- def initialize_options(self):
- self.match = None
- self.dist_dir = None
- self.keep = None
-
- def finalize_options(self):
- if self.match is None:
- raise DistutilsOptionError(
- "Must specify one or more (comma-separated) match patterns "
- "(e.g. '.zip' or '.egg')"
- )
- if self.keep is None:
- raise DistutilsOptionError("Must specify number of files to keep")
- try:
- self.keep = int(self.keep)
+
+from setuptools import Command
+
+
+class rotate(Command):
+ """Delete older distributions"""
+
+ description = "delete older distributions, keeping N newest files"
+ user_options = [
+ ('match=', 'm', "patterns to match (required)"),
+ ('dist-dir=', 'd', "directory where the distributions are"),
+ ('keep=', 'k', "number of matching distributions to keep"),
+ ]
+
+ boolean_options = []
+
+ def initialize_options(self):
+ self.match = None
+ self.dist_dir = None
+ self.keep = None
+
+ def finalize_options(self):
+ if self.match is None:
+ raise DistutilsOptionError(
+ "Must specify one or more (comma-separated) match patterns "
+ "(e.g. '.zip' or '.egg')"
+ )
+ if self.keep is None:
+ raise DistutilsOptionError("Must specify number of files to keep")
+ try:
+ self.keep = int(self.keep)
except ValueError as e:
raise DistutilsOptionError("--keep must be an integer") from e
if isinstance(self.match, str):
- self.match = [
- convert_path(p.strip()) for p in self.match.split(',')
- ]
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- def run(self):
- self.run_command("egg_info")
- from glob import glob
-
- for pattern in self.match:
- pattern = self.distribution.get_name() + '*' + pattern
- files = glob(os.path.join(self.dist_dir, pattern))
- files = [(os.path.getmtime(f), f) for f in files]
- files.sort()
- files.reverse()
-
- log.info("%d file(s) matching %s", len(files), pattern)
- files = files[self.keep:]
- for (t, f) in files:
- log.info("Deleting %s", f)
- if not self.dry_run:
+ self.match = [
+ convert_path(p.strip()) for p in self.match.split(',')
+ ]
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ def run(self):
+ self.run_command("egg_info")
+ from glob import glob
+
+ for pattern in self.match:
+ pattern = self.distribution.get_name() + '*' + pattern
+ files = glob(os.path.join(self.dist_dir, pattern))
+ files = [(os.path.getmtime(f), f) for f in files]
+ files.sort()
+ files.reverse()
+
+ log.info("%d file(s) matching %s", len(files), pattern)
+ files = files[self.keep:]
+ for (t, f) in files:
+ log.info("Deleting %s", f)
+ if not self.dry_run:
if os.path.isdir(f):
shutil.rmtree(f)
else:
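run() above sorts the matches newest-first by mtime and deletes everything past --keep; the tail truncated here removes directories with shutil.rmtree and, presumably, unlinks plain files otherwise. The selection logic in isolation (the glob pattern is made up; an empty match simply prints nothing):

    import glob
    import os

    files = [(os.path.getmtime(f), f) for f in glob.glob('dist/demo*.egg')]
    files.sort()
    files.reverse()          # newest first, as in run() above
    for _, f in files[2:]:   # keep the 2 newest, consider the rest stale
        print('would delete', f)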
diff --git a/contrib/python/setuptools/py3/setuptools/command/saveopts.py b/contrib/python/setuptools/py3/setuptools/command/saveopts.py
index 3ce03df53c..611cec5528 100644
--- a/contrib/python/setuptools/py3/setuptools/command/saveopts.py
+++ b/contrib/python/setuptools/py3/setuptools/command/saveopts.py
@@ -1,22 +1,22 @@
-from setuptools.command.setopt import edit_config, option_base
-
-
-class saveopts(option_base):
- """Save command-line options to a file"""
-
- description = "save supplied options to setup.cfg or other config file"
-
- def run(self):
- dist = self.distribution
- settings = {}
-
- for cmd in dist.command_options:
-
- if cmd == 'saveopts':
- continue # don't save our own options!
-
- for opt, (src, val) in dist.get_option_dict(cmd).items():
- if src == "command line":
- settings.setdefault(cmd, {})[opt] = val
-
- edit_config(self.filename, settings, self.dry_run)
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+ """Save command-line options to a file"""
+
+ description = "save supplied options to setup.cfg or other config file"
+
+ def run(self):
+ dist = self.distribution
+ settings = {}
+
+ for cmd in dist.command_options:
+
+ if cmd == 'saveopts':
+ continue # don't save our own options!
+
+ for opt, (src, val) in dist.get_option_dict(cmd).items():
+ if src == "command line":
+ settings.setdefault(cmd, {})[opt] = val
+
+ edit_config(self.filename, settings, self.dry_run)
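dist.get_option_dict(cmd) maps each option name to a (source, value) pair, so the filter above keeps only options that arrived via the command line. A runnable miniature with faked sources:

    from distutils.dist import Distribution

    dist = Distribution()
    # distutils records where each option came from; fake two origins here
    dist.get_option_dict('build')['debug'] = ('command line', '1')
    dist.get_option_dict('build')['force'] = ('setup.cfg', '1')

    settings = {}
    for cmd in dist.command_options:
        for opt, (src, val) in dist.get_option_dict(cmd).items():
            if src == 'command line':
                settings.setdefault(cmd, {})[opt] = val
    print(settings)  # {'build': {'debug': '1'}}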
diff --git a/contrib/python/setuptools/py3/setuptools/command/sdist.py b/contrib/python/setuptools/py3/setuptools/command/sdist.py
index 50201e6714..0285b690fc 100644
--- a/contrib/python/setuptools/py3/setuptools/command/sdist.py
+++ b/contrib/python/setuptools/py3/setuptools/command/sdist.py
@@ -1,66 +1,66 @@
-from distutils import log
-import distutils.command.sdist as orig
-import os
-import sys
-import io
+from distutils import log
+import distutils.command.sdist as orig
+import os
+import sys
+import io
import contextlib
-
+
from .py36compat import sdist_add_defaults
-
-import pkg_resources
-
-_default_revctrl = list
-
-
-def walk_revctrl(dirname=''):
- """Find all files under revision control"""
- for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
- for item in ep.load()(dirname):
- yield item
-
-
+
+import pkg_resources
+
+_default_revctrl = list
+
+
+def walk_revctrl(dirname=''):
+ """Find all files under revision control"""
+ for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+ for item in ep.load()(dirname):
+ yield item
+
+
class sdist(sdist_add_defaults, orig.sdist):
- """Smart sdist that finds anything supported by revision control"""
-
- user_options = [
- ('formats=', None,
- "formats for source distribution (comma-separated list)"),
- ('keep-temp', 'k',
- "keep the distribution tree around after creating " +
- "archive file(s)"),
- ('dist-dir=', 'd',
- "directory to put the source distribution archive(s) in "
- "[default: dist]"),
+ """Smart sdist that finds anything supported by revision control"""
+
+ user_options = [
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
('owner=', 'u',
"Owner name used when creating a tar file [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file [default: current group]"),
- ]
-
- negative_opt = {}
-
+ ]
+
+ negative_opt = {}
+
README_EXTENSIONS = ['', '.rst', '.txt', '.md']
READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
- def run(self):
- self.run_command('egg_info')
- ei_cmd = self.get_finalized_command('egg_info')
- self.filelist = ei_cmd.filelist
- self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
- self.check_readme()
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- self.make_distribution()
-
- dist_files = getattr(self.distribution, 'dist_files', [])
- for file in self.archive_files:
- data = ('sdist', '', file)
- if data not in dist_files:
- dist_files.append(data)
-
+ def run(self):
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist = ei_cmd.filelist
+ self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
+ self.check_readme()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ self.make_distribution()
+
+ dist_files = getattr(self.distribution, 'dist_files', [])
+ for file in self.archive_files:
+ data = ('sdist', '', file)
+ if data not in dist_files:
+ dist_files.append(data)
+
def initialize_options(self):
orig.sdist.initialize_options(self)
@@ -107,11 +107,11 @@ class sdist(sdist_add_defaults, orig.sdist):
def _add_defaults_python(self):
"""getting python files"""
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- self.filelist.extend(build_py.get_source_files())
+ if self.distribution.has_pure_modules():
+ build_py = self.get_finalized_command('build_py')
+ self.filelist.extend(build_py.get_source_files())
self._add_data_files(self._safe_data_files(build_py))
-
+
def _safe_data_files(self, build_py):
"""
Since the ``sdist`` class is also used to compute the MANIFEST
@@ -140,57 +140,57 @@ class sdist(sdist_add_defaults, orig.sdist):
super()._add_defaults_data_files()
except TypeError:
log.warn("data_files contains unexpected objects")
-
- def check_readme(self):
+
+ def check_readme(self):
for f in self.READMES:
- if os.path.exists(f):
- return
- else:
- self.warn(
- "standard file not found: should have one of " +
+ if os.path.exists(f):
+ return
+ else:
+ self.warn(
+ "standard file not found: should have one of " +
', '.join(self.READMES)
- )
-
- def make_release_tree(self, base_dir, files):
- orig.sdist.make_release_tree(self, base_dir, files)
-
- # Save any egg_info command line options used to create this sdist
- dest = os.path.join(base_dir, 'setup.cfg')
- if hasattr(os, 'link') and os.path.exists(dest):
- # unlink and re-copy, since it might be hard-linked, and
- # we don't want to change the source version
- os.unlink(dest)
- self.copy_file('setup.cfg', dest)
-
- self.get_finalized_command('egg_info').save_version_info(dest)
-
- def _manifest_is_not_generated(self):
- # check for special comment used in 2.7.1 and higher
- if not os.path.isfile(self.manifest):
- return False
-
- with io.open(self.manifest, 'rb') as fp:
- first_line = fp.readline()
- return (first_line !=
- '# file GENERATED by distutils, do NOT edit\n'.encode())
-
- def read_manifest(self):
- """Read the manifest file (named by 'self.manifest') and use it to
- fill in 'self.filelist', the list of files to include in the source
- distribution.
- """
- log.info("reading manifest file '%s'", self.manifest)
+ )
+
+ def make_release_tree(self, base_dir, files):
+ orig.sdist.make_release_tree(self, base_dir, files)
+
+ # Save any egg_info command line options used to create this sdist
+ dest = os.path.join(base_dir, 'setup.cfg')
+ if hasattr(os, 'link') and os.path.exists(dest):
+ # unlink and re-copy, since it might be hard-linked, and
+ # we don't want to change the source version
+ os.unlink(dest)
+ self.copy_file('setup.cfg', dest)
+
+ self.get_finalized_command('egg_info').save_version_info(dest)
+
+ def _manifest_is_not_generated(self):
+ # check for special comment used in 2.7.1 and higher
+ if not os.path.isfile(self.manifest):
+ return False
+
+ with io.open(self.manifest, 'rb') as fp:
+ first_line = fp.readline()
+ return (first_line !=
+ '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+ def read_manifest(self):
+ """Read the manifest file (named by 'self.manifest') and use it to
+ fill in 'self.filelist', the list of files to include in the source
+ distribution.
+ """
+ log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rb')
- for line in manifest:
- # The manifest must contain UTF-8. See #303.
+ for line in manifest:
+ # The manifest must contain UTF-8. See #303.
try:
line = line.decode('UTF-8')
except UnicodeDecodeError:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
- # ignore comments and blank lines
- line = line.strip()
- if line.startswith('#') or not line:
- continue
- self.filelist.append(line)
- manifest.close()
+ # ignore comments and blank lines
+ line = line.strip()
+ if line.startswith('#') or not line:
+ continue
+ self.filelist.append(line)
+ manifest.close()
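read_manifest above decodes each line as UTF-8 (skipping undecodable ones, per the #303 comment) and drops comments and blank lines. The same loop over an in-memory manifest instead of a file:

    raw = b"# file GENERATED by distutils, do NOT edit\nsetup.py\n\xff\xfe\nREADME.rst\n"
    filelist = []
    for line in raw.splitlines():
        try:
            text = line.decode('UTF-8')
        except UnicodeDecodeError:
            continue                    # logged and skipped in read_manifest
        text = text.strip()
        if not text or text.startswith('#'):
            continue                    # drop comments and blank lines
        filelist.append(text)
    print(filelist)                     # ['setup.py', 'README.rst']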
diff --git a/contrib/python/setuptools/py3/setuptools/command/setopt.py b/contrib/python/setuptools/py3/setuptools/command/setopt.py
index 7750080f25..6358c0451b 100644
--- a/contrib/python/setuptools/py3/setuptools/command/setopt.py
+++ b/contrib/python/setuptools/py3/setuptools/command/setopt.py
@@ -1,149 +1,149 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import distutils
-import os
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import distutils
+import os
import configparser
-
-from setuptools import Command
-
-__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
-
-
-def config_file(kind="local"):
- """Get the filename of the distutils, local, global, or per-user config
-
- `kind` must be one of "local", "global", or "user"
- """
- if kind == 'local':
- return 'setup.cfg'
- if kind == 'global':
- return os.path.join(
- os.path.dirname(distutils.__file__), 'distutils.cfg'
- )
- if kind == 'user':
- dot = os.name == 'posix' and '.' or ''
- return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
- raise ValueError(
- "config_file() type must be 'local', 'global', or 'user'", kind
- )
-
-
-def edit_config(filename, settings, dry_run=False):
- """Edit a configuration file to include `settings`
-
- `settings` is a dictionary of dictionaries or ``None`` values, keyed by
- command/section name. A ``None`` value means to delete the entire section,
- while a dictionary lists settings to be changed or deleted in that section.
- A setting of ``None`` means to delete that setting.
- """
- log.debug("Reading configuration from %s", filename)
- opts = configparser.RawConfigParser()
+
+from setuptools import Command
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+ """Get the filename of the distutils, local, global, or per-user config
+
+ `kind` must be one of "local", "global", or "user"
+ """
+ if kind == 'local':
+ return 'setup.cfg'
+ if kind == 'global':
+ return os.path.join(
+ os.path.dirname(distutils.__file__), 'distutils.cfg'
+ )
+ if kind == 'user':
+ dot = os.name == 'posix' and '.' or ''
+ return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+ raise ValueError(
+ "config_file() type must be 'local', 'global', or 'user'", kind
+ )
+
+
+def edit_config(filename, settings, dry_run=False):
+ """Edit a configuration file to include `settings`
+
+ `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+ command/section name. A ``None`` value means to delete the entire section,
+ while a dictionary lists settings to be changed or deleted in that section.
+ A setting of ``None`` means to delete that setting.
+ """
+ log.debug("Reading configuration from %s", filename)
+ opts = configparser.RawConfigParser()
opts.optionxform = lambda x: x
- opts.read([filename])
- for section, options in settings.items():
- if options is None:
- log.info("Deleting section [%s] from %s", section, filename)
- opts.remove_section(section)
- else:
- if not opts.has_section(section):
- log.debug("Adding new section [%s] to %s", section, filename)
- opts.add_section(section)
- for option, value in options.items():
- if value is None:
- log.debug(
- "Deleting %s.%s from %s",
- section, option, filename
- )
- opts.remove_option(section, option)
- if not opts.options(section):
- log.info("Deleting empty [%s] section from %s",
- section, filename)
- opts.remove_section(section)
- else:
- log.debug(
- "Setting %s.%s to %r in %s",
- section, option, value, filename
- )
- opts.set(section, option, value)
-
- log.info("Writing %s", filename)
- if not dry_run:
- with open(filename, 'w') as f:
- opts.write(f)
-
-
-class option_base(Command):
- """Abstract base class for commands that mess with config files"""
-
- user_options = [
- ('global-config', 'g',
- "save options to the site-wide distutils.cfg file"),
- ('user-config', 'u',
- "save options to the current user's pydistutils.cfg file"),
- ('filename=', 'f',
- "configuration file to use (default=setup.cfg)"),
- ]
-
- boolean_options = [
- 'global-config', 'user-config',
- ]
-
- def initialize_options(self):
- self.global_config = None
- self.user_config = None
- self.filename = None
-
- def finalize_options(self):
- filenames = []
- if self.global_config:
- filenames.append(config_file('global'))
- if self.user_config:
- filenames.append(config_file('user'))
- if self.filename is not None:
- filenames.append(self.filename)
- if not filenames:
- filenames.append(config_file('local'))
- if len(filenames) > 1:
- raise DistutilsOptionError(
- "Must specify only one configuration file option",
- filenames
- )
- self.filename, = filenames
-
-
-class setopt(option_base):
- """Save command-line options to a file"""
-
- description = "set an option in setup.cfg or another config file"
-
- user_options = [
- ('command=', 'c', 'command to set an option for'),
- ('option=', 'o', 'option to set'),
- ('set-value=', 's', 'value of the option'),
- ('remove', 'r', 'remove (unset) the value'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.command = None
- self.option = None
- self.set_value = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.command is None or self.option is None:
- raise DistutilsOptionError("Must specify --command *and* --option")
- if self.set_value is None and not self.remove:
- raise DistutilsOptionError("Must specify --set-value or --remove")
-
- def run(self):
- edit_config(
- self.filename, {
- self.command: {self.option.replace('-', '_'): self.set_value}
- },
- self.dry_run
- )
+ opts.read([filename])
+ for section, options in settings.items():
+ if options is None:
+ log.info("Deleting section [%s] from %s", section, filename)
+ opts.remove_section(section)
+ else:
+ if not opts.has_section(section):
+ log.debug("Adding new section [%s] to %s", section, filename)
+ opts.add_section(section)
+ for option, value in options.items():
+ if value is None:
+ log.debug(
+ "Deleting %s.%s from %s",
+ section, option, filename
+ )
+ opts.remove_option(section, option)
+ if not opts.options(section):
+ log.info("Deleting empty [%s] section from %s",
+ section, filename)
+ opts.remove_section(section)
+ else:
+ log.debug(
+ "Setting %s.%s to %r in %s",
+ section, option, value, filename
+ )
+ opts.set(section, option, value)
+
+ log.info("Writing %s", filename)
+ if not dry_run:
+ with open(filename, 'w') as f:
+ opts.write(f)
+
+
+class option_base(Command):
+ """Abstract base class for commands that mess with config files"""
+
+ user_options = [
+ ('global-config', 'g',
+ "save options to the site-wide distutils.cfg file"),
+ ('user-config', 'u',
+ "save options to the current user's pydistutils.cfg file"),
+ ('filename=', 'f',
+ "configuration file to use (default=setup.cfg)"),
+ ]
+
+ boolean_options = [
+ 'global-config', 'user-config',
+ ]
+
+ def initialize_options(self):
+ self.global_config = None
+ self.user_config = None
+ self.filename = None
+
+ def finalize_options(self):
+ filenames = []
+ if self.global_config:
+ filenames.append(config_file('global'))
+ if self.user_config:
+ filenames.append(config_file('user'))
+ if self.filename is not None:
+ filenames.append(self.filename)
+ if not filenames:
+ filenames.append(config_file('local'))
+ if len(filenames) > 1:
+ raise DistutilsOptionError(
+ "Must specify only one configuration file option",
+ filenames
+ )
+ self.filename, = filenames
+
+
+class setopt(option_base):
+ """Save command-line options to a file"""
+
+ description = "set an option in setup.cfg or another config file"
+
+ user_options = [
+ ('command=', 'c', 'command to set an option for'),
+ ('option=', 'o', 'option to set'),
+ ('set-value=', 's', 'value of the option'),
+ ('remove', 'r', 'remove (unset) the value'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.command = None
+ self.option = None
+ self.set_value = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.command is None or self.option is None:
+ raise DistutilsOptionError("Must specify --command *and* --option")
+ if self.set_value is None and not self.remove:
+ raise DistutilsOptionError("Must specify --set-value or --remove")
+
+ def run(self):
+ edit_config(
+ self.filename, {
+ self.command: {self.option.replace('-', '_'): self.set_value}
+ },
+ self.dry_run
+ )
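edit_config above treats None as deletion at both levels: a None section removes the whole section, and a None option removes that option, then the section too if it became empty. The same semantics exercised against an in-memory parser:

    import configparser

    opts = configparser.RawConfigParser()
    opts.optionxform = lambda x: x      # preserve option-name case, as above
    opts.read_string('[metadata]\nname = demo\nauthor = me\n')

    for option, value in {'author': None, 'name': 'demo2'}.items():
        if value is None:
            opts.remove_option('metadata', option)
            if not opts.options('metadata'):
                opts.remove_section('metadata')
        else:
            opts.set('metadata', option, value)
    print(dict(opts['metadata']))       # {'name': 'demo2'}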
diff --git a/contrib/python/setuptools/py3/setuptools/command/test.py b/contrib/python/setuptools/py3/setuptools/command/test.py
index 52c6fcd22a..4a389e4d07 100644
--- a/contrib/python/setuptools/py3/setuptools/command/test.py
+++ b/contrib/python/setuptools/py3/setuptools/command/test.py
@@ -6,8 +6,8 @@ import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
-from unittest import TestLoader
-
+from unittest import TestLoader
+
from pkg_resources import (
resource_listdir,
resource_exists,
@@ -18,113 +18,113 @@ from pkg_resources import (
require,
EntryPoint,
)
-from setuptools import Command
+from setuptools import Command
from setuptools.extern.more_itertools import unique_everseen
-
-
-class ScanningLoader(TestLoader):
+
+
+class ScanningLoader(TestLoader):
def __init__(self):
TestLoader.__init__(self)
self._visited = set()
- def loadTestsFromModule(self, module, pattern=None):
- """Return a suite of all tests cases contained in the given module
-
- If the module is a package, load tests from all the modules in it.
- If the module has an ``additional_tests`` function, call it and add
- the return value to the tests.
- """
+ def loadTestsFromModule(self, module, pattern=None):
+        """Return a suite of all test cases contained in the given module
+
+ If the module is a package, load tests from all the modules in it.
+ If the module has an ``additional_tests`` function, call it and add
+ the return value to the tests.
+ """
if module in self._visited:
return None
self._visited.add(module)
- tests = []
- tests.append(TestLoader.loadTestsFromModule(self, module))
-
- if hasattr(module, "additional_tests"):
- tests.append(module.additional_tests())
-
- if hasattr(module, '__path__'):
- for file in resource_listdir(module.__name__, ''):
- if file.endswith('.py') and file != '__init__.py':
- submodule = module.__name__ + '.' + file[:-3]
- else:
- if resource_exists(module.__name__, file + '/__init__.py'):
- submodule = module.__name__ + '.' + file
- else:
- continue
- tests.append(self.loadTestsFromName(submodule))
-
- if len(tests) != 1:
- return self.suiteClass(tests)
- else:
- return tests[0] # don't create a nested suite for only one return
-
-
-# adapted from jaraco.classes.properties:NonDataProperty
+ tests = []
+ tests.append(TestLoader.loadTestsFromModule(self, module))
+
+ if hasattr(module, "additional_tests"):
+ tests.append(module.additional_tests())
+
+ if hasattr(module, '__path__'):
+ for file in resource_listdir(module.__name__, ''):
+ if file.endswith('.py') and file != '__init__.py':
+ submodule = module.__name__ + '.' + file[:-3]
+ else:
+ if resource_exists(module.__name__, file + '/__init__.py'):
+ submodule = module.__name__ + '.' + file
+ else:
+ continue
+ tests.append(self.loadTestsFromName(submodule))
+
+ if len(tests) != 1:
+ return self.suiteClass(tests)
+ else:
+ return tests[0] # don't create a nested suite for only one return
+
+
+# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
- def __init__(self, fget):
- self.fget = fget
-
- def __get__(self, obj, objtype=None):
- if obj is None:
- return self
- return self.fget(obj)
-
-
-class test(Command):
- """Command to run unit tests after in-place build"""
-
+ def __init__(self, fget):
+ self.fget = fget
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+ return self.fget(obj)
+
+
+class test(Command):
+ """Command to run unit tests after in-place build"""
+
description = "run unit tests after in-place build (deprecated)"
-
- user_options = [
- ('test-module=', 'm', "Run 'test_suite' in specified module"),
+
+ user_options = [
+ ('test-module=', 'm', "Run 'test_suite' in specified module"),
(
'test-suite=',
's',
"Run single test, case or suite (e.g. 'module.test_suite')",
),
- ('test-runner=', 'r', "Test runner to use"),
- ]
-
- def initialize_options(self):
- self.test_suite = None
- self.test_module = None
- self.test_loader = None
- self.test_runner = None
-
- def finalize_options(self):
-
- if self.test_suite and self.test_module:
- msg = "You may specify a module or a suite, but not both"
- raise DistutilsOptionError(msg)
-
- if self.test_suite is None:
- if self.test_module is None:
- self.test_suite = self.distribution.test_suite
- else:
- self.test_suite = self.test_module + ".test_suite"
-
- if self.test_loader is None:
- self.test_loader = getattr(self.distribution, 'test_loader', None)
- if self.test_loader is None:
- self.test_loader = "setuptools.command.test:ScanningLoader"
- if self.test_runner is None:
- self.test_runner = getattr(self.distribution, 'test_runner', None)
-
- @NonDataProperty
- def test_args(self):
- return list(self._test_args())
-
- def _test_args(self):
+ ('test-runner=', 'r', "Test runner to use"),
+ ]
+
+ def initialize_options(self):
+ self.test_suite = None
+ self.test_module = None
+ self.test_loader = None
+ self.test_runner = None
+
+ def finalize_options(self):
+
+ if self.test_suite and self.test_module:
+ msg = "You may specify a module or a suite, but not both"
+ raise DistutilsOptionError(msg)
+
+ if self.test_suite is None:
+ if self.test_module is None:
+ self.test_suite = self.distribution.test_suite
+ else:
+ self.test_suite = self.test_module + ".test_suite"
+
+ if self.test_loader is None:
+ self.test_loader = getattr(self.distribution, 'test_loader', None)
+ if self.test_loader is None:
+ self.test_loader = "setuptools.command.test:ScanningLoader"
+ if self.test_runner is None:
+ self.test_runner = getattr(self.distribution, 'test_runner', None)
+
+ @NonDataProperty
+ def test_args(self):
+ return list(self._test_args())
+
+ def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
- if self.verbose:
- yield '--verbose'
- if self.test_suite:
- yield self.test_suite
-
- def with_project_on_sys_path(self, func):
+ if self.verbose:
+ yield '--verbose'
+ if self.test_suite:
+ yield self.test_suite
+
+ def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
@@ -134,30 +134,30 @@ class test(Command):
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
self.run_command('egg_info')
-
+
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
-
- ei_cmd = self.get_finalized_command("egg_info")
-
- old_path = sys.path[:]
- old_modules = sys.modules.copy()
-
- try:
+
+ ei_cmd = self.get_finalized_command("egg_info")
+
+ old_path = sys.path[:]
+ old_modules = sys.modules.copy()
+
+ try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
- working_set.__init__()
- add_activation_listener(lambda dist: dist.activate())
- require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
+ working_set.__init__()
+ add_activation_listener(lambda dist: dist.activate())
+ require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
- finally:
- sys.path[:] = old_path
- sys.modules.clear()
- sys.modules.update(old_modules)
- working_set.__init__()
-
+ finally:
+ sys.path[:] = old_path
+ sys.modules.clear()
+ sys.modules.update(old_modules)
+ working_set.__init__()
+
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
@@ -199,7 +199,7 @@ class test(Command):
)
return itertools.chain(ir_d, tr_d, er_d)
- def run(self):
+ def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
@@ -209,12 +209,12 @@ class test(Command):
)
installed_dists = self.install_dists(self.distribution)
-
- cmd = ' '.join(self._argv)
- if self.dry_run:
- self.announce('skipping "%s" (dry run)' % cmd)
+
+ cmd = ' '.join(self._argv)
+ if self.dry_run:
+ self.announce('skipping "%s" (dry run)' % cmd)
return
-
+
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
@@ -222,31 +222,31 @@ class test(Command):
with self.project_on_sys_path():
self.run_tests()
- def run_tests(self):
+ def run_tests(self):
test = unittest.main(
None,
None,
self._argv,
- testLoader=self._resolve_as_ep(self.test_loader),
- testRunner=self._resolve_as_ep(self.test_runner),
+ testLoader=self._resolve_as_ep(self.test_loader),
+ testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
- )
+ )
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
-
- @property
- def _argv(self):
- return ['unittest'] + self.test_args
-
- @staticmethod
- def _resolve_as_ep(val):
- """
- Load the indicated attribute value, called, as a as if it were
- specified as an entry point.
- """
- if val is None:
- return
- parsed = EntryPoint.parse("x=" + val)
- return parsed.resolve()()
+
+ @property
+ def _argv(self):
+ return ['unittest'] + self.test_args
+
+ @staticmethod
+ def _resolve_as_ep(val):
+ """
+        Load the indicated attribute value, called, as if it were
+ specified as an entry point.
+ """
+ if val is None:
+ return
+ parsed = EntryPoint.parse("x=" + val)
+ return parsed.resolve()()
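test_args works because NonDataProperty defines only __get__: a non-data descriptor loses to an instance attribute of the same name, so assigning cmd.test_args (as finalize_options effectively allows) overrides the computed value. A minimal demonstration of that Python descriptor rule:

    class NonDataProperty:
        def __init__(self, fget):
            self.fget = fget

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            return self.fget(obj)

    class Cmd:
        @NonDataProperty
        def test_args(self):
            return ['discover']

    c = Cmd()
    print(c.test_args)            # ['discover'], computed by fget
    c.test_args = ['--verbose']   # no __set__, so the instance dict shadows it
    print(c.test_args)            # ['--verbose']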
diff --git a/contrib/python/setuptools/py3/setuptools/command/upload_docs.py b/contrib/python/setuptools/py3/setuptools/command/upload_docs.py
index 7dbf039316..845bff4421 100644
--- a/contrib/python/setuptools/py3/setuptools/command/upload_docs.py
+++ b/contrib/python/setuptools/py3/setuptools/command/upload_docs.py
@@ -1,105 +1,105 @@
-# -*- coding: utf-8 -*-
-"""upload_docs
-
-Implements a Distutils 'upload_docs' subcommand (upload documentation to
+# -*- coding: utf-8 -*-
+"""upload_docs
+
+Implements a Distutils 'upload_docs' subcommand (upload documentation to
sites other than PyPi such as devpi).
-"""
-
-from base64 import standard_b64encode
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
-import socket
-import zipfile
-import tempfile
-import shutil
+"""
+
+from base64 import standard_b64encode
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import socket
+import zipfile
+import tempfile
+import shutil
import itertools
import functools
import http.client
import urllib.parse
-
-from pkg_resources import iter_entry_points
+
+from pkg_resources import iter_entry_points
from .upload import upload
-
-
+
+
def _encode(s):
return s.encode('utf-8', 'surrogateescape')
-
-
+
+
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
-
+
description = 'Upload documentation to sites other than PyPi such as devpi'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server'),
- ('upload-dir=', None, 'directory to upload'),
- ]
- boolean_options = upload.boolean_options
-
- def has_sphinx(self):
- if self.upload_dir is None:
- for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
- return True
-
- sub_commands = [('build_sphinx', has_sphinx)]
-
- def initialize_options(self):
- upload.initialize_options(self)
- self.upload_dir = None
- self.target_dir = None
-
- def finalize_options(self):
- upload.finalize_options(self)
- if self.upload_dir is None:
- if self.has_sphinx():
- build_sphinx = self.get_finalized_command('build_sphinx')
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server'),
+ ('upload-dir=', None, 'directory to upload'),
+ ]
+ boolean_options = upload.boolean_options
+
+ def has_sphinx(self):
+ if self.upload_dir is None:
+ for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
+ return True
+
+ sub_commands = [('build_sphinx', has_sphinx)]
+
+ def initialize_options(self):
+ upload.initialize_options(self)
+ self.upload_dir = None
+ self.target_dir = None
+
+ def finalize_options(self):
+ upload.finalize_options(self)
+ if self.upload_dir is None:
+ if self.has_sphinx():
+ build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = dict(build_sphinx.builder_target_dirs)['html']
- else:
- build = self.get_finalized_command('build')
- self.target_dir = os.path.join(build.build_base, 'docs')
- else:
- self.ensure_dirname('upload_dir')
- self.target_dir = self.upload_dir
+ else:
+ build = self.get_finalized_command('build')
+ self.target_dir = os.path.join(build.build_base, 'docs')
+ else:
+ self.ensure_dirname('upload_dir')
+ self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated for PyPi. Use RTD instead.")
- self.announce('Using upload directory %s' % self.target_dir)
-
- def create_zipfile(self, filename):
- zip_file = zipfile.ZipFile(filename, "w")
- try:
- self.mkpath(self.target_dir) # just in case
- for root, dirs, files in os.walk(self.target_dir):
- if root == self.target_dir and not files:
+ self.announce('Using upload directory %s' % self.target_dir)
+
+ def create_zipfile(self, filename):
+ zip_file = zipfile.ZipFile(filename, "w")
+ try:
+ self.mkpath(self.target_dir) # just in case
+ for root, dirs, files in os.walk(self.target_dir):
+ if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
- for name in files:
- full = os.path.join(root, name)
- relative = root[len(self.target_dir):].lstrip(os.path.sep)
- dest = os.path.join(relative, name)
- zip_file.write(full, dest)
- finally:
- zip_file.close()
-
- def run(self):
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- tmp_dir = tempfile.mkdtemp()
- name = self.distribution.metadata.get_name()
- zip_file = os.path.join(tmp_dir, "%s.zip" % name)
- try:
- self.create_zipfile(zip_file)
- self.upload_file(zip_file)
- finally:
- shutil.rmtree(tmp_dir)
-
+ for name in files:
+ full = os.path.join(root, name)
+ relative = root[len(self.target_dir):].lstrip(os.path.sep)
+ dest = os.path.join(relative, name)
+ zip_file.write(full, dest)
+ finally:
+ zip_file.close()
+
+ def run(self):
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ tmp_dir = tempfile.mkdtemp()
+ name = self.distribution.metadata.get_name()
+ zip_file = os.path.join(tmp_dir, "%s.zip" % name)
+ try:
+ self.create_zipfile(zip_file)
+ self.upload_file(zip_file)
+ finally:
+ shutil.rmtree(tmp_dir)
+
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
@@ -139,64 +139,64 @@ class upload_docs(upload):
content_type = 'multipart/form-data; boundary=%s' % boundary
return b''.join(body_items), content_type
- def upload_file(self, filename):
+ def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
- meta = self.distribution.metadata
- data = {
- ':action': 'doc_upload',
- 'name': meta.get_name(),
- 'content': (os.path.basename(filename), content),
- }
- # set up the authentication
+ meta = self.distribution.metadata
+ data = {
+ ':action': 'doc_upload',
+ 'name': meta.get_name(),
+ 'content': (os.path.basename(filename), content),
+ }
+ # set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials).decode('ascii')
- auth = "Basic " + credentials
-
+ auth = "Basic " + credentials
+
body, ct = self._build_multipart(data)
-
+
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
-
- # build the Request
- # We can't use urllib2 since we need to send the Basic
- # auth right with the first request
- schema, netloc, url, params, query, fragments = \
- urllib.parse.urlparse(self.repository)
- assert not params and not query and not fragments
- if schema == 'http':
+
+ # build the Request
+ # We can't use urllib2 since we need to send the Basic
+ # auth right with the first request
+ schema, netloc, url, params, query, fragments = \
+ urllib.parse.urlparse(self.repository)
+ assert not params and not query and not fragments
+ if schema == 'http':
conn = http.client.HTTPConnection(netloc)
- elif schema == 'https':
+ elif schema == 'https':
conn = http.client.HTTPSConnection(netloc)
- else:
- raise AssertionError("unsupported schema " + schema)
-
- data = ''
- try:
- conn.connect()
- conn.putrequest("POST", url)
+ else:
+ raise AssertionError("unsupported schema " + schema)
+
+ data = ''
+ try:
+ conn.connect()
+ conn.putrequest("POST", url)
content_type = ct
- conn.putheader('Content-type', content_type)
- conn.putheader('Content-length', str(len(body)))
- conn.putheader('Authorization', auth)
- conn.endheaders()
- conn.send(body)
- except socket.error as e:
- self.announce(str(e), log.ERROR)
- return
-
- r = conn.getresponse()
- if r.status == 200:
+ conn.putheader('Content-type', content_type)
+ conn.putheader('Content-length', str(len(body)))
+ conn.putheader('Authorization', auth)
+ conn.endheaders()
+ conn.send(body)
+ except socket.error as e:
+ self.announce(str(e), log.ERROR)
+ return
+
+ r = conn.getresponse()
+ if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
- elif r.status == 301:
- location = r.getheader('Location')
- if location is None:
- location = 'https://pythonhosted.org/%s/' % meta.get_name()
+ elif r.status == 301:
+ location = r.getheader('Location')
+ if location is None:
+ location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
- else:
+ else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
- if self.show_response:
- print('-' * 75, r.read(), '-' * 75)
+ if self.show_response:
+ print('-' * 75, r.read(), '-' * 75)
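For orientation, the upload path above reduces to a small standalone sketch (the helper name and argument handling are hypothetical; only the standard library is assumed). It shows why http.client is used directly: the Basic credentials must ride along on the very first request rather than waiting for a 401 challenge, as urllib's handlers would:

    import http.client
    import urllib.parse
    from base64 import standard_b64encode

    def post_with_basic_auth(repository, body, content_type, username, password):
        # Hypothetical helper mirroring upload_docs.upload_file above.
        credentials = (username + ':' + password).encode('utf-8')
        auth = 'Basic ' + standard_b64encode(credentials).decode('ascii')
        scheme, netloc, path = urllib.parse.urlparse(repository)[:3]
        if scheme == 'https':
            conn = http.client.HTTPSConnection(netloc)
        else:
            conn = http.client.HTTPConnection(netloc)
        conn.putrequest('POST', path)
        conn.putheader('Content-type', content_type)
        conn.putheader('Content-length', str(len(body)))
        conn.putheader('Authorization', auth)
        conn.endheaders()
        conn.send(body)
        return conn.getresponse()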
diff --git a/contrib/python/setuptools/py3/setuptools/depends.py b/contrib/python/setuptools/py3/setuptools/depends.py
index 3108f7ca42..adffd12db8 100644
--- a/contrib/python/setuptools/py3/setuptools/depends.py
+++ b/contrib/python/setuptools/py3/setuptools/depends.py
@@ -1,87 +1,87 @@
-import sys
-import marshal
+import sys
+import marshal
import contextlib
import dis
-
+
from setuptools.extern.packaging import version
from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import _imp
-
-__all__ = [
- 'Require', 'find_module', 'get_module_constant', 'extract_constant'
-]
-
-class Require:
- """A prerequisite to building or installing a distribution"""
-
+__all__ = [
+ 'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
+class Require:
+ """A prerequisite to building or installing a distribution"""
+
def __init__(
self, name, requested_version, module, homepage='',
- attribute=None, format=None):
-
- if format is None and requested_version is not None:
+ attribute=None, format=None):
+
+ if format is None and requested_version is not None:
format = version.Version
-
- if format is not None:
- requested_version = format(requested_version)
- if attribute is None:
- attribute = '__version__'
-
- self.__dict__.update(locals())
- del self.self
-
- def full_name(self):
- """Return full package/distribution name, w/version"""
- if self.requested_version is not None:
+
+ if format is not None:
+ requested_version = format(requested_version)
+ if attribute is None:
+ attribute = '__version__'
+
+ self.__dict__.update(locals())
+ del self.self
+
+ def full_name(self):
+ """Return full package/distribution name, w/version"""
+ if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
- return self.name
-
- def version_ok(self, version):
- """Is 'version' sufficiently up-to-date?"""
- return self.attribute is None or self.format is None or \
+ return self.name
+
+ def version_ok(self, version):
+ """Is 'version' sufficiently up-to-date?"""
+ return self.attribute is None or self.format is None or \
str(version) != "unknown" and self.format(version) >= self.requested_version
-
- def get_version(self, paths=None, default="unknown"):
- """Get version number of installed module, 'None', or 'default'
-
- Search 'paths' for module. If not found, return 'None'. If found,
- return the extracted version attribute, or 'default' if no version
- attribute was specified, or the value cannot be determined without
- importing the module. The version is formatted according to the
- requirement's version format (if any), unless it is 'None' or the
- supplied 'default'.
- """
-
- if self.attribute is None:
- try:
+
+ def get_version(self, paths=None, default="unknown"):
+ """Get version number of installed module, 'None', or 'default'
+
+ Search 'paths' for module. If not found, return 'None'. If found,
+ return the extracted version attribute, or 'default' if no version
+ attribute was specified, or the value cannot be determined without
+ importing the module. The version is formatted according to the
+ requirement's version format (if any), unless it is 'None' or the
+ supplied 'default'.
+ """
+
+ if self.attribute is None:
+ try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
- return default
- except ImportError:
- return None
-
- v = get_module_constant(self.module, self.attribute, default, paths)
-
- if v is not None and v is not default and self.format is not None:
- return self.format(v)
-
- return v
-
- def is_present(self, paths=None):
- """Return true if dependency is present on 'paths'"""
- return self.get_version(paths) is not None
-
- def is_current(self, paths=None):
- """Return true if dependency is present and up-to-date on 'paths'"""
- version = self.get_version(paths)
- if version is None:
- return False
+ return default
+ except ImportError:
+ return None
+
+ v = get_module_constant(self.module, self.attribute, default, paths)
+
+ if v is not None and v is not default and self.format is not None:
+ return self.format(v)
+
+ return v
+
+ def is_present(self, paths=None):
+ """Return true if dependency is present on 'paths'"""
+ return self.get_version(paths) is not None
+
+ def is_current(self, paths=None):
+ """Return true if dependency is present and up-to-date on 'paths'"""
+ version = self.get_version(paths)
+ if version is None:
+ return False
return self.version_ok(str(version))
-
-
+
+
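As a quick illustration of the Require API above (the package and version values are made up for the example):

    from setuptools.depends import Require

    req = Require('Docutils', '0.3', 'docutils')  # name, version, module
    if req.is_present():
        # e.g. "Docutils-0.3 up to date: True"
        print(req.full_name(), 'up to date:', req.is_current())
    else:
        print(req.name, 'is not importable on sys.path')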
def maybe_close(f):
@contextlib.contextmanager
def empty():
@@ -89,88 +89,88 @@ def maybe_close(f):
return
if not f:
return empty()
-
+
return contextlib.closing(f)
-
-
-def get_module_constant(module, symbol, default=-1, paths=None):
- """Find 'module' by searching 'paths', and extract 'symbol'
-
- Return 'None' if 'module' does not exist on 'paths', or it does not define
- 'symbol'. If the module defines 'symbol' as a constant, return the
- constant. Otherwise, return 'default'."""
-
- try:
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+ """Find 'module' by searching 'paths', and extract 'symbol'
+
+ Return 'None' if 'module' does not exist on 'paths', or it does not define
+ 'symbol'. If the module defines 'symbol' as a constant, return the
+ constant. Otherwise, return 'default'."""
+
+ try:
f, path, (suffix, mode, kind) = info = find_module(module, paths)
- except ImportError:
- # Module doesn't exist
- return None
-
+ except ImportError:
+ # Module doesn't exist
+ return None
+
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
- code = marshal.load(f)
+ code = marshal.load(f)
elif kind == PY_FROZEN:
code = _imp.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
- code = compile(f.read(), path, 'exec')
- else:
- # Not something we can parse; we'll have to import it. :(
+ code = compile(f.read(), path, 'exec')
+ else:
+ # Not something we can parse; we'll have to import it. :(
imported = _imp.get_module(module, paths, info)
return getattr(imported, symbol, None)
-
- return extract_constant(code, symbol, default)
-
-
-def extract_constant(code, symbol, default=-1):
- """Extract the constant value of 'symbol' from 'code'
-
- If the name 'symbol' is bound to a constant value by the Python code
- object 'code', return that value. If 'symbol' is bound to an expression,
- return 'default'. Otherwise, return 'None'.
-
- Return value is based on the first assignment to 'symbol'. 'symbol' must
- be a global, or at least a non-"fast" local in the code block. That is,
- only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
- must be present in 'code.co_names'.
- """
- if symbol not in code.co_names:
+
+ return extract_constant(code, symbol, default)
+
+
+def extract_constant(code, symbol, default=-1):
+ """Extract the constant value of 'symbol' from 'code'
+
+ If the name 'symbol' is bound to a constant value by the Python code
+ object 'code', return that value. If 'symbol' is bound to an expression,
+ return 'default'. Otherwise, return 'None'.
+
+ Return value is based on the first assignment to 'symbol'. 'symbol' must
+ be a global, or at least a non-"fast" local in the code block. That is,
+ only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+ must be present in 'code.co_names'.
+ """
+ if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
- return None
-
- name_idx = list(code.co_names).index(symbol)
-
- STORE_NAME = 90
- STORE_GLOBAL = 97
- LOAD_CONST = 100
-
- const = default
-
+ return None
+
+ name_idx = list(code.co_names).index(symbol)
+
+ STORE_NAME = 90
+ STORE_GLOBAL = 97
+ LOAD_CONST = 100
+
+ const = default
+
for byte_code in dis.Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
-
+
if op == LOAD_CONST:
- const = code.co_consts[arg]
+ const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
- return const
- else:
- const = default
-
-
-def _update_globals():
- """
- Patch the globals to remove the objects not available on some platforms.
-
- XXX it'd be better to test assertions about bytecode instead.
- """
-
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- return
- incompatible = 'extract_constant', 'get_module_constant'
- for name in incompatible:
- del globals()[name]
- __all__.remove(name)
-
-
-_update_globals()
+ return const
+ else:
+ const = default
+
+
+def _update_globals():
+ """
+ Patch the globals to remove the objects not available on some platforms.
+
+ XXX it'd be better to test assertions about bytecode instead.
+ """
+
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ return
+ incompatible = 'extract_constant', 'get_module_constant'
+ for name in incompatible:
+ del globals()[name]
+ __all__.remove(name)
+
+
+_update_globals()
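A behavior sketch for extract_constant above (the compiled snippet is illustrative; CPython bytecode is assumed). Only a STORE_NAME/STORE_GLOBAL of the symbol that is directly preceded by a LOAD_CONST counts as a constant binding:

    from setuptools.depends import extract_constant

    code = compile("__version__ = '1.2.3'\nalias = __version__\n",
                   '<example>', 'exec')
    # bound to a constant -> the constant is returned
    assert extract_constant(code, '__version__', default=-1) == '1.2.3'
    # bound to an expression (here, another name) -> 'default' is returned
    assert extract_constant(code, 'alias', default=-1) == -1
    # never assigned at all -> None
    assert extract_constant(code, 'missing', default=-1) is None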
diff --git a/contrib/python/setuptools/py3/setuptools/dist.py b/contrib/python/setuptools/py3/setuptools/dist.py
index cf4e8465f6..37a10d1dcd 100644
--- a/contrib/python/setuptools/py3/setuptools/dist.py
+++ b/contrib/python/setuptools/py3/setuptools/dist.py
@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
-__all__ = ['Distribution']
-
+__all__ = ['Distribution']
+
import io
import sys
-import re
-import os
-import warnings
-import numbers
-import distutils.log
-import distutils.core
-import distutils.cmd
-import distutils.dist
+import re
+import os
+import warnings
+import numbers
+import distutils.log
+import distutils.core
+import distutils.cmd
+import distutils.dist
import distutils.command
from distutils.util import strtobool
from distutils.debug import DEBUG
@@ -25,32 +25,32 @@ from email import message_from_file
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from distutils.util import rfc822_escape
-
+
from setuptools.extern import packaging
from setuptools.extern import ordered_set
from setuptools.extern.more_itertools import unique_everseen
-
+
from . import SetuptoolsDeprecationWarning
import setuptools
import setuptools.command
-from setuptools import windows_support
+from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
-import pkg_resources
+import pkg_resources
from setuptools.extern.packaging import version
-
+
if TYPE_CHECKING:
from email.message import Message
__import__('setuptools.extern.packaging.specifiers')
__import__('setuptools.extern.packaging.version')
-
-def _get_unpatched(cls):
+
+def _get_unpatched(cls):
warnings.warn("Do not call this function", DistDeprecationWarning)
return get_unpatched(cls)
-
+
def get_metadata_version(self):
mv = getattr(self, 'metadata_version', None)
@@ -163,7 +163,7 @@ def single_line(val):
def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
"""Write the PKG-INFO format data to a file object."""
version = self.get_metadata_version()
-
+
def write_field(key, value):
file.write("%s: %s\n" % (key, value))
@@ -191,7 +191,7 @@ def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
write_field('Download-URL', self.download_url)
for project_url in self.project_urls.items():
write_field('Project-URL', '%s, %s' % project_url)
-
+
keywords = ','.join(self.get_keywords())
if keywords:
write_field('Keywords', keywords)
@@ -218,47 +218,47 @@ def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
write_field('Provides-Extra', extra)
self._write_list(file, 'License-File', self.license_files or [])
-
+
file.write("\n%s\n\n" % self.get_long_description())
-sequence = tuple, list
-
+sequence = tuple, list
-def check_importable(dist, attr, value):
- try:
+
+def check_importable(dist, attr, value):
+ try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
- assert not ep.extras
+ assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError) as e:
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)" % (attr, value)
) from e
-
-
-def assert_string_list(dist, attr, value):
+
+
+def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
- try:
+ try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError) as e:
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
) from e
-def check_nsp(dist, attr, value):
- """Verify that namespace packages are valid"""
+def check_nsp(dist, attr, value):
+ """Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
- if not dist.has_contents_for(nsp):
- raise DistutilsSetupError(
+ if not dist.has_contents_for(nsp):
+ raise DistutilsSetupError(
"Distribution contains no modules or packages for "
+ "namespace package %r" % nsp
- )
+ )
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
@@ -267,19 +267,19 @@ def check_nsp(dist, attr, value):
nsp,
parent,
)
-
-def check_extras(dist, attr, value):
- """Verify that extras_require mapping is valid"""
- try:
+
+def check_extras(dist, attr, value):
+ """Verify that extras_require mapping is valid"""
+ try:
list(itertools.starmap(_check_extra, value.items()))
except (TypeError, ValueError, AttributeError) as e:
- raise DistutilsSetupError(
- "'extras_require' must be a dictionary whose values are "
- "strings or lists of strings containing valid project/version "
- "requirement specifiers."
+ raise DistutilsSetupError(
+ "'extras_require' must be a dictionary whose values are "
+ "strings or lists of strings containing valid project/version "
+ "requirement specifiers."
) from e
-
+
def _check_extra(extra, reqs):
name, sep, marker = extra.partition(':')
@@ -288,13 +288,13 @@ def _check_extra(extra, reqs):
list(pkg_resources.parse_requirements(reqs))
-def assert_bool(dist, attr, value):
- """Verify that value is True, False, 0, or 1"""
- if bool(value) != value:
- tmpl = "{attr!r} must be a boolean value (got {value!r})"
- raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
-
-
+def assert_bool(dist, attr, value):
+ """Verify that value is True, False, 0, or 1"""
+ if bool(value) != value:
+ tmpl = "{attr!r} must be a boolean value (got {value!r})"
+ raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
def invalid_unless_false(dist, attr, value):
if not value:
warnings.warn(f"{attr} is ignored.", DistDeprecationWarning)
@@ -302,19 +302,19 @@ def invalid_unless_false(dist, attr, value):
raise DistutilsSetupError(f"{attr} is invalid.")
-def check_requirements(dist, attr, value):
- """Verify that install_requires is a valid requirements list"""
- try:
- list(pkg_resources.parse_requirements(value))
+def check_requirements(dist, attr, value):
+ """Verify that install_requires is a valid requirements list"""
+ try:
+ list(pkg_resources.parse_requirements(value))
if isinstance(value, (dict, set)):
raise TypeError("Unordered types are not allowed")
- except (TypeError, ValueError) as error:
- tmpl = (
- "{attr!r} must be a string or list of strings "
- "containing valid project/version requirement specifiers; {error}"
- )
+ except (TypeError, ValueError) as error:
+ tmpl = (
+ "{attr!r} must be a string or list of strings "
+ "containing valid project/version requirement specifiers; {error}"
+ )
raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
-
+
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
@@ -327,21 +327,21 @@ def check_specifier(dist, attr, value):
raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
-def check_entry_points(dist, attr, value):
- """Verify that entry_points map is parseable"""
- try:
- pkg_resources.EntryPoint.parse_map(value)
- except ValueError as e:
+def check_entry_points(dist, attr, value):
+ """Verify that entry_points map is parseable"""
+ try:
+ pkg_resources.EntryPoint.parse_map(value)
+ except ValueError as e:
raise DistutilsSetupError(e) from e
-
-def check_test_suite(dist, attr, value):
+
+def check_test_suite(dist, attr, value):
if not isinstance(value, str):
- raise DistutilsSetupError("test_suite must be a string")
-
+ raise DistutilsSetupError("test_suite must be a string")
-def check_package_data(dist, attr, value):
- """Verify that value is a dictionary of package names to glob lists"""
+
+def check_package_data(dist, attr, value):
+ """Verify that value is a dictionary of package names to glob lists"""
if not isinstance(value, dict):
raise DistutilsSetupError(
"{!r} must be a dictionary mapping package names to lists of "
@@ -353,73 +353,73 @@ def check_package_data(dist, attr, value):
"keys of {!r} dict must be strings (got {!r})".format(attr, k)
)
assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
-
-def check_packages(dist, attr, value):
- for pkgname in value:
- if not re.match(r'\w+(\.\w+)*', pkgname):
- distutils.log.warn(
- "WARNING: %r not a valid package name; please use only "
+
+def check_packages(dist, attr, value):
+ for pkgname in value:
+ if not re.match(r'\w+(\.\w+)*', pkgname):
+ distutils.log.warn(
+ "WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py",
pkgname,
- )
-
-
+ )
+
+
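The validators above all share the (dist, attr, value) entry-point signature and signal failure with DistutilsSetupError; a minimal sketch of that contract (the value is deliberately wrong, and 'dist' is unused by most checks):

    from distutils.errors import DistutilsSetupError
    from setuptools.dist import assert_string_list

    try:
        assert_string_list(None, 'py_modules', 'not-a-list')
    except DistutilsSetupError as e:
        print(e)  # 'py_modules' must be a list of strings (got 'not-a-list')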
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
"""Distribution with support for tests and package data
-
- This is an enhanced version of 'distutils.dist.Distribution' that
- effectively adds the following new optional keyword arguments to 'setup()':
-
- 'install_requires' -- a string or sequence of strings specifying project
- versions that the distribution requires when installed, in the format
- used by 'pkg_resources.require()'. They will be installed
- automatically when the package is installed. If you wish to use
- packages that are not available in PyPI, or want to give your users an
- alternate download location, you can add a 'find_links' option to the
- '[easy_install]' section of your project's 'setup.cfg' file, and then
- setuptools will scan the listed web pages for links that satisfy the
- requirements.
-
- 'extras_require' -- a dictionary mapping names of optional "extras" to the
- additional requirement(s) that using those extras incurs. For example,
- this::
-
- extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
-
- indicates that the distribution can optionally provide an extra
- capability called "reST", but it can only be used if docutils and
- reSTedit are installed. If the user installs your package using
- EasyInstall and requests one of your extras, the corresponding
- additional requirements will be installed if needed.
-
- 'test_suite' -- the name of a test suite to run for the 'test' command.
- If the user runs 'python setup.py test', the package will be installed,
- and the named test suite will be run. The format is the same as
- would be used on a 'unittest.py' command line. That is, it is the
- dotted name of an object to import and call to generate a test suite.
-
- 'package_data' -- a dictionary mapping package names to lists of filenames
- or globs to use to find data files contained in the named packages.
- If the dictionary has filenames or globs listed under '""' (the empty
- string), those names will be searched for in every package, in addition
- to any names for the specific package. Data files found using these
- names/globs will be installed along with the package, in the same
- location as the package. Note that globs are allowed to reference
- the contents of non-package subdirectories, as long as you use '/' as
- a path separator. (Globs are automatically converted to
- platform-specific paths at runtime.)
-
- In addition to these new keywords, this class also has several new methods
- for manipulating the distribution's contents. For example, the 'include()'
- and 'exclude()' methods can be thought of as in-place add and subtract
- commands that add or remove packages, modules, extensions, and so on from
+
+ This is an enhanced version of 'distutils.dist.Distribution' that
+ effectively adds the following new optional keyword arguments to 'setup()':
+
+ 'install_requires' -- a string or sequence of strings specifying project
+ versions that the distribution requires when installed, in the format
+ used by 'pkg_resources.require()'. They will be installed
+ automatically when the package is installed. If you wish to use
+ packages that are not available in PyPI, or want to give your users an
+ alternate download location, you can add a 'find_links' option to the
+ '[easy_install]' section of your project's 'setup.cfg' file, and then
+ setuptools will scan the listed web pages for links that satisfy the
+ requirements.
+
+ 'extras_require' -- a dictionary mapping names of optional "extras" to the
+ additional requirement(s) that using those extras incurs. For example,
+ this::
+
+ extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+ indicates that the distribution can optionally provide an extra
+ capability called "reST", but it can only be used if docutils and
+ reSTedit are installed. If the user installs your package using
+ EasyInstall and requests one of your extras, the corresponding
+ additional requirements will be installed if needed.
+
+ 'test_suite' -- the name of a test suite to run for the 'test' command.
+ If the user runs 'python setup.py test', the package will be installed,
+ and the named test suite will be run. The format is the same as
+ would be used on a 'unittest.py' command line. That is, it is the
+ dotted name of an object to import and call to generate a test suite.
+
+ 'package_data' -- a dictionary mapping package names to lists of filenames
+ or globs to use to find data files contained in the named packages.
+ If the dictionary has filenames or globs listed under '""' (the empty
+ string), those names will be searched for in every package, in addition
+ to any names for the specific package. Data files found using these
+ names/globs will be installed along with the package, in the same
+ location as the package. Note that globs are allowed to reference
+ the contents of non-package subdirectories, as long as you use '/' as
+ a path separator. (Globs are automatically converted to
+ platform-specific paths at runtime.)
+
+ In addition to these new keywords, this class also has several new methods
+ for manipulating the distribution's contents. For example, the 'include()'
+ and 'exclude()' methods can be thought of as in-place add and subtract
+ commands that add or remove packages, modules, extensions, and so on from
the distribution.
- """
-
+ """
+
_DISTUTILS_UNSUPPORTED_METADATA = {
'long_description_content_type': lambda: None,
'project_urls': dict,
@@ -428,34 +428,34 @@ class Distribution(_Distribution):
'license_files': lambda: None,
}
- _patched_dist = None
-
- def patch_missing_pkg_info(self, attrs):
- # Fake up a replacement for the data that would normally come from
- # PKG-INFO, but which might not yet be built if this is a fresh
- # checkout.
- #
- if not attrs or 'name' not in attrs or 'version' not in attrs:
- return
- key = pkg_resources.safe_name(str(attrs['name'])).lower()
- dist = pkg_resources.working_set.by_key.get(key)
- if dist is not None and not dist.has_metadata('PKG-INFO'):
- dist._version = pkg_resources.safe_version(str(attrs['version']))
- self._patched_dist = dist
-
- def __init__(self, attrs=None):
- have_package_data = hasattr(self, "package_data")
- if not have_package_data:
- self.package_data = {}
+ _patched_dist = None
+
+ def patch_missing_pkg_info(self, attrs):
+ # Fake up a replacement for the data that would normally come from
+ # PKG-INFO, but which might not yet be built if this is a fresh
+ # checkout.
+ #
+ if not attrs or 'name' not in attrs or 'version' not in attrs:
+ return
+ key = pkg_resources.safe_name(str(attrs['name'])).lower()
+ dist = pkg_resources.working_set.by_key.get(key)
+ if dist is not None and not dist.has_metadata('PKG-INFO'):
+ dist._version = pkg_resources.safe_version(str(attrs['version']))
+ self._patched_dist = dist
+
+ def __init__(self, attrs=None):
+ have_package_data = hasattr(self, "package_data")
+ if not have_package_data:
+ self.package_data = {}
attrs = attrs or {}
- self.dist_files = []
+ self.dist_files = []
# Filter-out setuptools' specific options.
self.src_root = attrs.pop("src_root", None)
- self.patch_missing_pkg_info(attrs)
+ self.patch_missing_pkg_info(attrs)
self.dependency_links = attrs.pop('dependency_links', [])
self.setup_requires = attrs.pop('setup_requires', [])
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
- vars(self).setdefault(ep.name, None)
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ vars(self).setdefault(ep.name, None)
_Distribution.__init__(
self,
{
@@ -497,19 +497,19 @@ class Distribution(_Distribution):
@staticmethod
def _validate_version(version):
if isinstance(version, numbers.Number):
- # Some people apparently take "version number" too literally :)
+ # Some people apparently take "version number" too literally :)
version = str(version)
-
+
if version is not None:
- try:
+ try:
packaging.version.Version(version)
- except (packaging.version.InvalidVersion, TypeError):
- warnings.warn(
- "The version specified (%r) is an invalid version, this "
- "may not work as expected with newer versions of "
- "setuptools, pip, and PyPI. Please see PEP 440 for more "
+ except (packaging.version.InvalidVersion, TypeError):
+ warnings.warn(
+ "The version specified (%r) is an invalid version, this "
+ "may not work as expected with newer versions of "
+ "setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % version
- )
+ )
return setuptools.sic(version)
return version
@@ -520,7 +520,7 @@ class Distribution(_Distribution):
"""
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
-
+
if getattr(self, 'extras_require', None):
for extra in self.extras_require.keys():
# Since this gets called multiple times at points where the
@@ -807,18 +807,18 @@ class Distribution(_Distribution):
self._finalize_requires()
self._finalize_license_files()
- def fetch_build_eggs(self, requires):
- """Resolve pre-setup requirements"""
- resolved_dists = pkg_resources.working_set.resolve(
- pkg_resources.parse_requirements(requires),
- installer=self.fetch_build_egg,
- replace_conflicting=True,
- )
- for dist in resolved_dists:
- pkg_resources.working_set.add(dist, replace=True)
+ def fetch_build_eggs(self, requires):
+ """Resolve pre-setup requirements"""
+ resolved_dists = pkg_resources.working_set.resolve(
+ pkg_resources.parse_requirements(requires),
+ installer=self.fetch_build_egg,
+ replace_conflicting=True,
+ )
+ for dist in resolved_dists:
+ pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
-
- def finalize_options(self):
+
+ def finalize_options(self):
"""
Allow plugins to apply arbitrary operations to the
distribution. Each hook may optionally define an 'order'
@@ -826,7 +826,7 @@ class Distribution(_Distribution):
go first and the default is 0.
"""
group = 'setuptools.finalize_distribution_options'
-
+
def by_order(hook):
return getattr(hook, 'order', 0)
@@ -851,19 +851,19 @@ class Distribution(_Distribution):
return ep.name in removed
def _finalize_setup_keywords(self):
- for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
- if value is not None:
- ep.require(installer=self.fetch_build_egg)
- ep.load()(self, ep.name, value)
-
- def get_egg_cache_dir(self):
- egg_cache_dir = os.path.join(os.curdir, '.eggs')
- if not os.path.exists(egg_cache_dir):
- os.mkdir(egg_cache_dir)
- windows_support.hide_file(egg_cache_dir)
- readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
- with open(readme_txt_filename, 'w') as f:
+ if value is not None:
+ ep.require(installer=self.fetch_build_egg)
+ ep.load()(self, ep.name, value)
+
+ def get_egg_cache_dir(self):
+ egg_cache_dir = os.path.join(os.curdir, '.eggs')
+ if not os.path.exists(egg_cache_dir):
+ os.mkdir(egg_cache_dir)
+ windows_support.hide_file(egg_cache_dir)
+ readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+ with open(readme_txt_filename, 'w') as f:
f.write(
'This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n'
@@ -872,285 +872,285 @@ class Distribution(_Distribution):
'This directory caches those eggs to prevent '
'repeated downloads.\n\n'
)
- f.write('However, it is safe to delete this directory.\n\n')
-
- return egg_cache_dir
-
- def fetch_build_egg(self, req):
- """Fetch an egg needed for building"""
+ f.write('However, it is safe to delete this directory.\n\n')
+
+ return egg_cache_dir
+
+ def fetch_build_egg(self, req):
+ """Fetch an egg needed for building"""
from setuptools.installer import fetch_build_egg
return fetch_build_egg(self, req)
-
- def get_command_class(self, command):
- """Pluggable version of get_command_class()"""
- if command in self.cmdclass:
- return self.cmdclass[command]
-
+
+ def get_command_class(self, command):
+ """Pluggable version of get_command_class()"""
+ if command in self.cmdclass:
+ return self.cmdclass[command]
+
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
- ep.require(installer=self.fetch_build_egg)
- self.cmdclass[command] = cmdclass = ep.load()
- return cmdclass
- else:
- return _Distribution.get_command_class(self, command)
-
- def print_commands(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.print_commands(self)
-
- def get_command_list(self):
- for ep in pkg_resources.iter_entry_points('distutils.commands'):
- if ep.name not in self.cmdclass:
- # don't require extras as the commands won't be invoked
- cmdclass = ep.resolve()
- self.cmdclass[ep.name] = cmdclass
- return _Distribution.get_command_list(self)
-
+ ep.require(installer=self.fetch_build_egg)
+ self.cmdclass[command] = cmdclass = ep.load()
+ return cmdclass
+ else:
+ return _Distribution.get_command_class(self, command)
+
+ def print_commands(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.print_commands(self)
+
+ def get_command_list(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.get_command_list(self)
+
def include(self, **attrs):
- """Add items to distribution that are named in keyword arguments
-
+ """Add items to distribution that are named in keyword arguments
+
For example, 'dist.include(py_modules=["x"])' would add 'x' to
- the distribution's 'py_modules' attribute, if it was not already
- there.
-
- Currently, this method only supports inclusion for attributes that are
- lists or tuples. If you need to add support for adding to other
- attributes in this or a subclass, you can add an '_include_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
- will try to call 'dist._include_foo({"bar":"baz"})', which can then
- handle whatever special inclusion logic is needed.
- """
+ the distribution's 'py_modules' attribute, if it was not already
+ there.
+
+ Currently, this method only supports inclusion for attributes that are
+ lists or tuples. If you need to add support for adding to other
+ attributes in this or a subclass, you can add an '_include_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
+ will try to call 'dist._include_foo({"bar":"baz"})', which can then
+ handle whatever special inclusion logic is needed.
+ """
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
- if include:
- include(v)
- else:
+ if include:
+ include(v)
+ else:
self._include_misc(k, v)
-
+
def exclude_package(self, package):
- """Remove packages, modules, and extensions in named package"""
-
+ """Remove packages, modules, and extensions in named package"""
+
pfx = package + '.'
- if self.packages:
- self.packages = [
+ if self.packages:
+ self.packages = [
p for p in self.packages if p != package and not p.startswith(pfx)
- ]
-
- if self.py_modules:
- self.py_modules = [
+ ]
+
+ if self.py_modules:
+ self.py_modules = [
p for p in self.py_modules if p != package and not p.startswith(pfx)
- ]
-
- if self.ext_modules:
- self.ext_modules = [
+ ]
+
+ if self.ext_modules:
+ self.ext_modules = [
p
for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
- ]
-
+ ]
+
def has_contents_for(self, package):
- """Return true if 'exclude_package(package)' would do something"""
-
+ """Return true if 'exclude_package(package)' would do something"""
+
pfx = package + '.'
-
- for p in self.iter_distribution_names():
+
+ for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
- return True
-
+ return True
+
def _exclude_misc(self, name, value):
- """Handle 'exclude()' for list/tuple attrs without a special handler"""
+ """Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
- raise DistutilsSetupError(
- "%s: setting must be a list or tuple (%r)" % (name, value)
- )
- try:
+ raise DistutilsSetupError(
+ "%s: setting must be a list or tuple (%r)" % (name, value)
+ )
+ try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError("%s: No such distribution setting" % name) from e
if old is not None and not isinstance(old, sequence):
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
- )
- elif old:
+ )
+ elif old:
setattr(self, name, [item for item in old if item not in value])
-
+
def _include_misc(self, name, value):
- """Handle 'include()' for list/tuple attrs without a special handler"""
-
+ """Handle 'include()' for list/tuple attrs without a special handler"""
+
if not isinstance(value, sequence):
raise DistutilsSetupError("%s: setting must be a list (%r)" % (name, value))
- try:
+ try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError("%s: No such distribution setting" % name) from e
- if old is None:
+ if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
- raise DistutilsSetupError(
+ raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
- )
- else:
+ )
+ else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
-
+
def exclude(self, **attrs):
- """Remove items from distribution that are named in keyword arguments
-
- For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
- the distribution's 'py_modules' attribute. Excluding packages uses
- the 'exclude_package()' method, so all of the package's contained
- packages, modules, and extensions are also excluded.
-
- Currently, this method only supports exclusion from attributes that are
- lists or tuples. If you need to add support for excluding from other
- attributes in this or a subclass, you can add an '_exclude_X' method,
- where 'X' is the name of the attribute. The method will be called with
- the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
- will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
- handle whatever special exclusion logic is needed.
- """
+ """Remove items from distribution that are named in keyword arguments
+
+ For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+ the distribution's 'py_modules' attribute. Excluding packages uses
+ the 'exclude_package()' method, so all of the package's contained
+ packages, modules, and extensions are also excluded.
+
+ Currently, this method only supports exclusion from attributes that are
+ lists or tuples. If you need to add support for excluding from other
+ attributes in this or a subclass, you can add an '_exclude_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
+ will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+ handle whatever special exclusion logic is needed.
+ """
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
- if exclude:
- exclude(v)
- else:
+ if exclude:
+ exclude(v)
+ else:
self._exclude_misc(k, v)
-
+
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
- raise DistutilsSetupError(
- "packages: setting must be a list or tuple (%r)" % (packages,)
- )
- list(map(self.exclude_package, packages))
-
- def _parse_command_opts(self, parser, args):
- # Remove --with-X/--without-X options when processing command args
- self.global_options = self.__class__.global_options
- self.negative_opt = self.__class__.negative_opt
-
- # First, expand any aliases
- command = args[0]
- aliases = self.get_option_dict('aliases')
- while command in aliases:
+ raise DistutilsSetupError(
+ "packages: setting must be a list or tuple (%r)" % (packages,)
+ )
+ list(map(self.exclude_package, packages))
+
+ def _parse_command_opts(self, parser, args):
+ # Remove --with-X/--without-X options when processing command args
+ self.global_options = self.__class__.global_options
+ self.negative_opt = self.__class__.negative_opt
+
+ # First, expand any aliases
+ command = args[0]
+ aliases = self.get_option_dict('aliases')
+ while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
- import shlex
+ import shlex
args[:1] = shlex.split(alias, True)
- command = args[0]
-
- nargs = _Distribution._parse_command_opts(self, parser, args)
-
- # Handle commands that want to consume all remaining arguments
- cmd_class = self.get_command_class(command)
+ command = args[0]
+
+ nargs = _Distribution._parse_command_opts(self, parser, args)
+
+ # Handle commands that want to consume all remaining arguments
+ cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
- self.get_option_dict(command)['args'] = ("command line", nargs)
- if nargs is not None:
- return []
-
- return nargs
-
- def get_cmdline_options(self):
- """Return a '{cmd: {opt:val}}' map of all command-line options
-
- Option names are all long, but do not include the leading '--', and
- contain dashes rather than underscores. If the option doesn't take
- an argument (e.g. '--quiet'), the 'val' is 'None'.
-
- Note that options provided by config files are intentionally excluded.
- """
-
- d = {}
-
+ self.get_option_dict(command)['args'] = ("command line", nargs)
+ if nargs is not None:
+ return []
+
+ return nargs
+
+ def get_cmdline_options(self):
+ """Return a '{cmd: {opt:val}}' map of all command-line options
+
+ Option names are all long, but do not include the leading '--', and
+ contain dashes rather than underscores. If the option doesn't take
+ an argument (e.g. '--quiet'), the 'val' is 'None'.
+
+ Note that options provided by config files are intentionally excluded.
+ """
+
+ d = {}
+
for cmd, opts in self.command_options.items():
-
+
for opt, (src, val) in opts.items():
-
- if src != "command line":
- continue
-
+
+ if src != "command line":
+ continue
+
opt = opt.replace('_', '-')
-
+
if val == 0:
- cmdobj = self.get_command_obj(cmd)
- neg_opt = self.negative_opt.copy()
+ cmdobj = self.get_command_obj(cmd)
+ neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
- break
- else:
- raise AssertionError("Shouldn't be able to get here")
-
+ break
+ else:
+ raise AssertionError("Shouldn't be able to get here")
+
elif val == 1:
- val = None
-
+ val = None
+
d.setdefault(cmd, {})[opt] = val
-
- return d
-
- def iter_distribution_names(self):
- """Yield all packages, modules, and extension names in distribution"""
-
- for pkg in self.packages or ():
- yield pkg
-
- for module in self.py_modules or ():
- yield module
-
- for ext in self.ext_modules or ():
+
+ return d
+
+ def iter_distribution_names(self):
+ """Yield all packages, modules, and extension names in distribution"""
+
+ for pkg in self.packages or ():
+ yield pkg
+
+ for module in self.py_modules or ():
+ yield module
+
+ for ext in self.ext_modules or ():
if isinstance(ext, tuple):
- name, buildinfo = ext
- else:
- name = ext.name
- if name.endswith('module'):
- name = name[:-6]
- yield name
-
- def handle_display_options(self, option_order):
- """If there were any non-global "display-only" options
- (--help-commands or the metadata display options) on the command
- line, display the requested info and return true; else return
- false.
- """
- import sys
-
+ name, buildinfo = ext
+ else:
+ name = ext.name
+ if name.endswith('module'):
+ name = name[:-6]
+ yield name
+
+ def handle_display_options(self, option_order):
+ """If there were any non-global "display-only" options
+ (--help-commands or the metadata display options) on the command
+ line, display the requested info and return true; else return
+ false.
+ """
+ import sys
+
if self.help_commands:
- return _Distribution.handle_display_options(self, option_order)
-
- # Stdout may be StringIO (e.g. in tests)
- if not isinstance(sys.stdout, io.TextIOWrapper):
- return _Distribution.handle_display_options(self, option_order)
-
- # Don't wrap stdout if utf-8 is already the encoding. Provides
- # workaround for #334.
- if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
- return _Distribution.handle_display_options(self, option_order)
-
- # Print metadata in UTF-8 no matter the platform
- encoding = sys.stdout.encoding
- errors = sys.stdout.errors
- newline = sys.platform != 'win32' and '\n' or None
- line_buffering = sys.stdout.line_buffering
-
- sys.stdout = io.TextIOWrapper(
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Stdout may be StringIO (e.g. in tests)
+ if not isinstance(sys.stdout, io.TextIOWrapper):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Don't wrap stdout if utf-8 is already the encoding. Provides
+ # workaround for #334.
+ if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Print metadata in UTF-8 no matter the platform
+ encoding = sys.stdout.encoding
+ errors = sys.stdout.errors
+ newline = sys.platform != 'win32' and '\n' or None
+ line_buffering = sys.stdout.line_buffering
+
+ sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering
)
- try:
- return _Distribution.handle_display_options(self, option_order)
- finally:
- sys.stdout = io.TextIOWrapper(
+ try:
+ return _Distribution.handle_display_options(self, option_order)
+ finally:
+ sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering
)
-
-
+
+
class DistDeprecationWarning(SetuptoolsDeprecationWarning):
"""Class for warning about deprecations in dist in
setuptools. Not ignored by default, unlike DeprecationWarning."""
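A compact sketch of the include()/exclude() semantics documented in the Distribution docstring above (attribute values are illustrative):

    from setuptools.dist import Distribution

    dist = Distribution({'py_modules': ['a', 'b']})
    dist.include(py_modules=['c'])   # in-place "add" for list attributes
    assert dist.py_modules == ['a', 'b', 'c']
    dist.exclude(py_modules=['b'])   # in-place "subtract"
    assert dist.py_modules == ['a', 'c']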
diff --git a/contrib/python/setuptools/py3/setuptools/extension.py b/contrib/python/setuptools/py3/setuptools/extension.py
index d733f35bc6..1820722a49 100644
--- a/contrib/python/setuptools/py3/setuptools/extension.py
+++ b/contrib/python/setuptools/py3/setuptools/extension.py
@@ -1,55 +1,55 @@
-import re
-import functools
-import distutils.core
-import distutils.errors
-import distutils.extension
-
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
from .monkey import get_unpatched
-
-
-def _have_cython():
- """
- Return True if Cython can be imported.
- """
+
+
+def _have_cython():
+ """
+ Return True if Cython can be imported.
+ """
cython_impl = 'Cython.Distutils.build_ext'
- try:
- # from (cython_impl) import build_ext
- __import__(cython_impl, fromlist=['build_ext']).build_ext
- return True
- except Exception:
- pass
- return False
-
-
-# for compatibility
-have_pyrex = _have_cython
-
+ try:
+ # from (cython_impl) import build_ext
+ __import__(cython_impl, fromlist=['build_ext']).build_ext
+ return True
+ except Exception:
+ pass
+ return False
+
+
+# for compatibility
+have_pyrex = _have_cython
+
_Extension = get_unpatched(distutils.core.Extension)
-
-class Extension(_Extension):
- """Extension that uses '.c' files in place of '.pyx' files"""
-
+
+class Extension(_Extension):
+ """Extension that uses '.c' files in place of '.pyx' files"""
+
def __init__(self, name, sources, *args, **kw):
# The *args is needed for compatibility as calls may use positional
# arguments. py_limited_api may be set only via keyword.
self.py_limited_api = kw.pop("py_limited_api", False)
_Extension.__init__(self, name, sources, *args, **kw)
- def _convert_pyx_sources_to_lang(self):
- """
- Replace sources with .pyx extensions to sources with the target
- language extension. This mechanism allows language authors to supply
- pre-converted sources but to prefer the .pyx sources.
- """
- if _have_cython():
- # the build has Cython, so allow it to compile the .pyx files
- return
- lang = self.language or ''
- target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
- sub = functools.partial(re.sub, '.pyx$', target_ext)
- self.sources = list(map(sub, self.sources))
-
-
-class Library(Extension):
- """Just like a regular Extension, but built as a library instead"""
+ def _convert_pyx_sources_to_lang(self):
+ """
+ Replace sources with .pyx extensions to sources with the target
+ language extension. This mechanism allows language authors to supply
+ pre-converted sources but to prefer the .pyx sources.
+ """
+ if _have_cython():
+ # the build has Cython, so allow it to compile the .pyx files
+ return
+ lang = self.language or ''
+ target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+ sub = functools.partial(re.sub, '.pyx$', target_ext)
+ self.sources = list(map(sub, self.sources))
+
+
+class Library(Extension):
+ """Just like a regular Extension, but built as a library instead"""
diff --git a/contrib/python/setuptools/py3/setuptools/launch.py b/contrib/python/setuptools/py3/setuptools/launch.py
index 4d0600e539..0208fdf33b 100644
--- a/contrib/python/setuptools/py3/setuptools/launch.py
+++ b/contrib/python/setuptools/py3/setuptools/launch.py
@@ -1,16 +1,16 @@
-"""
-Launch the Python script on the command line after
-setuptools is bootstrapped via import.
-"""
-
-# Note that setuptools gets imported implicitly by the
-# invocation of this script using python -m setuptools.launch
-
-import tokenize
-import sys
-
-
-def run():
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
+
+import tokenize
+import sys
+
+
+def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
@@ -23,14 +23,14 @@ def run():
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
-
+
open_ = getattr(tokenize, 'open', open)
with open_(script_name) as fid:
script = fid.read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
-
-
-if __name__ == '__main__':
+
+
+if __name__ == '__main__':
run()
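The core of run() above, as a standalone sketch (the script path is hypothetical): the script executes under a fresh __main__-style namespace, and tokenize.open honors PEP 263 coding declarations:

    import tokenize

    script_name = 'some_script.py'  # hypothetical target
    namespace = dict(__file__=script_name, __name__='__main__', __doc__=None)
    with tokenize.open(script_name) as fid:
        script = fid.read()
    exec(compile(script, script_name, 'exec'), namespace)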
diff --git a/contrib/python/setuptools/py3/setuptools/package_index.py b/contrib/python/setuptools/py3/setuptools/package_index.py
index 449e684568..270e7f3c91 100644
--- a/contrib/python/setuptools/py3/setuptools/package_index.py
+++ b/contrib/python/setuptools/py3/setuptools/package_index.py
@@ -1,13 +1,13 @@
-"""PyPI and direct package downloading"""
-import sys
-import os
-import re
+"""PyPI and direct package downloading"""
+import sys
+import os
+import re
import io
-import shutil
-import socket
-import base64
-import hashlib
-import itertools
+import shutil
+import socket
+import base64
+import hashlib
+import itertools
import warnings
import configparser
import html
@@ -15,37 +15,37 @@ import http.client
import urllib.parse
import urllib.request
import urllib.error
-from functools import wraps
-
+from functools import wraps
+
import setuptools
-from pkg_resources import (
- CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+from pkg_resources import (
+ CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST, parse_version,
-)
-from distutils import log
-from distutils.errors import DistutilsError
-from fnmatch import translate
+)
+from distutils import log
+from distutils.errors import DistutilsError
+from fnmatch import translate
from setuptools.wheel import Wheel
from setuptools.extern.more_itertools import unique_everseen
-
+
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
-PYPI_MD5 = re.compile(
+PYPI_MD5 = re.compile(
r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)'
-)
+)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
-EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
-
-__all__ = [
- 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
- 'interpret_distro_name',
-]
-
-_SOCKET_TIMEOUT = 15
-
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+ 'interpret_distro_name',
+]
+
+_SOCKET_TIMEOUT = 15
+
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(
py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
@@ -60,62 +60,62 @@ def parse_requirement_arg(spec):
) from e
-def parse_bdist_wininst(name):
- """Return (base,pyversion) or (None,None) for possible .exe name"""
-
- lower = name.lower()
- base, py_ver, plat = None, None, None
-
- if lower.endswith('.exe'):
- if lower.endswith('.win32.exe'):
- base = name[:-10]
- plat = 'win32'
+def parse_bdist_wininst(name):
+ """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+ lower = name.lower()
+ base, py_ver, plat = None, None, None
+
+ if lower.endswith('.exe'):
+ if lower.endswith('.win32.exe'):
+ base = name[:-10]
+ plat = 'win32'
elif lower.startswith('.win32-py', -16):
- py_ver = name[-7:-4]
- base = name[:-16]
- plat = 'win32'
- elif lower.endswith('.win-amd64.exe'):
- base = name[:-14]
- plat = 'win-amd64'
+ py_ver = name[-7:-4]
+ base = name[:-16]
+ plat = 'win32'
+ elif lower.endswith('.win-amd64.exe'):
+ base = name[:-14]
+ plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', -20):
- py_ver = name[-7:-4]
- base = name[:-20]
- plat = 'win-amd64'
+ py_ver = name[-7:-4]
+ base = name[:-20]
+ plat = 'win-amd64'
return base, py_ver, plat
-
-
-def egg_info_for_url(url):
- parts = urllib.parse.urlparse(url)
- scheme, server, path, parameters, query, fragment = parts
- base = urllib.parse.unquote(path.split('/')[-1])
+
+
+def egg_info_for_url(url):
+ parts = urllib.parse.urlparse(url)
+ scheme, server, path, parameters, query, fragment = parts
+ base = urllib.parse.unquote(path.split('/')[-1])
if server == 'sourceforge.net' and base == 'download': # XXX Yuck
- base = urllib.parse.unquote(path.split('/')[-2])
+ base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base:
base, fragment = base.split('#', 1)
return base, fragment
-
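Illustratively, for a direct download URL the '#egg=' fragment names the project (URL invented):

    from setuptools.package_index import egg_info_for_url

    base, fragment = egg_info_for_url(
        'https://example.com/dist/Foo-1.0.tar.gz#egg=Foo')
    # base == 'Foo-1.0.tar.gz', fragment == 'egg=Foo';
    # EGG_FRAGMENT then extracts the project name 'Foo'.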
-def distros_for_url(url, metadata=None):
- """Yield egg or source distribution objects that might be found at a URL"""
- base, fragment = egg_info_for_url(url)
+
+def distros_for_url(url, metadata=None):
+ """Yield egg or source distribution objects that might be found at a URL"""
+ base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata):
yield dist
- if fragment:
- match = EGG_FRAGMENT.match(fragment)
- if match:
- for dist in interpret_distro_name(
+ if fragment:
+ match = EGG_FRAGMENT.match(fragment)
+ if match:
+ for dist in interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
- ):
- yield dist
-
+ ):
+ yield dist
+
-def distros_for_location(location, basename, metadata=None):
- """Yield egg or source distribution objects based on basename"""
- if basename.endswith('.egg.zip'):
+def distros_for_location(location, basename, metadata=None):
+ """Yield egg or source distribution objects based on basename"""
+ if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
- if basename.endswith('.egg') and '-' in basename:
- # only one, unambiguous interpretation
- return [Distribution.from_location(location, basename, metadata)]
+ if basename.endswith('.egg') and '-' in basename:
+ # only one, unambiguous interpretation
+ return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.whl') and '-' in basename:
wheel = Wheel(basename)
if not wheel.is_compatible():
@@ -127,173 +127,173 @@ def distros_for_location(location, basename, metadata=None):
# Increase priority over eggs.
precedence=EGG_DIST + 1,
)]
- if basename.endswith('.exe'):
- win_base, py_ver, platform = parse_bdist_wininst(basename)
- if win_base is not None:
- return interpret_distro_name(
- location, win_base, metadata, py_ver, BINARY_DIST, platform
- )
- # Try source distro extensions (.zip, .tgz, etc.)
- #
- for ext in EXTENSIONS:
- if basename.endswith(ext):
- basename = basename[:-len(ext)]
- return interpret_distro_name(location, basename, metadata)
- return [] # no extension matched
-
-
-def distros_for_filename(filename, metadata=None):
- """Yield possible egg or source distribution objects based on a filename"""
- return distros_for_location(
- normalize_path(filename), os.path.basename(filename), metadata
- )
-
-
-def interpret_distro_name(
- location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
- platform=None
+ if basename.endswith('.exe'):
+ win_base, py_ver, platform = parse_bdist_wininst(basename)
+ if win_base is not None:
+ return interpret_distro_name(
+ location, win_base, metadata, py_ver, BINARY_DIST, platform
+ )
+ # Try source distro extensions (.zip, .tgz, etc.)
+ #
+ for ext in EXTENSIONS:
+ if basename.endswith(ext):
+ basename = basename[:-len(ext)]
+ return interpret_distro_name(location, basename, metadata)
+ return [] # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+ """Yield possible egg or source distribution objects based on a filename"""
+ return distros_for_location(
+ normalize_path(filename), os.path.basename(filename), metadata
+ )
+
+
+def interpret_distro_name(
+ location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+ platform=None
):
- """Generate alternative interpretations of a source distro name
-
- Note: if `location` is a filesystem filename, you should call
- ``pkg_resources.normalize_path()`` on it before passing it to this
- routine!
- """
- # Generate alternative interpretations of a source distro name
- # Because some packages are ambiguous as to name/versions split
- # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+ """Generate alternative interpretations of a source distro name
+
+ Note: if `location` is a filesystem filename, you should call
+ ``pkg_resources.normalize_path()`` on it before passing it to this
+ routine!
+ """
+ # Generate alternative interpretations of a source distro name
+ # Because some packages are ambiguous as to name/versions split
+ # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
# So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
- # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
- # the spurious interpretations should be ignored, because in the event
- # there's also an "adns" package, the spurious "python-1.1.0" version will
- # compare lower than any numeric version number, and is therefore unlikely
- # to match a request for it. It's still a potential problem, though, and
- # in the long run PyPI and the distutils should go for "safe" names and
- # versions in distribution archive names (sdist and bdist).
-
- parts = basename.split('-')
+ # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
+ # the spurious interpretations should be ignored, because in the event
+ # there's also an "adns" package, the spurious "python-1.1.0" version will
+ # compare lower than any numeric version number, and is therefore unlikely
+ # to match a request for it. It's still a potential problem, though, and
+ # in the long run PyPI and the distutils should go for "safe" names and
+ # versions in distribution archive names (sdist and bdist).
+
+ parts = basename.split('-')
if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
- # it is a bdist_dumb, not an sdist -- bail out
- return
-
+ # it is a bdist_dumb, not an sdist -- bail out
+ return
+
for p in range(1, len(parts) + 1):
- yield Distribution(
- location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+ yield Distribution(
+ location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
- )
-
+ )
+
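
The comment block above describes how interpret_distro_name handles the ambiguous name/version split. A minimal standalone sketch of just the splitting loop (the real method yields Distribution objects; the basename here is illustrative):

def candidate_splits(basename):
    # Same loop as interpret_distro_name, reduced to (name, version) pairs.
    parts = basename.split('-')
    for p in range(1, len(parts) + 1):
        yield '-'.join(parts[:p]), '-'.join(parts[p:])

for name, version in candidate_splits('adns-python-1.1.0'):
    print(name, '|', version)
# adns | python-1.1.0
# adns-python | 1.1.0
# adns-python-1.1.0 |
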
-def unique_values(func):
- """
- Wrap a function returning an iterable such that the resulting iterable
- only ever yields unique items.
- """
+def unique_values(func):
+ """
+ Wrap a function returning an iterable such that the resulting iterable
+ only ever yields unique items.
+ """
- @wraps(func)
- def wrapper(*args, **kwargs):
- return unique_everseen(func(*args, **kwargs))
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return unique_everseen(func(*args, **kwargs))
+
+ return wrapper
- return wrapper
-
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
-# this line is here to fix emacs' cruddy broken syntax highlighting
-
-
-@unique_values
-def find_external_links(url, page):
- """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
-
- for match in REL.finditer(page):
- tag, rel = match.groups()
- rels = set(map(str.strip, rel.lower().split(',')))
- if 'homepage' in rels or 'download' in rels:
- for match in HREF.finditer(tag):
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
- for tag in ("<th>Home Page", "<th>Download URL"):
- pos = page.find(tag)
+# this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+@unique_values
+def find_external_links(url, page):
+ """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
+
+ for match in REL.finditer(page):
+ tag, rel = match.groups()
+ rels = set(map(str.strip, rel.lower().split(',')))
+ if 'homepage' in rels or 'download' in rels:
+ for match in HREF.finditer(tag):
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+ for tag in ("<th>Home Page", "<th>Download URL"):
+ pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
- if match:
- yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
-
+ if match:
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+
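
A self-contained sketch of the rel-link scan performed by find_external_links above. The HREF pattern below is a simplified stand-in, since the real module-level pattern is not part of this hunk:

import re
import urllib.parse

REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)  # simplified stand-in

page = '<a rel="download" href="/dist/pkg-1.0.tar.gz">pkg</a>'
for match in REL.finditer(page):
    tag, rel = match.groups()
    rels = set(map(str.strip, rel.lower().split(',')))
    if 'homepage' in rels or 'download' in rels:
        for href in HREF.finditer(tag):
            print(urllib.parse.urljoin('https://example.org/p/', href.group(1)))
# https://example.org/dist/pkg-1.0.tar.gz
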
class ContentChecker:
- """
- A null content checker that defines the interface for checking content
- """
-
- def feed(self, block):
- """
- Feed a block of data to the hash.
- """
- return
-
- def is_valid(self):
- """
- Check the hash. Return False if validation fails.
- """
- return True
-
- def report(self, reporter, template):
- """
- Call reporter with information about the checker (hash name)
- substituted into the template.
- """
- return
-
-
-class HashChecker(ContentChecker):
- pattern = re.compile(
- r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
- r'(?P<expected>[a-f0-9]+)'
- )
-
- def __init__(self, hash_name, expected):
- self.hash_name = hash_name
- self.hash = hashlib.new(hash_name)
- self.expected = expected
-
- @classmethod
- def from_url(cls, url):
- "Construct a (possibly null) ContentChecker from a URL"
- fragment = urllib.parse.urlparse(url)[-1]
- if not fragment:
- return ContentChecker()
- match = cls.pattern.search(fragment)
- if not match:
- return ContentChecker()
- return cls(**match.groupdict())
-
- def feed(self, block):
- self.hash.update(block)
-
- def is_valid(self):
- return self.hash.hexdigest() == self.expected
-
- def report(self, reporter, template):
- msg = template % self.hash_name
- return reporter(msg)
-
-
-class PackageIndex(Environment):
- """A distribution index that scans web pages for download URLs"""
-
- def __init__(
+ """
+ A null content checker that defines the interface for checking content
+ """
+
+ def feed(self, block):
+ """
+ Feed a block of data to the hash.
+ """
+ return
+
+ def is_valid(self):
+ """
+ Check the hash. Return False if validation fails.
+ """
+ return True
+
+ def report(self, reporter, template):
+ """
+ Call reporter with information about the checker (hash name)
+ substituted into the template.
+ """
+ return
+
+
+class HashChecker(ContentChecker):
+ pattern = re.compile(
+ r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
+ r'(?P<expected>[a-f0-9]+)'
+ )
+
+ def __init__(self, hash_name, expected):
+ self.hash_name = hash_name
+ self.hash = hashlib.new(hash_name)
+ self.expected = expected
+
+ @classmethod
+ def from_url(cls, url):
+ "Construct a (possibly null) ContentChecker from a URL"
+ fragment = urllib.parse.urlparse(url)[-1]
+ if not fragment:
+ return ContentChecker()
+ match = cls.pattern.search(fragment)
+ if not match:
+ return ContentChecker()
+ return cls(**match.groupdict())
+
+ def feed(self, block):
+ self.hash.update(block)
+
+ def is_valid(self):
+ return self.hash.hexdigest() == self.expected
+
+ def report(self, reporter, template):
+ msg = template % self.hash_name
+ return reporter(msg)
+
+
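
HashChecker.from_url only activates when the URL fragment carries a hash_name=hexdigest pair; otherwise a null ContentChecker is returned. A minimal sketch of that round trip, using the same pattern on a hypothetical URL:

import hashlib
import re
import urllib.parse

pattern = re.compile(
    r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
    r'(?P<expected>[a-f0-9]+)')

url = ('https://example.org/pkg-1.0.tar.gz#sha256='
       + hashlib.sha256(b'data').hexdigest())
fragment = urllib.parse.urlparse(url).fragment
match = pattern.search(fragment)
digest = hashlib.new(match.group('hash_name'))
digest.update(b'data')                                 # feed() each block
print(digest.hexdigest() == match.group('expected'))   # True -> is_valid()
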
+class PackageIndex(Environment):
+ """A distribution index that scans web pages for download URLs"""
+
+ def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
- ca_bundle=None, verify_ssl=True, *args, **kw
+ ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
- self.scanned_urls = {}
- self.fetched_urls = {}
- self.package_pages = {}
+ self.scanned_urls = {}
+ self.fetched_urls = {}
+ self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
- self.to_scan = []
+ self.to_scan = []
self.opener = urllib.request.urlopen
-
+
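
The index_url + "/" [:not index_url.endswith('/')] expression in __init__ above is a slicing trick: the boolean is used as a slice bound on the one-character string (False is 0, True is 1), so a slash is appended only when one is missing. For example:

for index_url in ('https://pypi.org/simple/', 'https://pypi.org/simple'):
    print(index_url + "/"[:not index_url.endswith('/')])
# https://pypi.org/simple/
# https://pypi.org/simple/
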
def add(self, dist):
# ignore invalid versions
try:
@@ -304,29 +304,29 @@ class PackageIndex(Environment):
# FIXME: 'PackageIndex.process_url' is too complex (14)
def process_url(self, url, retrieve=False): # noqa: C901
- """Evaluate a URL as a possible download, and maybe retrieve it"""
- if url in self.scanned_urls and not retrieve:
- return
- self.scanned_urls[url] = True
- if not URL_SCHEME(url):
- self.process_filename(url)
- return
- else:
- dists = list(distros_for_url(url))
- if dists:
- if not self.url_ok(url):
- return
- self.debug("Found link: %s", url)
-
- if dists or not retrieve or url in self.fetched_urls:
- list(map(self.add, dists))
- return # don't need the actual page
-
- if not self.url_ok(url):
- self.fetched_urls[url] = True
- return
-
- self.info("Reading %s", url)
+ """Evaluate a URL as a possible download, and maybe retrieve it"""
+ if url in self.scanned_urls and not retrieve:
+ return
+ self.scanned_urls[url] = True
+ if not URL_SCHEME(url):
+ self.process_filename(url)
+ return
+ else:
+ dists = list(distros_for_url(url))
+ if dists:
+ if not self.url_ok(url):
+ return
+ self.debug("Found link: %s", url)
+
+ if dists or not retrieve or url in self.fetched_urls:
+ list(map(self.add, dists))
+ return # don't need the actual page
+
+ if not self.url_ok(url):
+ self.fetched_urls[url] = True
+ return
+
+ self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
@@ -334,83 +334,83 @@ class PackageIndex(Environment):
return
if isinstance(f, urllib.error.HTTPError) and f.code == 401:
self.info("Authentication error: %s" % f.msg)
- self.fetched_urls[f.url] = True
- if 'html' not in f.headers.get('content-type', '').lower():
+ self.fetched_urls[f.url] = True
+ if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
- return
-
+ return
+
base = f.url # handle redirects
- page = f.read()
+ page = f.read()
if not isinstance(page, str):
            # Got bytes but want str; decode below using the page charset.
- if isinstance(f, urllib.error.HTTPError):
- # Errors have no charset, assume latin1:
- charset = 'latin-1'
- else:
- charset = f.headers.get_param('charset') or 'latin-1'
- page = page.decode(charset, "ignore")
- f.close()
- for match in HREF.finditer(page):
- link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
- self.process_url(link)
+ if isinstance(f, urllib.error.HTTPError):
+ # Errors have no charset, assume latin1:
+ charset = 'latin-1'
+ else:
+ charset = f.headers.get_param('charset') or 'latin-1'
+ page = page.decode(charset, "ignore")
+ f.close()
+ for match in HREF.finditer(page):
+ link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
+ self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
- page = self.process_index(url, page)
-
- def process_filename(self, fn, nested=False):
- # process filenames or directories
- if not os.path.exists(fn):
- self.warn("Not found: %s", fn)
- return
-
- if os.path.isdir(fn) and not nested:
- path = os.path.realpath(fn)
- for item in os.listdir(path):
+ page = self.process_index(url, page)
+
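
The decode logic in process_url above prefers the server-declared charset and falls back to latin-1, ignoring undecodable bytes. A small sketch of the same policy (the helper name is illustrative, not part of the module):

def decode_page(raw, declared_charset=None):
    # Use the Content-Type charset when the server sent one, else latin-1,
    # and never fail on bad bytes.
    charset = declared_charset or 'latin-1'
    return raw.decode(charset, 'ignore')

print(decode_page(b'caf\xc3\xa9', 'utf-8'))  # café
print(decode_page(b'caf\xe9'))               # café via the latin-1 fallback
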
+ def process_filename(self, fn, nested=False):
+ # process filenames or directories
+ if not os.path.exists(fn):
+ self.warn("Not found: %s", fn)
+ return
+
+ if os.path.isdir(fn) and not nested:
+ path = os.path.realpath(fn)
+ for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
-
- dists = distros_for_filename(fn)
- if dists:
- self.debug("Found: %s", fn)
- list(map(self.add, dists))
-
- def url_ok(self, url, fatal=False):
- s = URL_SCHEME(url)
+
+ dists = distros_for_filename(fn)
+ if dists:
+ self.debug("Found: %s", fn)
+ list(map(self.add, dists))
+
+ def url_ok(self, url, fatal=False):
+ s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
- return True
+ return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
- if fatal:
- raise DistutilsError(msg % url)
- else:
- self.warn(msg, url)
-
- def scan_egg_links(self, search_path):
- dirs = filter(os.path.isdir, search_path)
- egg_links = (
- (path, entry)
- for path in dirs
- for entry in os.listdir(path)
- if entry.endswith('.egg-link')
- )
- list(itertools.starmap(self.scan_egg_link, egg_links))
-
- def scan_egg_link(self, path, entry):
- with open(os.path.join(path, entry)) as raw_lines:
- # filter non-empty lines
- lines = list(filter(None, map(str.strip, raw_lines)))
-
- if len(lines) != 2:
- # format is not recognized; punt
- return
-
- egg_path, setup_path = lines
-
- for dist in find_distributions(os.path.join(path, egg_path)):
- dist.location = os.path.join(path, *lines)
- dist.precedence = SOURCE_DIST
- self.add(dist)
-
+ if fatal:
+ raise DistutilsError(msg % url)
+ else:
+ self.warn(msg, url)
+
+ def scan_egg_links(self, search_path):
+ dirs = filter(os.path.isdir, search_path)
+ egg_links = (
+ (path, entry)
+ for path in dirs
+ for entry in os.listdir(path)
+ if entry.endswith('.egg-link')
+ )
+ list(itertools.starmap(self.scan_egg_link, egg_links))
+
+ def scan_egg_link(self, path, entry):
+ with open(os.path.join(path, entry)) as raw_lines:
+ # filter non-empty lines
+ lines = list(filter(None, map(str.strip, raw_lines)))
+
+ if len(lines) != 2:
+ # format is not recognized; punt
+ return
+
+ egg_path, setup_path = lines
+
+ for dist in find_distributions(os.path.join(path, egg_path)):
+ dist.location = os.path.join(path, *lines)
+ dist.precedence = SOURCE_DIST
+ self.add(dist)
+
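
scan_egg_link above only accepts files with exactly two non-empty lines: the egg path first, then the setup path. A sketch with a hypothetical .egg-link file, showing the same parsing:

import os
import tempfile

content = "/home/dev/project\n.\n"          # hypothetical egg-link contents
with tempfile.TemporaryDirectory() as path:
    entry = 'Project.egg-link'
    with open(os.path.join(path, entry), 'w') as f:
        f.write(content)
    with open(os.path.join(path, entry)) as raw_lines:
        # filter non-empty lines, as scan_egg_link does
        lines = list(filter(None, map(str.strip, raw_lines)))
    print(lines)                            # ['/home/dev/project', '.']
    egg_path, setup_path = lines
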
def _scan(self, link):
# Process a URL to see if it's for a package page
NO_MATCH_SENTINEL = None, None
@@ -430,19 +430,19 @@ class PackageIndex(Environment):
return to_filename(pkg), to_filename(ver)
def process_index(self, url, page):
- """Process the contents of a PyPI page"""
+ """Process the contents of a PyPI page"""
- # process an index page into the package-page index
- for match in HREF.finditer(page):
- try:
+ # process an index page into the package-page index
+ for match in HREF.finditer(page):
+ try:
self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
- except ValueError:
- pass
-
+ except ValueError:
+ pass
+
pkg, ver = self._scan(url) # ensure this page is in the page index
if not pkg:
return "" # no sense double-scanning non-package pages
-
+
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
@@ -458,165 +458,165 @@ class PackageIndex(Environment):
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
- def need_version_info(self, url):
- self.scan_all(
- "Page at %s links to .py file(s) without version info; an index "
- "scan is required.", url
- )
-
- def scan_all(self, msg=None, *args):
- if self.index_url not in self.fetched_urls:
+ def need_version_info(self, url):
+ self.scan_all(
+ "Page at %s links to .py file(s) without version info; an index "
+ "scan is required.", url
+ )
+
+ def scan_all(self, msg=None, *args):
+ if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
- self.info(
- "Scanning index of all packages (this may take a while)"
- )
- self.scan_url(self.index_url)
-
- def find_packages(self, requirement):
+ self.info(
+ "Scanning index of all packages (this may take a while)"
+ )
+ self.scan_url(self.index_url)
+
+ def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # Fall back to safe version of the name
+
+ if not self.package_pages.get(requirement.key):
+ # Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
-
- if not self.package_pages.get(requirement.key):
- # We couldn't find the target package, so search the index page too
- self.not_found_in_index(requirement)
-
+
+ if not self.package_pages.get(requirement.key):
+ # We couldn't find the target package, so search the index page too
+ self.not_found_in_index(requirement)
+
for url in list(self.package_pages.get(requirement.key, ())):
- # scan each page that might be related to the desired package
- self.scan_url(url)
-
- def obtain(self, requirement, installer=None):
- self.prescan()
- self.find_packages(requirement)
- for dist in self[requirement.key]:
- if dist in requirement:
- return dist
- self.debug("%s does not match %s", requirement, dist)
+ # scan each page that might be related to the desired package
+ self.scan_url(url)
+
+ def obtain(self, requirement, installer=None):
+ self.prescan()
+ self.find_packages(requirement)
+ for dist in self[requirement.key]:
+ if dist in requirement:
+ return dist
+ self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
-
- def check_hash(self, checker, filename, tfp):
- """
- checker is a ContentChecker
- """
+
+ def check_hash(self, checker, filename, tfp):
+ """
+ checker is a ContentChecker
+ """
checker.report(
self.debug,
- "Validating %%s checksum for %s" % filename)
- if not checker.is_valid():
- tfp.close()
- os.unlink(filename)
- raise DistutilsError(
- "%s validation failed for %s; "
+ "Validating %%s checksum for %s" % filename)
+ if not checker.is_valid():
+ tfp.close()
+ os.unlink(filename)
+ raise DistutilsError(
+ "%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
- )
-
- def add_find_links(self, urls):
- """Add `urls` to the list that will be prescanned for searches"""
- for url in urls:
- if (
+ )
+
+ def add_find_links(self, urls):
+ """Add `urls` to the list that will be prescanned for searches"""
+ for url in urls:
+ if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
- or url.startswith('file:')
+ or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
- ):
- # then go ahead and process it now
- self.scan_url(url)
- else:
- # otherwise, defer retrieval till later
- self.to_scan.append(url)
-
- def prescan(self):
- """Scan urls scheduled for prescanning (e.g. --find-links)"""
- if self.to_scan:
- list(map(self.scan_url, self.to_scan))
+ ):
+ # then go ahead and process it now
+ self.scan_url(url)
+ else:
+ # otherwise, defer retrieval till later
+ self.to_scan.append(url)
+
+ def prescan(self):
+ """Scan urls scheduled for prescanning (e.g. --find-links)"""
+ if self.to_scan:
+ list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
-
- def not_found_in_index(self, requirement):
+
+ def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
- meth, msg = self.info, "Couldn't retrieve index page for %r"
+ meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
- "Couldn't find index page for %r (maybe misspelled?)")
- meth(msg, requirement.unsafe_name)
- self.scan_all()
-
- def download(self, spec, tmpdir):
- """Locate and/or download `spec` to `tmpdir`, returning a local path
-
- `spec` may be a ``Requirement`` object, or a string containing a URL,
- an existing local filename, or a project/version requirement spec
- (i.e. the string form of a ``Requirement`` object). If it is the URL
- of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
- that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
- automatically created alongside the downloaded file.
-
- If `spec` is a ``Requirement`` object or a string containing a
- project/version requirement spec, this method returns the location of
- a matching distribution (possibly after downloading it to `tmpdir`).
- If `spec` is a locally existing file or directory name, it is simply
- returned unchanged. If `spec` is a URL, it is downloaded to a subpath
- of `tmpdir`, and the local filename is returned. Various errors may be
- raised if a problem occurs during downloading.
- """
+ "Couldn't find index page for %r (maybe misspelled?)")
+ meth(msg, requirement.unsafe_name)
+ self.scan_all()
+
+ def download(self, spec, tmpdir):
+ """Locate and/or download `spec` to `tmpdir`, returning a local path
+
+ `spec` may be a ``Requirement`` object, or a string containing a URL,
+ an existing local filename, or a project/version requirement spec
+ (i.e. the string form of a ``Requirement`` object). If it is the URL
+ of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
+ that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
+ automatically created alongside the downloaded file.
+
+ If `spec` is a ``Requirement`` object or a string containing a
+ project/version requirement spec, this method returns the location of
+ a matching distribution (possibly after downloading it to `tmpdir`).
+ If `spec` is a locally existing file or directory name, it is simply
+ returned unchanged. If `spec` is a URL, it is downloaded to a subpath
+ of `tmpdir`, and the local filename is returned. Various errors may be
+ raised if a problem occurs during downloading.
+ """
if not isinstance(spec, Requirement):
- scheme = URL_SCHEME(spec)
- if scheme:
- # It's a url, download it to tmpdir
- found = self._download_url(scheme.group(1), spec, tmpdir)
- base, fragment = egg_info_for_url(spec)
- if base.endswith('.py'):
+ scheme = URL_SCHEME(spec)
+ if scheme:
+ # It's a url, download it to tmpdir
+ found = self._download_url(scheme.group(1), spec, tmpdir)
+ base, fragment = egg_info_for_url(spec)
+ if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
- return found
- elif os.path.exists(spec):
- # Existing file or directory, just return it
- return spec
- else:
+ return found
+ elif os.path.exists(spec):
+ # Existing file or directory, just return it
+ return spec
+ else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
-
+
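
download above dispatches on the form of spec: a URL is fetched into tmpdir, an existing local path is returned unchanged, and anything else is parsed as a requirement. A sketch of just the dispatch; the URL_SCHEME pattern here is an assumed stand-in for the module-level one, which this hunk does not show:

import os
import re

URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match  # assumed stand-in

def classify(spec):
    if URL_SCHEME(spec):
        return 'download URL to tmpdir'
    if os.path.exists(spec):
        return 'existing local path, returned unchanged'
    return 'parse as a Requirement and fetch a matching distribution'

print(classify('https://example.org/pkg-1.0.tar.gz'))
print(classify('pkg>=1.0'))
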
def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME
- self, requirement, tmpdir, force_scan=False, source=False,
+ self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
- """Obtain a distribution suitable for fulfilling `requirement`
-
- `requirement` must be a ``pkg_resources.Requirement`` instance.
- If necessary, or if the `force_scan` flag is set, the requirement is
- searched for in the (online) package index as well as the locally
- installed packages. If a distribution matching `requirement` is found,
- the returned distribution's ``location`` is the value you would have
- gotten from calling the ``download()`` method with the matching
- distribution's URL or filename. If no matching distribution is found,
- ``None`` is returned.
-
- If the `source` flag is set, only source distributions and source
- checkout links will be considered. Unless the `develop_ok` flag is
- set, development and system eggs (i.e., those using the ``.egg-info``
- format) will be ignored.
- """
- # process a Requirement
- self.info("Searching for %s", requirement)
- skipped = {}
- dist = None
-
- def find(req, env=None):
- if env is None:
- env = self
- # Find a matching distribution; may be called more than once
-
- for dist in env[req.key]:
-
+ """Obtain a distribution suitable for fulfilling `requirement`
+
+ `requirement` must be a ``pkg_resources.Requirement`` instance.
+ If necessary, or if the `force_scan` flag is set, the requirement is
+ searched for in the (online) package index as well as the locally
+ installed packages. If a distribution matching `requirement` is found,
+ the returned distribution's ``location`` is the value you would have
+ gotten from calling the ``download()`` method with the matching
+ distribution's URL or filename. If no matching distribution is found,
+ ``None`` is returned.
+
+ If the `source` flag is set, only source distributions and source
+ checkout links will be considered. Unless the `develop_ok` flag is
+ set, development and system eggs (i.e., those using the ``.egg-info``
+ format) will be ignored.
+ """
+ # process a Requirement
+ self.info("Searching for %s", requirement)
+ skipped = {}
+ dist = None
+
+ def find(req, env=None):
+ if env is None:
+ env = self
+ # Find a matching distribution; may be called more than once
+
+ for dist in env[req.key]:
+
if dist.precedence == DEVELOP_DIST and not develop_ok:
- if dist not in skipped:
+ if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
)
- skipped[dist] = 1
- continue
-
+ skipped[dist] = 1
+ continue
+
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
@@ -626,312 +626,312 @@ class PackageIndex(Environment):
dist.download_location = loc
if os.path.exists(dist.download_location):
return dist
-
- if force_scan:
- self.prescan()
- self.find_packages(requirement)
- dist = find(requirement)
-
+
+ if force_scan:
+ self.prescan()
+ self.find_packages(requirement)
+ dist = find(requirement)
+
if not dist and local_index is not None:
dist = find(requirement, local_index)
-
- if dist is None:
- if self.to_scan is not None:
- self.prescan()
- dist = find(requirement)
-
- if dist is None and not force_scan:
- self.find_packages(requirement)
- dist = find(requirement)
-
- if dist is None:
- self.warn(
+
+ if dist is None:
+ if self.to_scan is not None:
+ self.prescan()
+ dist = find(requirement)
+
+ if dist is None and not force_scan:
+ self.find_packages(requirement)
+ dist = find(requirement)
+
+ if dist is None:
+ self.warn(
"No local packages or working download links found for %s%s",
- (source and "a source distribution of " or ""),
- requirement,
- )
- else:
- self.info("Best match: %s", dist)
+ (source and "a source distribution of " or ""),
+ requirement,
+ )
+ else:
+ self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
-
- def fetch(self, requirement, tmpdir, force_scan=False, source=False):
- """Obtain a file suitable for fulfilling `requirement`
-
- DEPRECATED; use the ``fetch_distribution()`` method now instead. For
- backward compatibility, this routine is identical but returns the
- ``location`` of the downloaded distribution instead of a distribution
- object.
- """
+
+ def fetch(self, requirement, tmpdir, force_scan=False, source=False):
+ """Obtain a file suitable for fulfilling `requirement`
+
+ DEPRECATED; use the ``fetch_distribution()`` method now instead. For
+ backward compatibility, this routine is identical but returns the
+ ``location`` of the downloaded distribution instead of a distribution
+ object.
+ """
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
- if dist is not None:
- return dist.location
- return None
-
- def gen_setup(self, filename, fragment, tmpdir):
- match = EGG_FRAGMENT.match(fragment)
- dists = match and [
- d for d in
- interpret_distro_name(filename, match.group(1), None) if d.version
- ] or []
-
+ if dist is not None:
+ return dist.location
+ return None
+
+ def gen_setup(self, filename, fragment, tmpdir):
+ match = EGG_FRAGMENT.match(fragment)
+ dists = match and [
+ d for d in
+ interpret_distro_name(filename, match.group(1), None) if d.version
+ ] or []
+
if len(dists) == 1: # unambiguous ``#egg`` fragment
- basename = os.path.basename(filename)
-
- # Make sure the file has been downloaded to the temp dir.
- if os.path.dirname(filename) != tmpdir:
- dst = os.path.join(tmpdir, basename)
- from setuptools.command.easy_install import samefile
- if not samefile(filename, dst):
- shutil.copy2(filename, dst)
+ basename = os.path.basename(filename)
+
+ # Make sure the file has been downloaded to the temp dir.
+ if os.path.dirname(filename) != tmpdir:
+ dst = os.path.join(tmpdir, basename)
+ from setuptools.command.easy_install import samefile
+ if not samefile(filename, dst):
+ shutil.copy2(filename, dst)
filename = dst
-
- with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
- file.write(
- "from setuptools import setup\n"
- "setup(name=%r, version=%r, py_modules=[%r])\n"
- % (
- dists[0].project_name, dists[0].version,
- os.path.splitext(basename)[0]
- )
- )
- return filename
-
- elif match:
- raise DistutilsError(
- "Can't unambiguously interpret project/version identifier %r; "
- "any dashes in the name or version should be escaped using "
+
+ with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
+ file.write(
+ "from setuptools import setup\n"
+ "setup(name=%r, version=%r, py_modules=[%r])\n"
+ % (
+ dists[0].project_name, dists[0].version,
+ os.path.splitext(basename)[0]
+ )
+ )
+ return filename
+
+ elif match:
+ raise DistutilsError(
+ "Can't unambiguously interpret project/version identifier %r; "
+ "any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
- )
- else:
- raise DistutilsError(
- "Can't process plain .py files without an '#egg=name-version'"
- " suffix to enable automatic setup script generation."
- )
-
- dl_blocksize = 8192
-
- def _download_to(self, url, filename):
- self.info("Downloading %s", url)
- # Download the file
+ )
+ else:
+ raise DistutilsError(
+ "Can't process plain .py files without an '#egg=name-version'"
+ " suffix to enable automatic setup script generation."
+ )
+
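
For a plain .py download with an unambiguous #egg=name-version fragment, gen_setup above writes a trivial setup.py next to the file. A sketch of the generated script for a hypothetical mymod.py#egg=mymod-0.1:

project_name, version, module = 'mymod', '0.1', 'mymod'  # hypothetical values
print("from setuptools import setup\n"
      "setup(name=%r, version=%r, py_modules=[%r])\n"
      % (project_name, version, module))
# from setuptools import setup
# setup(name='mymod', version='0.1', py_modules=['mymod'])
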
+ dl_blocksize = 8192
+
+ def _download_to(self, url, filename):
+ self.info("Downloading %s", url)
+ # Download the file
fp = None
- try:
- checker = HashChecker.from_url(url)
+ try:
+ checker = HashChecker.from_url(url)
fp = self.open_url(url)
- if isinstance(fp, urllib.error.HTTPError):
- raise DistutilsError(
+ if isinstance(fp, urllib.error.HTTPError):
+ raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
- )
- headers = fp.info()
- blocknum = 0
- bs = self.dl_blocksize
- size = -1
- if "content-length" in headers:
- # Some servers return multiple Content-Length headers :(
+ )
+ headers = fp.info()
+ blocknum = 0
+ bs = self.dl_blocksize
+ size = -1
+ if "content-length" in headers:
+ # Some servers return multiple Content-Length headers :(
sizes = headers.get_all('Content-Length')
- size = max(map(int, sizes))
- self.reporthook(url, filename, blocknum, bs, size)
+ size = max(map(int, sizes))
+ self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
- while True:
- block = fp.read(bs)
- if block:
- checker.feed(block)
- tfp.write(block)
- blocknum += 1
- self.reporthook(url, filename, blocknum, bs, size)
- else:
- break
- self.check_hash(checker, filename, tfp)
- return headers
- finally:
+ while True:
+ block = fp.read(bs)
+ if block:
+ checker.feed(block)
+ tfp.write(block)
+ blocknum += 1
+ self.reporthook(url, filename, blocknum, bs, size)
+ else:
+ break
+ self.check_hash(checker, filename, tfp)
+ return headers
+ finally:
if fp:
fp.close()
-
- def reporthook(self, url, filename, blocknum, blksize, size):
+
+ def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
-
+
# FIXME:
def open_url(self, url, warning=None): # noqa: C901 # is too complex (12)
- if url.startswith('file:'):
- return local_open(url)
- try:
- return open_with_auth(url, self.opener)
+ if url.startswith('file:'):
+ return local_open(url)
+ try:
+ return open_with_auth(url, self.opener)
except (ValueError, http.client.InvalidURL) as v:
- msg = ' '.join([str(arg) for arg in v.args])
- if warning:
- self.warn(warning, msg)
- else:
+ msg = ' '.join([str(arg) for arg in v.args])
+ if warning:
+ self.warn(warning, msg)
+ else:
raise DistutilsError('%s %s' % (url, msg)) from v
- except urllib.error.HTTPError as v:
- return v
- except urllib.error.URLError as v:
- if warning:
- self.warn(warning, v.reason)
- else:
- raise DistutilsError("Download error for %s: %s"
+ except urllib.error.HTTPError as v:
+ return v
+ except urllib.error.URLError as v:
+ if warning:
+ self.warn(warning, v.reason)
+ else:
+ raise DistutilsError("Download error for %s: %s"
% (url, v.reason)) from v
except http.client.BadStatusLine as v:
- if warning:
- self.warn(warning, v.line)
- else:
- raise DistutilsError(
- '%s returned a bad status line. The server might be '
- 'down, %s' %
- (url, v.line)
+ if warning:
+ self.warn(warning, v.line)
+ else:
+ raise DistutilsError(
+ '%s returned a bad status line. The server might be '
+ 'down, %s' %
+ (url, v.line)
) from v
except (http.client.HTTPException, socket.error) as v:
- if warning:
- self.warn(warning, v)
- else:
- raise DistutilsError("Download error for %s: %s"
+ if warning:
+ self.warn(warning, v)
+ else:
+ raise DistutilsError("Download error for %s: %s"
% (url, v)) from v
-
- def _download_url(self, scheme, url, tmpdir):
- # Determine download filename
- #
- name, fragment = egg_info_for_url(url)
- if name:
- while '..' in name:
+
+ def _download_url(self, scheme, url, tmpdir):
+ # Determine download filename
+ #
+ name, fragment = egg_info_for_url(url)
+ if name:
+ while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
- else:
+ else:
name = "__downloaded__" # default if URL has no path contents
-
- if name.endswith('.egg.zip'):
+
+ if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
-
+
filename = os.path.join(tmpdir, name)
-
- # Download the file
- #
+
+ # Download the file
+ #
if scheme == 'svn' or scheme.startswith('svn+'):
- return self._download_svn(url, filename)
+ return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
- return self._download_git(url, filename)
- elif scheme.startswith('hg+'):
- return self._download_hg(url, filename)
+ return self._download_git(url, filename)
+ elif scheme.startswith('hg+'):
+ return self._download_hg(url, filename)
elif scheme == 'file':
- return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
- else:
+ return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
+ else:
self.url_ok(url, True) # raises error if not allowed
- return self._attempt_download(url, filename)
-
- def scan_url(self, url):
- self.process_url(url, True)
-
- def _attempt_download(self, url, filename):
- headers = self._download_to(url, filename)
+ return self._attempt_download(url, filename)
+
+ def scan_url(self, url):
+ self.process_url(url, True)
+
+ def _attempt_download(self, url, filename):
+ headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
- return self._download_html(url, headers, filename)
- else:
- return filename
-
- def _download_html(self, url, headers, filename):
- file = open(filename)
- for line in file:
- if line.strip():
- # Check for a subversion index page
- if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
- # it's a subversion index page:
- file.close()
- os.unlink(filename)
- return self._download_svn(url, filename)
+ return self._download_html(url, headers, filename)
+ else:
+ return filename
+
+ def _download_html(self, url, headers, filename):
+ file = open(filename)
+ for line in file:
+ if line.strip():
+ # Check for a subversion index page
+ if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
+ # it's a subversion index page:
+ file.close()
+ os.unlink(filename)
+ return self._download_svn(url, filename)
break # not an index page
- file.close()
- os.unlink(filename)
+ file.close()
+ os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
-
- def _download_svn(self, url, filename):
+
+ def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
- creds = ''
- if url.lower().startswith('svn:') and '@' in url:
- scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
- if not netloc and path.startswith('//') and '/' in path[2:]:
+ creds = ''
+ if url.lower().startswith('svn:') and '@' in url:
+ scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
+ if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = _splituser(netloc)
- if auth:
- if ':' in auth:
+ if auth:
+ if ':' in auth:
user, pw = auth.split(':', 1)
- creds = " --username=%s --password=%s" % (user, pw)
- else:
+ creds = " --username=%s --password=%s" % (user, pw)
+ else:
creds = " --username=" + auth
- netloc = host
- parts = scheme, netloc, url, p, q, f
- url = urllib.parse.urlunparse(parts)
- self.info("Doing subversion checkout from %s to %s", url, filename)
- os.system("svn checkout%s -q %s %s" % (creds, url, filename))
- return filename
-
- @staticmethod
- def _vcs_split_rev_from_url(url, pop_prefix=False):
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
-
- scheme = scheme.split('+', 1)[-1]
-
- # Some fragment identification fails
+ netloc = host
+ parts = scheme, netloc, url, p, q, f
+ url = urllib.parse.urlunparse(parts)
+ self.info("Doing subversion checkout from %s to %s", url, filename)
+ os.system("svn checkout%s -q %s %s" % (creds, url, filename))
+ return filename
+
+ @staticmethod
+ def _vcs_split_rev_from_url(url, pop_prefix=False):
+ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+
+ scheme = scheme.split('+', 1)[-1]
+
+ # Some fragment identification fails
path = path.split('#', 1)[0]
-
- rev = None
- if '@' in path:
- path, rev = path.rsplit('@', 1)
-
- # Also, discard fragment
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
-
- return url, rev
-
- def _download_git(self, url, filename):
+
+ rev = None
+ if '@' in path:
+ path, rev = path.rsplit('@', 1)
+
+ # Also, discard fragment
+ url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
+
+ return url, rev
+
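
_vcs_split_rev_from_url above strips the VCS prefix and fragment and splits off a pinned revision. A standalone copy of the same steps, applied to a hypothetical pinned URL:

import urllib.parse

def split_rev(url):
    scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
    scheme = scheme.split('+', 1)[-1]        # drop the "git+"/"hg+" prefix
    path = path.split('#', 1)[0]             # discard any fragment residue
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)      # split off the pinned revision
    return urllib.parse.urlunsplit((scheme, netloc, path, query, '')), rev

print(split_rev('git+https://example.org/repo.git@v1.2#egg=repo'))
# ('https://example.org/repo.git', 'v1.2')
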
+ def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing git clone from %s to %s", url, filename)
- os.system("git clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Checking out %s", rev)
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing git clone from %s to %s", url, filename)
+ os.system("git clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
- filename,
- rev,
- ))
-
- return filename
-
- def _download_hg(self, url, filename):
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
- url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
-
- self.info("Doing hg clone from %s to %s", url, filename)
- os.system("hg clone --quiet %s %s" % (url, filename))
-
- if rev is not None:
- self.info("Updating to %s", rev)
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing hg clone from %s to %s", url, filename)
+ os.system("hg clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
- filename,
- rev,
- ))
-
- return filename
-
- def debug(self, msg, *args):
- log.debug(msg, *args)
-
- def info(self, msg, *args):
- log.info(msg, *args)
-
- def warn(self, msg, *args):
- log.warn(msg, *args)
-
-
-# This pattern matches a character entity reference (a decimal numeric
-# reference, a hexadecimal numeric reference, or a named reference).
-entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
-
-
-def decode_entity(match):
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ def info(self, msg, *args):
+ log.info(msg, *args)
+
+ def warn(self, msg, *args):
+ log.warn(msg, *args)
+
+
+# This pattern matches a character entity reference (a decimal numeric
+# reference, a hexadecimal numeric reference, or a named reference).
+entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+
+def decode_entity(match):
what = match.group(0)
return html.unescape(what)
-
-def htmldecode(text):
+
+def htmldecode(text):
"""
Decode HTML entities in the given text.
@@ -940,145 +940,145 @@ def htmldecode(text):
... '?tokena=A&amp;tokenb=B">package_name-0.1.2.tar.gz')
'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
"""
- return entity_sub(decode_entity, text)
-
-
-def socket_timeout(timeout=15):
- def _socket_timeout(func):
- def _socket_timeout(*args, **kwargs):
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return func(*args, **kwargs)
- finally:
- socket.setdefaulttimeout(old_timeout)
-
- return _socket_timeout
-
- return _socket_timeout
-
-
-def _encode_auth(auth):
- """
+ return entity_sub(decode_entity, text)
+
+
+def socket_timeout(timeout=15):
+ def _socket_timeout(func):
+ def _socket_timeout(*args, **kwargs):
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+ return _socket_timeout
+
+ return _socket_timeout
+
+
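
Usage sketch for the socket_timeout decorator above: the default socket timeout is swapped in around the call and restored afterwards, even if the call raises. The decorator is repeated here so the snippet runs on its own:

import socket

def socket_timeout(timeout=15):
    def _socket_timeout(func):
        def _socket_timeout(*args, **kwargs):
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(old_timeout)
        return _socket_timeout
    return _socket_timeout

@socket_timeout(5)
def probe():
    return socket.getdefaulttimeout()

print(probe(), socket.getdefaulttimeout())  # 5.0 None (assuming no prior default)
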
+def _encode_auth(auth):
+ """
Encode auth from a URL suitable for an HTTP header.
- >>> str(_encode_auth('username%3Apassword'))
- 'dXNlcm5hbWU6cGFzc3dvcmQ='
-
- Long auth strings should not cause a newline to be inserted.
- >>> long_auth = 'username:' + 'password'*10
- >>> chr(10) in str(_encode_auth(long_auth))
- False
- """
- auth_s = urllib.parse.unquote(auth)
- # convert to bytes
- auth_bytes = auth_s.encode()
+ >>> str(_encode_auth('username%3Apassword'))
+ 'dXNlcm5hbWU6cGFzc3dvcmQ='
+
+ Long auth strings should not cause a newline to be inserted.
+ >>> long_auth = 'username:' + 'password'*10
+ >>> chr(10) in str(_encode_auth(long_auth))
+ False
+ """
+ auth_s = urllib.parse.unquote(auth)
+ # convert to bytes
+ auth_bytes = auth_s.encode()
encoded_bytes = base64.b64encode(auth_bytes)
- # convert back to a string
- encoded = encoded_bytes.decode()
- # strip the trailing carriage return
+ # convert back to a string
+ encoded = encoded_bytes.decode()
+ # strip the trailing carriage return
return encoded.replace('\n', '')
-
+
class Credential:
- """
- A username/password pair. Use like a namedtuple.
- """
-
- def __init__(self, username, password):
- self.username = username
- self.password = password
-
- def __iter__(self):
- yield self.username
- yield self.password
-
- def __str__(self):
- return '%(username)s:%(password)s' % vars(self)
-
-
-class PyPIConfig(configparser.RawConfigParser):
- def __init__(self):
- """
- Load from ~/.pypirc
- """
- defaults = dict.fromkeys(['username', 'password', 'repository'], '')
- configparser.RawConfigParser.__init__(self, defaults)
-
- rc = os.path.join(os.path.expanduser('~'), '.pypirc')
- if os.path.exists(rc):
- self.read(rc)
-
- @property
- def creds_by_repository(self):
- sections_with_repositories = [
- section for section in self.sections()
- if self.get(section, 'repository').strip()
- ]
-
- return dict(map(self._get_repo_cred, sections_with_repositories))
-
- def _get_repo_cred(self, section):
- repo = self.get(section, 'repository').strip()
- return repo, Credential(
- self.get(section, 'username').strip(),
- self.get(section, 'password').strip(),
- )
-
- def find_credential(self, url):
- """
- If the URL indicated appears to be a repository defined in this
- config, return the credential for that repository.
- """
- for repository, cred in self.creds_by_repository.items():
- if url.startswith(repository):
- return cred
-
-
-def open_with_auth(url, opener=urllib.request.urlopen):
- """Open a urllib2 request, handling HTTP authentication"""
-
+ """
+ A username/password pair. Use like a namedtuple.
+ """
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+
+ def __iter__(self):
+ yield self.username
+ yield self.password
+
+ def __str__(self):
+ return '%(username)s:%(password)s' % vars(self)
+
+
+class PyPIConfig(configparser.RawConfigParser):
+ def __init__(self):
+ """
+ Load from ~/.pypirc
+ """
+ defaults = dict.fromkeys(['username', 'password', 'repository'], '')
+ configparser.RawConfigParser.__init__(self, defaults)
+
+ rc = os.path.join(os.path.expanduser('~'), '.pypirc')
+ if os.path.exists(rc):
+ self.read(rc)
+
+ @property
+ def creds_by_repository(self):
+ sections_with_repositories = [
+ section for section in self.sections()
+ if self.get(section, 'repository').strip()
+ ]
+
+ return dict(map(self._get_repo_cred, sections_with_repositories))
+
+ def _get_repo_cred(self, section):
+ repo = self.get(section, 'repository').strip()
+ return repo, Credential(
+ self.get(section, 'username').strip(),
+ self.get(section, 'password').strip(),
+ )
+
+ def find_credential(self, url):
+ """
+ If the URL indicated appears to be a repository defined in this
+ config, return the credential for that repository.
+ """
+ for repository, cred in self.creds_by_repository.items():
+ if url.startswith(repository):
+ return cred
+
+
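
A sketch of the .pypirc lookup implemented by PyPIConfig above, using an in-memory config instead of reading ~/.pypirc; the section name, URL and credentials are hypothetical:

import configparser

cfg = configparser.RawConfigParser(
    dict.fromkeys(['username', 'password', 'repository'], ''))
cfg.read_string("""
[internal]
repository = https://pypi.example.org/simple/
username = builder
password = s3cret
""")

# repository -> (username, password), as creds_by_repository builds it
creds = {
    cfg.get(s, 'repository').strip():
        (cfg.get(s, 'username').strip(), cfg.get(s, 'password').strip())
    for s in cfg.sections() if cfg.get(s, 'repository').strip()
}
url = 'https://pypi.example.org/simple/somepkg/'
match = next((c for repo, c in creds.items() if url.startswith(repo)), None)
print(match)  # ('builder', 's3cret')
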
+def open_with_auth(url, opener=urllib.request.urlopen):
+ """Open a urllib2 request, handling HTTP authentication"""
+
parsed = urllib.parse.urlparse(url)
scheme, netloc, path, params, query, frag = parsed
-
+
# Double scheme does not raise on macOS as revealed by a
- # failing test. We would expect "nonnumeric port". Refs #20.
- if netloc.endswith(':'):
+ # failing test. We would expect "nonnumeric port". Refs #20.
+ if netloc.endswith(':'):
raise http.client.InvalidURL("nonnumeric port: ''")
-
- if scheme in ('http', 'https'):
+
+ if scheme in ('http', 'https'):
auth, address = _splituser(netloc)
- else:
- auth = None
-
- if not auth:
- cred = PyPIConfig().find_credential(url)
- if cred:
- auth = str(cred)
- info = cred.username, url
+ else:
+ auth = None
+
+ if not auth:
+ cred = PyPIConfig().find_credential(url)
+ if cred:
+ auth = str(cred)
+ info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
-
- if auth:
- auth = "Basic " + _encode_auth(auth)
+
+ if auth:
+ auth = "Basic " + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
- new_url = urllib.parse.urlunparse(parts)
- request = urllib.request.Request(new_url)
- request.add_header("Authorization", auth)
- else:
- request = urllib.request.Request(url)
-
- request.add_header('User-Agent', user_agent)
- fp = opener(request)
-
- if auth:
- # Put authentication info back into request URL if same host,
- # so that links found on the page will work
- s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
+ new_url = urllib.parse.urlunparse(parts)
+ request = urllib.request.Request(new_url)
+ request.add_header("Authorization", auth)
+ else:
+ request = urllib.request.Request(url)
+
+ request.add_header('User-Agent', user_agent)
+ fp = opener(request)
+
+ if auth:
+ # Put authentication info back into request URL if same host,
+ # so that links found on the page will work
+ s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == address:
- parts = s2, netloc, path2, param2, query2, frag2
- fp.url = urllib.parse.urlunparse(parts)
-
- return fp
-
+ parts = s2, netloc, path2, param2, query2, frag2
+ fp.url = urllib.parse.urlunparse(parts)
+
+ return fp
+
# copy of urllib.parse._splituser from Python 3.8
def _splituser(host):
@@ -1088,40 +1088,40 @@ def _splituser(host):
return (user if delim else None), host
-# adding a timeout to avoid freezing package_index
-open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
-
-
-def fix_sf_url(url):
+# adding a timeout to avoid freezing package_index
+open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
+def fix_sf_url(url):
return url # backward compatibility
-
-
-def local_open(url):
- """Read a local path, with special support for directories"""
- scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
- filename = urllib.request.url2pathname(path)
- if os.path.isfile(filename):
- return urllib.request.urlopen(url)
- elif path.endswith('/') and os.path.isdir(filename):
- files = []
- for f in os.listdir(filename):
- filepath = os.path.join(filename, f)
- if f == 'index.html':
- with open(filepath, 'r') as fp:
- body = fp.read()
- break
- elif os.path.isdir(filepath):
- f += '/'
- files.append('<a href="{name}">{name}</a>'.format(name=f))
- else:
+
+
+def local_open(url):
+ """Read a local path, with special support for directories"""
+ scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
+ filename = urllib.request.url2pathname(path)
+ if os.path.isfile(filename):
+ return urllib.request.urlopen(url)
+ elif path.endswith('/') and os.path.isdir(filename):
+ files = []
+ for f in os.listdir(filename):
+ filepath = os.path.join(filename, f)
+ if f == 'index.html':
+ with open(filepath, 'r') as fp:
+ body = fp.read()
+ break
+ elif os.path.isdir(filepath):
+ f += '/'
+ files.append('<a href="{name}">{name}</a>'.format(name=f))
+ else:
tmpl = (
"<html><head><title>{url}</title>"
- "</head><body>{files}</body></html>")
- body = tmpl.format(url=url, files='\n'.join(files))
- status, message = 200, "OK"
- else:
- status, message, body = 404, "Path not found", "Not found"
-
- headers = {'content-type': 'text/html'}
+ "</head><body>{files}</body></html>")
+ body = tmpl.format(url=url, files='\n'.join(files))
+ status, message = 200, "OK"
+ else:
+ status, message, body = 404, "Path not found", "Not found"
+
+ headers = {'content-type': 'text/html'}
body_stream = io.StringIO(body)
- return urllib.error.HTTPError(url, status, message, headers, body_stream)
+ return urllib.error.HTTPError(url, status, message, headers, body_stream)
diff --git a/contrib/python/setuptools/py3/setuptools/sandbox.py b/contrib/python/setuptools/py3/setuptools/sandbox.py
index e1673b87e1..034fc80d20 100644
--- a/contrib/python/setuptools/py3/setuptools/sandbox.py
+++ b/contrib/python/setuptools/py3/setuptools/sandbox.py
@@ -1,203 +1,203 @@
-import os
-import sys
-import tempfile
-import operator
-import functools
-import itertools
-import re
-import contextlib
-import pickle
+import os
+import sys
+import tempfile
+import operator
+import functools
+import itertools
+import re
+import contextlib
+import pickle
import textwrap
import builtins
-
+
import pkg_resources
from distutils.errors import DistutilsError
from pkg_resources import working_set
-
-if sys.platform.startswith('java'):
- import org.python.modules.posix.PosixModule as _os
-else:
- _os = sys.modules[os.name]
-try:
- _file = file
-except NameError:
- _file = None
-_open = open
-
-
-__all__ = [
+
+if sys.platform.startswith('java'):
+ import org.python.modules.posix.PosixModule as _os
+else:
+ _os = sys.modules[os.name]
+try:
+ _file = file
+except NameError:
+ _file = None
+_open = open
+
+
+__all__ = [
"AbstractSandbox",
"DirectorySandbox",
"SandboxViolation",
"run_setup",
-]
-
-
-def _execfile(filename, globals, locals=None):
- """
- Python 3 implementation of execfile.
- """
- mode = 'rb'
- with open(filename, mode) as stream:
- script = stream.read()
- if locals is None:
- locals = globals
- code = compile(script, filename, 'exec')
- exec(code, globals, locals)
-
-
-@contextlib.contextmanager
-def save_argv(repl=None):
- saved = sys.argv[:]
- if repl is not None:
- sys.argv[:] = repl
- try:
- yield saved
- finally:
- sys.argv[:] = saved
-
-
-@contextlib.contextmanager
-def save_path():
- saved = sys.path[:]
- try:
- yield saved
- finally:
- sys.path[:] = saved
-
-
-@contextlib.contextmanager
-def override_temp(replacement):
- """
- Monkey-patch tempfile.tempdir with replacement, ensuring it exists
- """
+]
+
+
+def _execfile(filename, globals, locals=None):
+ """
+ Python 3 implementation of execfile.
+ """
+ mode = 'rb'
+ with open(filename, mode) as stream:
+ script = stream.read()
+ if locals is None:
+ locals = globals
+ code = compile(script, filename, 'exec')
+ exec(code, globals, locals)
+
+
+@contextlib.contextmanager
+def save_argv(repl=None):
+ saved = sys.argv[:]
+ if repl is not None:
+ sys.argv[:] = repl
+ try:
+ yield saved
+ finally:
+ sys.argv[:] = saved
+
+
+@contextlib.contextmanager
+def save_path():
+ saved = sys.path[:]
+ try:
+ yield saved
+ finally:
+ sys.path[:] = saved
+
+
+@contextlib.contextmanager
+def override_temp(replacement):
+ """
+ Monkey-patch tempfile.tempdir with replacement, ensuring it exists
+ """
os.makedirs(replacement, exist_ok=True)
-
- saved = tempfile.tempdir
-
- tempfile.tempdir = replacement
-
- try:
- yield
- finally:
- tempfile.tempdir = saved
-
-
-@contextlib.contextmanager
-def pushd(target):
- saved = os.getcwd()
- os.chdir(target)
- try:
- yield saved
- finally:
- os.chdir(saved)
-
-
-class UnpickleableException(Exception):
- """
- An exception representing another Exception that could not be pickled.
- """
-
- @staticmethod
- def dump(type, exc):
- """
- Always return a dumped (pickled) type and exc. If exc can't be pickled,
- wrap it in UnpickleableException first.
- """
- try:
- return pickle.dumps(type), pickle.dumps(exc)
- except Exception:
- # get UnpickleableException inside the sandbox
- from setuptools.sandbox import UnpickleableException as cls
-
- return cls.dump(cls, cls(repr(exc)))
-
-
-class ExceptionSaver:
- """
- A Context Manager that will save an exception, serialized, and restore it
- later.
- """
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, exc, tb):
- if not exc:
- return
-
- # dump the exception
- self._saved = UnpickleableException.dump(type, exc)
- self._tb = tb
-
- # suppress the exception
- return True
-
- def resume(self):
- "restore and re-raise any exception"
-
- if '_saved' not in vars(self):
- return
-
- type, exc = map(pickle.loads, self._saved)
+
+ saved = tempfile.tempdir
+
+ tempfile.tempdir = replacement
+
+ try:
+ yield
+ finally:
+ tempfile.tempdir = saved
+
+
+@contextlib.contextmanager
+def pushd(target):
+ saved = os.getcwd()
+ os.chdir(target)
+ try:
+ yield saved
+ finally:
+ os.chdir(saved)
+
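
Each helper above saves state on entry and restores it in a finally block, which is what lets setup_context nest them safely. A runnable sketch with pushd, repeated here so the snippet is self-contained:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def pushd(target):
    saved = os.getcwd()
    os.chdir(target)
    try:
        yield saved
    finally:
        os.chdir(saved)

with tempfile.TemporaryDirectory() as target:
    before = os.getcwd()
    with pushd(target):
        # inside the block the cwd is the target directory
        assert os.path.realpath(os.getcwd()) == os.path.realpath(target)
    assert os.getcwd() == before  # restored even if the body raises
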
+
+class UnpickleableException(Exception):
+ """
+ An exception representing another Exception that could not be pickled.
+ """
+
+ @staticmethod
+ def dump(type, exc):
+ """
+ Always return a dumped (pickled) type and exc. If exc can't be pickled,
+ wrap it in UnpickleableException first.
+ """
+ try:
+ return pickle.dumps(type), pickle.dumps(exc)
+ except Exception:
+ # get UnpickleableException inside the sandbox
+ from setuptools.sandbox import UnpickleableException as cls
+
+ return cls.dump(cls, cls(repr(exc)))
+
+
+class ExceptionSaver:
+ """
+ A Context Manager that will save an exception, serialized, and restore it
+ later.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, exc, tb):
+ if not exc:
+ return
+
+ # dump the exception
+ self._saved = UnpickleableException.dump(type, exc)
+ self._tb = tb
+
+ # suppress the exception
+ return True
+
+ def resume(self):
+ "restore and re-raise any exception"
+
+ if '_saved' not in vars(self):
+ return
+
+ type, exc = map(pickle.loads, self._saved)
raise exc.with_traceback(self._tb)
-
-
-@contextlib.contextmanager
-def save_modules():
- """
- Context in which imported modules are saved.
-
- Translates exceptions internal to the context into the equivalent exception
- outside the context.
- """
- saved = sys.modules.copy()
- with ExceptionSaver() as saved_exc:
- yield saved
-
- sys.modules.update(saved)
- # remove any modules imported since
- del_modules = (
+
+
+@contextlib.contextmanager
+def save_modules():
+ """
+ Context in which imported modules are saved.
+
+ Translates exceptions internal to the context into the equivalent exception
+ outside the context.
+ """
+ saved = sys.modules.copy()
+ with ExceptionSaver() as saved_exc:
+ yield saved
+
+ sys.modules.update(saved)
+ # remove any modules imported since
+ del_modules = (
mod_name
for mod_name in sys.modules
- if mod_name not in saved
- # exclude any encodings modules. See #285
- and not mod_name.startswith('encodings.')
- )
- _clear_modules(del_modules)
-
- saved_exc.resume()
-
-
-def _clear_modules(module_names):
- for mod_name in list(module_names):
- del sys.modules[mod_name]
-
-
-@contextlib.contextmanager
-def save_pkg_resources_state():
- saved = pkg_resources.__getstate__()
- try:
- yield saved
- finally:
- pkg_resources.__setstate__(saved)
-
-
-@contextlib.contextmanager
-def setup_context(setup_dir):
- temp_dir = os.path.join(setup_dir, 'temp')
- with save_pkg_resources_state():
- with save_modules():
- with save_path():
+ if mod_name not in saved
+ # exclude any encodings modules. See #285
+ and not mod_name.startswith('encodings.')
+ )
+ _clear_modules(del_modules)
+
+ saved_exc.resume()
+
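
save_modules above snapshots sys.modules and evicts anything imported inside the block (except encodings.* submodules). A reduced re-creation, without the exception pickling, to show the rollback:

import contextlib
import sys

@contextlib.contextmanager
def save_modules():
    saved = sys.modules.copy()
    try:
        yield saved
    finally:
        sys.modules.update(saved)
        for name in [m for m in sys.modules
                     if m not in saved and not m.startswith('encodings.')]:
            del sys.modules[name]

with save_modules():
    import colorsys                   # stdlib module, likely not yet loaded
    assert 'colorsys' in sys.modules
print('colorsys' in sys.modules)      # False, unless it was imported before
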
+
+def _clear_modules(module_names):
+ for mod_name in list(module_names):
+ del sys.modules[mod_name]
+
+
+@contextlib.contextmanager
+def save_pkg_resources_state():
+ saved = pkg_resources.__getstate__()
+ try:
+ yield saved
+ finally:
+ pkg_resources.__setstate__(saved)
+
+
+@contextlib.contextmanager
+def setup_context(setup_dir):
+ temp_dir = os.path.join(setup_dir, 'temp')
+ with save_pkg_resources_state():
+ with save_modules():
+ with save_path():
hide_setuptools()
- with save_argv():
- with override_temp(temp_dir):
- with pushd(setup_dir):
- # ensure setuptools commands are available
- __import__('setuptools')
- yield
-
-
+ with save_argv():
+ with override_temp(temp_dir):
+ with pushd(setup_dir):
+ # ensure setuptools commands are available
+ __import__('setuptools')
+ yield
+
+
_MODULES_TO_HIDE = {
'setuptools',
'distutils',
@@ -207,78 +207,78 @@ _MODULES_TO_HIDE = {
}
-def _needs_hiding(mod_name):
- """
- >>> _needs_hiding('setuptools')
- True
- >>> _needs_hiding('pkg_resources')
- True
- >>> _needs_hiding('setuptools_plugin')
- False
- >>> _needs_hiding('setuptools.__init__')
- True
- >>> _needs_hiding('distutils')
- True
- >>> _needs_hiding('os')
- False
- >>> _needs_hiding('Cython')
- True
- """
+def _needs_hiding(mod_name):
+ """
+ >>> _needs_hiding('setuptools')
+ True
+ >>> _needs_hiding('pkg_resources')
+ True
+ >>> _needs_hiding('setuptools_plugin')
+ False
+ >>> _needs_hiding('setuptools.__init__')
+ True
+ >>> _needs_hiding('distutils')
+ True
+ >>> _needs_hiding('os')
+ False
+ >>> _needs_hiding('Cython')
+ True
+ """
base_module = mod_name.split('.', 1)[0]
return base_module in _MODULES_TO_HIDE
-
-
-def hide_setuptools():
- """
- Remove references to setuptools' modules from sys.modules to allow the
- invocation to import the most appropriate setuptools. This technique is
- necessary to avoid issues such as #315 where setuptools upgrading itself
- would fail to find a function declared in the metadata.
- """
+
+
+def hide_setuptools():
+ """
+ Remove references to setuptools' modules from sys.modules to allow the
+ invocation to import the most appropriate setuptools. This technique is
+ necessary to avoid issues such as #315 where setuptools upgrading itself
+ would fail to find a function declared in the metadata.
+ """
_distutils_hack = sys.modules.get('_distutils_hack', None)
if _distutils_hack is not None:
_distutils_hack.remove_shim()
- modules = filter(_needs_hiding, sys.modules)
- _clear_modules(modules)
-
-
-def run_setup(setup_script, args):
- """Run a distutils setup script, sandboxed in its directory"""
- setup_dir = os.path.abspath(os.path.dirname(setup_script))
- with setup_context(setup_dir):
- try:
+ modules = filter(_needs_hiding, sys.modules)
+ _clear_modules(modules)
+
+
+def run_setup(setup_script, args):
+ """Run a distutils setup script, sandboxed in its directory"""
+ setup_dir = os.path.abspath(os.path.dirname(setup_script))
+ with setup_context(setup_dir):
+ try:
sys.argv[:] = [setup_script] + list(args)
- sys.path.insert(0, setup_dir)
- # reset to include setup dir, w/clean callback list
- working_set.__init__()
+ sys.path.insert(0, setup_dir)
+ # reset to include setup dir, w/clean callback list
+ working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
with DirectorySandbox(setup_dir):
ns = dict(__file__=setup_script, __name__='__main__')
- _execfile(setup_script, ns)
- except SystemExit as v:
- if v.args and v.args[0]:
- raise
- # Normal exit, just return
-
-
-class AbstractSandbox:
- """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
-
- _active = False
-
- def __init__(self):
- self._attrs = [
+ _execfile(setup_script, ns)
+ except SystemExit as v:
+ if v.args and v.args[0]:
+ raise
+ # Normal exit, just return
+
+
+class AbstractSandbox:
+ """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
+
+ _active = False
+
+ def __init__(self):
+ self._attrs = [
name
for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
- ]
-
- def _copy(self, source):
- for name in self._attrs:
+ ]
+
+ def _copy(self, source):
+ for name in self._attrs:
setattr(os, name, getattr(source, name))
-
+
def __enter__(self):
self._copy(self)
if _file:
@@ -293,39 +293,39 @@ class AbstractSandbox:
builtins.open = _open
self._copy(_os)
- def run(self, func):
- """Run 'func' under os sandboxing"""
+ def run(self, func):
+ """Run 'func' under os sandboxing"""
with self:
- return func()
-
- def _mk_dual_path_wrapper(name):
+ return func()
+
+ def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
- if self._active:
+ if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
- return wrap
-
- for name in ["rename", "link", "symlink"]:
+ return wrap
+
+ for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
-
- def _mk_single_path_wrapper(name, original=None):
+
+ def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
- if self._active:
+ if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
- return wrap
-
- if _file:
- _file = _mk_single_path_wrapper('file', _file)
- _open = _mk_single_path_wrapper('open', _open)
- for name in [
+ return wrap
+
+ if _file:
+ _file = _mk_single_path_wrapper('file', _file)
+ _open = _mk_single_path_wrapper('open', _open)
+ for name in [
"stat",
"listdir",
"chdir",
@@ -345,69 +345,69 @@ class AbstractSandbox:
"mknod",
"pathconf",
"access",
- ]:
+ ]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
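
The `locals()[name] = ...` trick above installs one generated method per `os` function while the class body is still executing. A toy version of the same technique, independent of AbstractSandbox (TracingOs is invented for illustration):

import os as _os

class TracingOs:
    """Delegate to real os functions, logging each call first."""

    def _mk_wrapper(name):                 # runs in the class body, no self
        original = getattr(_os, name)
        def wrap(self, *args, **kw):
            print('os.%s%r' % (name, args))
            return original(*args, **kw)
        return wrap

    # Class bodies execute top to bottom, so locals() here is the
    # namespace that will become the class dict.
    for _name in ('getcwd', 'listdir'):
        if hasattr(_os, _name):
            locals()[_name] = _mk_wrapper(_name)
    del _mk_wrapper, _name

print(TracingOs().getcwd())
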
-
- def _mk_single_with_return(name):
+
+ def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
- if self._active:
+ if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
- return wrap
-
- for name in ['readlink', 'tempnam']:
+ return wrap
+
+ for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
-
- def _mk_query(name):
+
+ def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
- if self._active:
- return self._remap_output(name, retval)
- return retval
+ if self._active:
+ return self._remap_output(name, retval)
+ return retval
+
+ return wrap
- return wrap
-
- for name in ['getcwd', 'tmpnam']:
+ for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
-
+
def _validate_path(self, path):
- """Called to remap or validate any path, whether input or output"""
- return path
-
+ """Called to remap or validate any path, whether input or output"""
+ return path
+
def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- return self._validate_path(path)
-
+ """Called for path inputs"""
+ return self._validate_path(path)
+
def _remap_output(self, operation, path):
- """Called for path outputs"""
- return self._validate_path(path)
-
+ """Called for path outputs"""
+ return self._validate_path(path)
+
def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- return (
+ """Called for path pairs like rename, link, and symlink operations"""
+ return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw),
- )
-
-
-if hasattr(os, 'devnull'):
+ )
+
+
+if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull]
-else:
- _EXCEPTIONS = []
-
-
-class DirectorySandbox(AbstractSandbox):
- """Restrict operations to a single subdirectory - pseudo-chroot"""
-
+else:
+ _EXCEPTIONS = []
+
+
+class DirectorySandbox(AbstractSandbox):
+ """Restrict operations to a single subdirectory - pseudo-chroot"""
+
write_ops = dict.fromkeys(
[
"open",
@@ -425,106 +425,106 @@ class DirectorySandbox(AbstractSandbox):
"tempnam",
]
)
-
+
_exception_patterns = []
- "exempt writing to paths that match the pattern"
-
- def __init__(self, sandbox, exceptions=_EXCEPTIONS):
- self._sandbox = os.path.normcase(os.path.realpath(sandbox))
+ "exempt writing to paths that match the pattern"
+
+ def __init__(self, sandbox, exceptions=_EXCEPTIONS):
+ self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
- self._exceptions = [
+ self._exceptions = [
os.path.normcase(os.path.realpath(path)) for path in exceptions
- ]
- AbstractSandbox.__init__(self)
-
- def _violation(self, operation, *args, **kw):
- from setuptools.sandbox import SandboxViolation
-
- raise SandboxViolation(operation, args, kw)
-
- if _file:
-
- def _file(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("file", path, mode, *args, **kw)
+ ]
+ AbstractSandbox.__init__(self)
+
+ def _violation(self, operation, *args, **kw):
+ from setuptools.sandbox import SandboxViolation
+
+ raise SandboxViolation(operation, args, kw)
+
+ if _file:
+
+ def _file(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
-
- def _open(self, path, mode='r', *args, **kw):
- if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
- self._violation("open", path, mode, *args, **kw)
+
+ def _open(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
-
- def tmpnam(self):
- self._violation("tmpnam")
-
- def _ok(self, path):
- active = self._active
- try:
- self._active = False
- realpath = os.path.normcase(os.path.realpath(path))
- return (
- self._exempted(realpath)
- or realpath == self._sandbox
- or realpath.startswith(self._prefix)
- )
- finally:
- self._active = active
-
- def _exempted(self, filepath):
- start_matches = (
+
+ def tmpnam(self):
+ self._violation("tmpnam")
+
+ def _ok(self, path):
+ active = self._active
+ try:
+ self._active = False
+ realpath = os.path.normcase(os.path.realpath(path))
+ return (
+ self._exempted(realpath)
+ or realpath == self._sandbox
+ or realpath.startswith(self._prefix)
+ )
+ finally:
+ self._active = active
+
+ def _exempted(self, filepath):
+ start_matches = (
filepath.startswith(exception) for exception in self._exceptions
- )
- pattern_matches = (
+ )
+ pattern_matches = (
re.match(pattern, filepath) for pattern in self._exception_patterns
- )
- candidates = itertools.chain(start_matches, pattern_matches)
- return any(candidates)
-
- def _remap_input(self, operation, path, *args, **kw):
- """Called for path inputs"""
- if operation in self.write_ops and not self._ok(path):
- self._violation(operation, os.path.realpath(path), *args, **kw)
- return path
-
- def _remap_pair(self, operation, src, dst, *args, **kw):
- """Called for path pairs like rename, link, and symlink operations"""
- if not self._ok(src) or not self._ok(dst):
- self._violation(operation, src, dst, *args, **kw)
+ )
+ candidates = itertools.chain(start_matches, pattern_matches)
+ return any(candidates)
+
+ def _remap_input(self, operation, path, *args, **kw):
+ """Called for path inputs"""
+ if operation in self.write_ops and not self._ok(path):
+ self._violation(operation, os.path.realpath(path), *args, **kw)
+ return path
+
+ def _remap_pair(self, operation, src, dst, *args, **kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ if not self._ok(src) or not self._ok(dst):
+ self._violation(operation, src, dst, *args, **kw)
return (src, dst)
-
- def open(self, file, flags, mode=0o777, *args, **kw):
- """Called for low-level os.open()"""
- if flags & WRITE_FLAGS and not self._ok(file):
- self._violation("os.open", file, flags, mode, *args, **kw)
+
+ def open(self, file, flags, mode=0o777, *args, **kw):
+ """Called for low-level os.open()"""
+ if flags & WRITE_FLAGS and not self._ok(file):
+ self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
-
-WRITE_FLAGS = functools.reduce(
+
+WRITE_FLAGS = functools.reduce(
operator.or_,
[
getattr(_os, a, 0)
for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()
],
-)
-
+)
+
+
+class SandboxViolation(DistutilsError):
+ """A setup script attempted to modify the filesystem outside the sandbox"""
-class SandboxViolation(DistutilsError):
- """A setup script attempted to modify the filesystem outside the sandbox"""
-
tmpl = textwrap.dedent(
"""
SandboxViolation: {cmd}{args!r} {kwargs}
-
+
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
-
+
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
"""
).lstrip()
-
+
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
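
WRITE_FLAGS, defined just above, ORs together every `os.open` flag that implies write access, so DirectorySandbox.open can vet a flags word with a single bitwise AND. A self-contained illustration (the constant is rebuilt locally here rather than imported, and requests_write is an invented name):

import functools
import operator
import os

WRITE_FLAGS = functools.reduce(
    operator.or_,
    [getattr(os, a, 0) for a in
     "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()],
)

def requests_write(flags):
    # any overlap with a write-implying bit means the open can mutate
    return bool(flags & WRITE_FLAGS)

assert requests_write(os.O_WRONLY | os.O_CREAT)
assert not requests_write(os.O_RDONLY)       # O_RDONLY is 0
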
diff --git a/contrib/python/setuptools/py3/setuptools/script.tmpl b/contrib/python/setuptools/py3/setuptools/script.tmpl
index df8f68d6b4..ff5efbcab3 100644
--- a/contrib/python/setuptools/py3/setuptools/script.tmpl
+++ b/contrib/python/setuptools/py3/setuptools/script.tmpl
@@ -1,3 +1,3 @@
-# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
-__requires__ = %(spec)r
-__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
+# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
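
The script template above is filled with old-style %-formatting; the `r` conversions embed values as Python literals so the generated stub round-trips safely. Roughly how it renders (the spec and script name below are illustrative):

template = (
    "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n"
    "__requires__ = %(spec)r\n"
    "__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)\n"
)
print(template % {'spec': 'example-pkg==1.0', 'script_name': 'example-cli'})
# -> __requires__ = 'example-pkg==1.0'
#    __import__('pkg_resources').run_script('example-pkg==1.0', 'example-cli')
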
diff --git a/contrib/python/setuptools/py3/setuptools/unicode_utils.py b/contrib/python/setuptools/py3/setuptools/unicode_utils.py
index 9ab86b9def..e84e65e3e1 100644
--- a/contrib/python/setuptools/py3/setuptools/unicode_utils.py
+++ b/contrib/python/setuptools/py3/setuptools/unicode_utils.py
@@ -1,42 +1,42 @@
-import unicodedata
-import sys
-
-
-# HFS Plus uses decomposed UTF-8
-def decompose(path):
+import unicodedata
+import sys
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
if isinstance(path, str):
- return unicodedata.normalize('NFD', path)
- try:
- path = path.decode('utf-8')
- path = unicodedata.normalize('NFD', path)
- path = path.encode('utf-8')
- except UnicodeError:
- pass # Not UTF-8
- return path
-
-
-def filesys_decode(path):
- """
- Ensure that the given path is decoded,
- None when no expected encoding works
- """
-
+ return unicodedata.normalize('NFD', path)
+ try:
+ path = path.decode('utf-8')
+ path = unicodedata.normalize('NFD', path)
+ path = path.encode('utf-8')
+ except UnicodeError:
+ pass # Not UTF-8
+ return path
+
+
+def filesys_decode(path):
+ """
+ Ensure that the given path is decoded,
+ None when no expected encoding works
+ """
+
if isinstance(path, str):
- return path
-
- fs_enc = sys.getfilesystemencoding() or 'utf-8'
- candidates = fs_enc, 'utf-8'
-
- for enc in candidates:
- try:
- return path.decode(enc)
- except UnicodeDecodeError:
- continue
-
-
-def try_encode(string, enc):
- "turn unicode encoding into a functional routine"
- try:
- return string.encode(enc)
- except UnicodeEncodeError:
- return None
+ return path
+
+ fs_enc = sys.getfilesystemencoding() or 'utf-8'
+ candidates = fs_enc, 'utf-8'
+
+ for enc in candidates:
+ try:
+ return path.decode(enc)
+ except UnicodeDecodeError:
+ continue
+
+
+def try_encode(string, enc):
+ "turn unicode encoding into a functional routine"
+ try:
+ return string.encode(enc)
+ except UnicodeEncodeError:
+ return None
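
HFS Plus stores file names in decomposed UTF-8, which is why decompose() normalizes to NFD: a precomposed character splits into a base letter plus a combining mark. A quick demonstration:

import unicodedata

precomposed = '\u00e9'                         # 'é' as one code point
nfd = unicodedata.normalize('NFD', precomposed)

assert nfd == 'e\u0301'                        # 'e' + COMBINING ACUTE ACCENT
assert (len(precomposed), len(nfd)) == (1, 2)
assert unicodedata.normalize('NFC', nfd) == precomposed   # round-trips back
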
diff --git a/contrib/python/setuptools/py3/setuptools/windows_support.py b/contrib/python/setuptools/py3/setuptools/windows_support.py
index a4a389fede..cb977cff95 100644
--- a/contrib/python/setuptools/py3/setuptools/windows_support.py
+++ b/contrib/python/setuptools/py3/setuptools/windows_support.py
@@ -1,29 +1,29 @@
-import platform
-import ctypes
-
-
-def windows_only(func):
- if platform.system() != 'Windows':
- return lambda *args, **kwargs: None
- return func
-
-
-@windows_only
-def hide_file(path):
- """
- Set the hidden attribute on a file or directory.
-
- From http://stackoverflow.com/questions/19622133/
-
- `path` must be text.
- """
- __import__('ctypes.wintypes')
- SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
- SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
- SetFileAttributes.restype = ctypes.wintypes.BOOL
-
- FILE_ATTRIBUTE_HIDDEN = 0x02
-
- ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
- if not ret:
- raise ctypes.WinError()
+import platform
+import ctypes
+
+
+def windows_only(func):
+ if platform.system() != 'Windows':
+ return lambda *args, **kwargs: None
+ return func
+
+
+@windows_only
+def hide_file(path):
+ """
+ Set the hidden attribute on a file or directory.
+
+ From http://stackoverflow.com/questions/19622133/
+
+ `path` must be text.
+ """
+ __import__('ctypes.wintypes')
+ SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+ SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+ SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+ FILE_ATTRIBUTE_HIDDEN = 0x02
+
+ ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+ if not ret:
+ raise ctypes.WinError()
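
Because of the windows_only decorator, hide_file silently degrades to a no-op everywhere except Windows, so callers never need their own platform check. A hedged usage sketch (the path is invented):

from setuptools.windows_support import hide_file

# On Windows this sets FILE_ATTRIBUTE_HIDDEN via SetFileAttributesW;
# on any other platform the decorator has already swapped the body
# for a do-nothing lambda, so the call is safe unconditionally.
hide_file('C:\\project\\build\\.eggs')   # illustrative path
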
diff --git a/contrib/python/setuptools/py3/ya.make b/contrib/python/setuptools/py3/ya.make
index 4ef4576cde..5682b6f782 100644
--- a/contrib/python/setuptools/py3/ya.make
+++ b/contrib/python/setuptools/py3/ya.make
@@ -1,9 +1,9 @@
PY3_LIBRARY()
-
+
OWNER(borman orivej g:python-contrib)
VERSION(59.7.0)
-
+
LICENSE(MIT)
PEERDIR(
@@ -16,11 +16,11 @@ NO_CHECK_IMPORTS(
setuptools.*
)
-PY_SRCS(
- TOP_LEVEL
+PY_SRCS(
+ TOP_LEVEL
_distutils_hack/__init__.py
_distutils_hack/override.py
- pkg_resources/__init__.py
+ pkg_resources/__init__.py
pkg_resources/_vendor/__init__.py
pkg_resources/_vendor/appdirs.py
pkg_resources/_vendor/packaging/__about__.py
@@ -36,7 +36,7 @@ PY_SRCS(
pkg_resources/_vendor/packaging/version.py
pkg_resources/_vendor/pyparsing.py
pkg_resources/extern/__init__.py
- setuptools/__init__.py
+ setuptools/__init__.py
setuptools/_deprecation_warning.py
setuptools/_distutils/__init__.py
setuptools/_distutils/_msvccompiler.py
@@ -110,54 +110,54 @@ PY_SRCS(
setuptools/_vendor/packaging/utils.py
setuptools/_vendor/packaging/version.py
setuptools/_vendor/pyparsing.py
- setuptools/archive_util.py
+ setuptools/archive_util.py
setuptools/build_meta.py
- setuptools/command/__init__.py
- setuptools/command/alias.py
- setuptools/command/bdist_egg.py
- setuptools/command/bdist_rpm.py
+ setuptools/command/__init__.py
+ setuptools/command/alias.py
+ setuptools/command/bdist_egg.py
+ setuptools/command/bdist_rpm.py
setuptools/command/build_clib.py
- setuptools/command/build_ext.py
- setuptools/command/build_py.py
- setuptools/command/develop.py
+ setuptools/command/build_ext.py
+ setuptools/command/build_py.py
+ setuptools/command/develop.py
setuptools/command/dist_info.py
- setuptools/command/easy_install.py
- setuptools/command/egg_info.py
- setuptools/command/install.py
- setuptools/command/install_egg_info.py
- setuptools/command/install_lib.py
- setuptools/command/install_scripts.py
+ setuptools/command/easy_install.py
+ setuptools/command/egg_info.py
+ setuptools/command/install.py
+ setuptools/command/install_egg_info.py
+ setuptools/command/install_lib.py
+ setuptools/command/install_scripts.py
setuptools/command/py36compat.py
- setuptools/command/register.py
- setuptools/command/rotate.py
- setuptools/command/saveopts.py
- setuptools/command/sdist.py
- setuptools/command/setopt.py
- setuptools/command/test.py
+ setuptools/command/register.py
+ setuptools/command/rotate.py
+ setuptools/command/saveopts.py
+ setuptools/command/sdist.py
+ setuptools/command/setopt.py
+ setuptools/command/test.py
setuptools/command/upload.py
- setuptools/command/upload_docs.py
+ setuptools/command/upload_docs.py
setuptools/config.py
setuptools/dep_util.py
- setuptools/depends.py
- setuptools/dist.py
+ setuptools/depends.py
+ setuptools/dist.py
setuptools/errors.py
- setuptools/extension.py
+ setuptools/extension.py
setuptools/extern/__init__.py
setuptools/glob.py
setuptools/installer.py
- setuptools/launch.py
+ setuptools/launch.py
setuptools/monkey.py
setuptools/msvc.py
setuptools/namespaces.py
- setuptools/package_index.py
+ setuptools/package_index.py
setuptools/py34compat.py
- setuptools/sandbox.py
- setuptools/unicode_utils.py
- setuptools/version.py
+ setuptools/sandbox.py
+ setuptools/unicode_utils.py
+ setuptools/version.py
setuptools/wheel.py
- setuptools/windows_support.py
-)
-
+ setuptools/windows_support.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/setuptools/py3/
.dist-info/METADATA
@@ -165,4 +165,4 @@ RESOURCE_FILES(
.dist-info/top_level.txt
)
-END()
+END()
diff --git a/contrib/python/setuptools/ya.make b/contrib/python/setuptools/ya.make
index d70489e494..535a2d61fa 100644
--- a/contrib/python/setuptools/ya.make
+++ b/contrib/python/setuptools/ya.make
@@ -1,9 +1,9 @@
PY23_LIBRARY()
-
+
LICENSE(Service-Py23-Proxy)
OWNER(g:python-contrib)
-
+
IF (PYTHON2)
PEERDIR(contrib/python/setuptools/py2)
ELSE()
@@ -13,7 +13,7 @@ ENDIF()
NO_LINT()
END()
-
+
RECURSE(
py2
py3
diff --git a/contrib/python/traitlets/py2/COPYING.md b/contrib/python/traitlets/py2/COPYING.md
index e314a9d376..39ca730a63 100644
--- a/contrib/python/traitlets/py2/COPYING.md
+++ b/contrib/python/traitlets/py2/COPYING.md
@@ -1,62 +1,62 @@
-# Licensing terms
-
-Traitlets is adapted from enthought.traits, Copyright (c) Enthought, Inc.,
-under the terms of the Modified BSD License.
-
-This project is licensed under the terms of the Modified BSD License
-(also known as New or Revised or 3-Clause BSD), as follows:
-
-- Copyright (c) 2001-, IPython Development Team
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-Neither the name of the IPython Development Team nor the names of its
-contributors may be used to endorse or promote products derived from this
-software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-## About the IPython Development Team
-
-The IPython Development Team is the set of all contributors to the IPython project.
-This includes all of the IPython subprojects.
-
-The core team that coordinates development on GitHub can be found here:
-https://github.com/jupyter/.
-
-## Our Copyright Policy
-
-IPython uses a shared copyright model. Each contributor maintains copyright
-over their contributions to IPython. But, it is important to note that these
-contributions are typically only changes to the repositories. Thus, the IPython
-source code, in its entirety is not the copyright of any single person or
-institution. Instead, it is the collective copyright of the entire IPython
-Development Team. If individual contributors want to maintain a record of what
-changes/contributions they have specific copyright on, they should indicate
-their copyright in the commit message of the change, when they commit the
-change to one of the IPython repositories.
-
-With this in mind, the following banner should be used in any source code file
-to indicate the copyright and license terms:
-
- # Copyright (c) IPython Development Team.
- # Distributed under the terms of the Modified BSD License.
+# Licensing terms
+
+Traitlets is adapted from enthought.traits, Copyright (c) Enthought, Inc.,
+under the terms of the Modified BSD License.
+
+This project is licensed under the terms of the Modified BSD License
+(also known as New or Revised or 3-Clause BSD), as follows:
+
+- Copyright (c) 2001-, IPython Development Team
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the IPython Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+## About the IPython Development Team
+
+The IPython Development Team is the set of all contributors to the IPython project.
+This includes all of the IPython subprojects.
+
+The core team that coordinates development on GitHub can be found here:
+https://github.com/jupyter/.
+
+## Our Copyright Policy
+
+IPython uses a shared copyright model. Each contributor maintains copyright
+over their contributions to IPython. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety is not the copyright of any single person or
+institution. Instead, it is the collective copyright of the entire IPython
+Development Team. If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the IPython repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+ # Copyright (c) IPython Development Team.
+ # Distributed under the terms of the Modified BSD License.
diff --git a/contrib/python/traitlets/py2/traitlets/__init__.py b/contrib/python/traitlets/py2/traitlets/__init__.py
index 39933e5a79..b609adb565 100644
--- a/contrib/python/traitlets/py2/traitlets/__init__.py
+++ b/contrib/python/traitlets/py2/traitlets/__init__.py
@@ -1,3 +1,3 @@
-from .traitlets import *
-from .utils.importstring import import_item
-from ._version import version_info, __version__
+from .traitlets import *
+from .utils.importstring import import_item
+from ._version import version_info, __version__
diff --git a/contrib/python/traitlets/py2/traitlets/_version.py b/contrib/python/traitlets/py2/traitlets/_version.py
index 6cc5c82a4f..ed16b3c1e1 100644
--- a/contrib/python/traitlets/py2/traitlets/_version.py
+++ b/contrib/python/traitlets/py2/traitlets/_version.py
@@ -1,2 +1,2 @@
version_info = (4, 3, 3)
-__version__ = '.'.join(map(str, version_info))
+__version__ = '.'.join(map(str, version_info))
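
Deriving __version__ from version_info keeps the two representations from ever drifting apart; the join is trivial but worth seeing in isolation:

version_info = (4, 3, 3)
__version__ = '.'.join(map(str, version_info))
assert __version__ == '4.3.3'
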
diff --git a/contrib/python/traitlets/py2/traitlets/config/__init__.py b/contrib/python/traitlets/py2/traitlets/config/__init__.py
index 1531ee5930..0ae7d63171 100644
--- a/contrib/python/traitlets/py2/traitlets/config/__init__.py
+++ b/contrib/python/traitlets/py2/traitlets/config/__init__.py
@@ -1,8 +1,8 @@
-# encoding: utf-8
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from .application import *
-from .configurable import *
-from .loader import Config
+# encoding: utf-8
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from .application import *
+from .configurable import *
+from .loader import Config
diff --git a/contrib/python/traitlets/py2/traitlets/config/application.py b/contrib/python/traitlets/py2/traitlets/config/application.py
index c0467e6c48..d3a4c45e77 100644
--- a/contrib/python/traitlets/py2/traitlets/config/application.py
+++ b/contrib/python/traitlets/py2/traitlets/config/application.py
@@ -1,68 +1,68 @@
-# encoding: utf-8
-"""A base class for a configurable application."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
+# encoding: utf-8
+"""A base class for a configurable application."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from __future__ import print_function
+
from copy import deepcopy
-import json
-import logging
-import os
-import re
-import sys
+import json
+import logging
+import os
+import re
+import sys
from collections import defaultdict, OrderedDict
-
-from decorator import decorator
-
-from traitlets.config.configurable import Configurable, SingletonConfigurable
-from traitlets.config.loader import (
- KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
-)
-
-from traitlets.traitlets import (
+
+from decorator import decorator
+
+from traitlets.config.configurable import Configurable, SingletonConfigurable
+from traitlets.config.loader import (
+ KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
+)
+
+from traitlets.traitlets import (
Bool, Unicode, List, Enum, Dict, Instance, TraitError, observe, observe_compat, default,
-)
-from ipython_genutils.importstring import import_item
-from ipython_genutils.text import indent, wrap_paragraphs, dedent
-from ipython_genutils import py3compat
-
+)
+from ipython_genutils.importstring import import_item
+from ipython_genutils.text import indent, wrap_paragraphs, dedent
+from ipython_genutils import py3compat
+
import six
-#-----------------------------------------------------------------------------
-# Descriptions for the various sections
-#-----------------------------------------------------------------------------
-
-# merge flags&aliases into options
-option_description = """
-Arguments that take values are actually convenience aliases to full
-Configurables, whose aliases are listed on the help line. For more information
-on full configurables, see '--help-all'.
-""".strip() # trim newlines of front and back
-
-keyvalue_description = """
-Parameters are set from command-line arguments of the form:
-`--Class.trait=value`.
-This line is evaluated in Python, so simple expressions are allowed, e.g.::
-`--C.a='range(3)'` For setting C.a=[0,1,2].
-""".strip() # trim newlines of front and back
-
-# sys.argv can be missing, for example when python is embedded. See the docs
-# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
-if not hasattr(sys, "argv"):
- sys.argv = [""]
-
-subcommand_description = """
-Subcommands are launched as `{app} cmd [args]`. For information on using
-subcommand 'cmd', do: `{app} cmd -h`.
-"""
-# get running program name
-
-#-----------------------------------------------------------------------------
-# Application class
-#-----------------------------------------------------------------------------
-
+#-----------------------------------------------------------------------------
+# Descriptions for the various sections
+#-----------------------------------------------------------------------------
+
+# merge flags&aliases into options
+option_description = """
+Arguments that take values are actually convenience aliases to full
+Configurables, whose aliases are listed on the help line. For more information
+on full configurables, see '--help-all'.
+""".strip() # trim newlines of front and back
+
+keyvalue_description = """
+Parameters are set from command-line arguments of the form:
+`--Class.trait=value`.
+This line is evaluated in Python, so simple expressions are allowed, e.g.::
+`--C.a='range(3)'` For setting C.a=[0,1,2].
+""".strip() # trim newlines of front and back
+
+# sys.argv can be missing, for example when python is embedded. See the docs
+# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
+if not hasattr(sys, "argv"):
+ sys.argv = [""]
+
+subcommand_description = """
+Subcommands are launched as `{app} cmd [args]`. For information on using
+subcommand 'cmd', do: `{app} cmd -h`.
+"""
+# get running program name
+
+#-----------------------------------------------------------------------------
+# Application class
+#-----------------------------------------------------------------------------
+
_envvar = os.environ.get('TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR','')
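
Only the edges of the _envvar handling survive in this hunk; the elided branch maps the string to a boolean before the ValueError shown below fires for anything unrecognized. A hedged reconstruction of that kind of tri-state parse, not necessarily the exact upstream code:

import os

_envvar = os.environ.get('TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR', '')
if _envvar.lower() in ('1', 'true'):
    TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = True
elif _envvar.lower() in ('0', 'false', ''):
    TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = False
else:
    raise ValueError(
        "Unsupported value for environment variable: "
        "'TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR' is set to %r "
        "which is none of {'0', '1', 'false', 'true', ''}." % _envvar)
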
@@ -74,194 +74,194 @@ else:
raise ValueError("Unsupported value for environment variable: 'TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
-@decorator
-def catch_config_error(method, app, *args, **kwargs):
- """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
-
- On a TraitError (generally caused by bad config), this will print the trait's
- message, and exit the app.
-
- For use on init methods, to prevent invoking excepthook on invalid input.
- """
- try:
- return method(app, *args, **kwargs)
- except (TraitError, ArgumentError) as e:
- app.print_help()
- app.log.fatal("Bad config encountered during initialization:")
- app.log.fatal(str(e))
- app.log.debug("Config at the time: %s", app.config)
- app.exit(1)
-
-
-class ApplicationError(Exception):
- pass
-
-
-class LevelFormatter(logging.Formatter):
- """Formatter with additional `highlevel` record
-
- This field is empty if log level is less than highlevel_limit,
- otherwise it is formatted with self.highlevel_format.
-
- Useful for adding 'WARNING' to warning messages,
- without adding 'INFO' to info, etc.
- """
- highlevel_limit = logging.WARN
- highlevel_format = " %(levelname)s |"
-
- def format(self, record):
- if record.levelno >= self.highlevel_limit:
- record.highlevel = self.highlevel_format % record.__dict__
- else:
- record.highlevel = ""
- return super(LevelFormatter, self).format(record)
-
-
-class Application(SingletonConfigurable):
- """A singleton application with full configuration support."""
-
- # The name of the application, will usually match the name of the command
- # line application
- name = Unicode(u'application')
-
- # The description of the application that is printed at the beginning
- # of the help.
- description = Unicode(u'This is an application.')
- # default section descriptions
- option_description = Unicode(option_description)
- keyvalue_description = Unicode(keyvalue_description)
- subcommand_description = Unicode(subcommand_description)
-
- python_config_loader_class = PyFileConfigLoader
- json_config_loader_class = JSONFileConfigLoader
-
- # The usage and example string that goes at the end of the help string.
- examples = Unicode()
-
- # A sequence of Configurable subclasses whose config=True attributes will
- # be exposed at the command line.
- classes = []
-
- def _classes_inc_parents(self):
- """Iterate through configurable classes, including configurable parents
-
- Children should always be after parents, and each class should only be
- yielded once.
- """
- seen = set()
- for c in self.classes:
- # We want to sort parents before children, so we reverse the MRO
- for parent in reversed(c.mro()):
- if issubclass(parent, Configurable) and (parent not in seen):
- seen.add(parent)
- yield parent
-
- # The version string of this application.
- version = Unicode(u'0.0')
-
- # the argv used to initialize the application
- argv = List()
-
+@decorator
+def catch_config_error(method, app, *args, **kwargs):
+ """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
+
+ On a TraitError (generally caused by bad config), this will print the trait's
+ message, and exit the app.
+
+ For use on init methods, to prevent invoking excepthook on invalid input.
+ """
+ try:
+ return method(app, *args, **kwargs)
+ except (TraitError, ArgumentError) as e:
+ app.print_help()
+ app.log.fatal("Bad config encountered during initialization:")
+ app.log.fatal(str(e))
+ app.log.debug("Config at the time: %s", app.config)
+ app.exit(1)
+
+
+class ApplicationError(Exception):
+ pass
+
+
+class LevelFormatter(logging.Formatter):
+ """Formatter with additional `highlevel` record
+
+ This field is empty if log level is less than highlevel_limit,
+ otherwise it is formatted with self.highlevel_format.
+
+ Useful for adding 'WARNING' to warning messages,
+ without adding 'INFO' to info, etc.
+ """
+ highlevel_limit = logging.WARN
+ highlevel_format = " %(levelname)s |"
+
+ def format(self, record):
+ if record.levelno >= self.highlevel_limit:
+ record.highlevel = self.highlevel_format % record.__dict__
+ else:
+ record.highlevel = ""
+ return super(LevelFormatter, self).format(record)
+
+
+class Application(SingletonConfigurable):
+ """A singleton application with full configuration support."""
+
+ # The name of the application, will usually match the name of the command
+ # line application
+ name = Unicode(u'application')
+
+ # The description of the application that is printed at the beginning
+ # of the help.
+ description = Unicode(u'This is an application.')
+ # default section descriptions
+ option_description = Unicode(option_description)
+ keyvalue_description = Unicode(keyvalue_description)
+ subcommand_description = Unicode(subcommand_description)
+
+ python_config_loader_class = PyFileConfigLoader
+ json_config_loader_class = JSONFileConfigLoader
+
+ # The usage and example string that goes at the end of the help string.
+ examples = Unicode()
+
+ # A sequence of Configurable subclasses whose config=True attributes will
+ # be exposed at the command line.
+ classes = []
+
+ def _classes_inc_parents(self):
+ """Iterate through configurable classes, including configurable parents
+
+ Children should always be after parents, and each class should only be
+ yielded once.
+ """
+ seen = set()
+ for c in self.classes:
+ # We want to sort parents before children, so we reverse the MRO
+ for parent in reversed(c.mro()):
+ if issubclass(parent, Configurable) and (parent not in seen):
+ seen.add(parent)
+ yield parent
+
+ # The version string of this application.
+ version = Unicode(u'0.0')
+
+ # the argv used to initialize the application
+ argv = List()
+
# Whether failing to load config files should prevent startup
raise_config_file_errors = Bool(TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR)
- # The log level for the application
- log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
- default_value=logging.WARN,
- help="Set the log level by value or name.").tag(config=True)
-
- @observe('log_level')
- @observe_compat
- def _log_level_changed(self, change):
- """Adjust the log level when log_level is set."""
+ # The log level for the application
+ log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
+ default_value=logging.WARN,
+ help="Set the log level by value or name.").tag(config=True)
+
+ @observe('log_level')
+ @observe_compat
+ def _log_level_changed(self, change):
+ """Adjust the log level when log_level is set."""
new = change.new
if isinstance(new, six.string_types):
- new = getattr(logging, new)
- self.log_level = new
- self.log.setLevel(new)
+ new = getattr(logging, new)
+ self.log_level = new
+ self.log.setLevel(new)
- _log_formatter_cls = LevelFormatter
+ _log_formatter_cls = LevelFormatter
log_datefmt = Unicode("%Y-%m-%d %H:%M:%S",
- help="The date format used by logging formatters for %(asctime)s"
- ).tag(config=True)
-
- log_format = Unicode("[%(name)s]%(highlevel)s %(message)s",
- help="The Logging format template",
- ).tag(config=True)
-
- @observe('log_datefmt', 'log_format')
- @observe_compat
- def _log_format_changed(self, change):
- """Change the log formatter when log_format is set."""
- _log_handler = self.log.handlers[0]
- _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
- _log_handler.setFormatter(_log_formatter)
-
- @default('log')
- def _log_default(self):
- """Start logging for this application.
-
- The default is to log to stderr using a StreamHandler, if no default
- handler already exists. The log level starts at logging.WARN, but this
- can be adjusted by setting the ``log_level`` attribute.
- """
- log = logging.getLogger(self.__class__.__name__)
- log.setLevel(self.log_level)
- log.propagate = False
- _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
- while _log:
- if _log.handlers:
- return log
- if not _log.propagate:
- break
- else:
- _log = _log.parent
+ help="The date format used by logging formatters for %(asctime)s"
+ ).tag(config=True)
+
+ log_format = Unicode("[%(name)s]%(highlevel)s %(message)s",
+ help="The Logging format template",
+ ).tag(config=True)
+
+ @observe('log_datefmt', 'log_format')
+ @observe_compat
+ def _log_format_changed(self, change):
+ """Change the log formatter when log_format is set."""
+ _log_handler = self.log.handlers[0]
+ _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
+ _log_handler.setFormatter(_log_formatter)
+
+ @default('log')
+ def _log_default(self):
+ """Start logging for this application.
+
+ The default is to log to stderr using a StreamHandler, if no default
+ handler already exists. The log level starts at logging.WARN, but this
+ can be adjusted by setting the ``log_level`` attribute.
+ """
+ log = logging.getLogger(self.__class__.__name__)
+ log.setLevel(self.log_level)
+ log.propagate = False
+ _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
+ while _log:
+ if _log.handlers:
+ return log
+ if not _log.propagate:
+ break
+ else:
+ _log = _log.parent
if sys.executable and sys.executable.endswith('pythonw.exe'):
- # this should really go to a file, but file-logging is only
- # hooked up in parallel applications
- _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
- else:
- _log_handler = logging.StreamHandler()
- _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
- _log_handler.setFormatter(_log_formatter)
- log.addHandler(_log_handler)
- return log
-
- # the alias map for configurables
- aliases = Dict({'log-level' : 'Application.log_level'})
-
- # flags for loading Configurables or store_const style flags
- # flags are loaded from this dict by '--key' flags
- # this must be a dict of two-tuples, the first element being the Config/dict
- # and the second being the help string for the flag
- flags = Dict()
- @observe('flags')
- @observe_compat
- def _flags_changed(self, change):
- """ensure flags dict is valid"""
+ # this should really go to a file, but file-logging is only
+ # hooked up in parallel applications
+ _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
+ else:
+ _log_handler = logging.StreamHandler()
+ _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
+ _log_handler.setFormatter(_log_formatter)
+ log.addHandler(_log_handler)
+ return log
+
+ # the alias map for configurables
+ aliases = Dict({'log-level' : 'Application.log_level'})
+
+ # flags for loading Configurables or store_const style flags
+ # flags are loaded from this dict by '--key' flags
+ # this must be a dict of two-tuples, the first element being the Config/dict
+ # and the second being the help string for the flag
+ flags = Dict()
+ @observe('flags')
+ @observe_compat
+ def _flags_changed(self, change):
+ """ensure flags dict is valid"""
new = change.new
- for key, value in new.items():
- assert len(value) == 2, "Bad flag: %r:%s" % (key, value)
- assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s" % (key, value)
+ for key, value in new.items():
+ assert len(value) == 2, "Bad flag: %r:%s" % (key, value)
+ assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s" % (key, value)
assert isinstance(value[1], six.string_types), "Bad flag: %r:%s" % (key, value)
-
-
- # subcommands for launching other applications
- # if this is not empty, this will be a parent Application
- # this must be a dict of two-tuples,
- # the first element being the application class/import string
- # and the second being the help string for the subcommand
- subcommands = Dict()
- # parse_command_line will initialize a subapp, if requested
- subapp = Instance('traitlets.config.application.Application', allow_none=True)
-
- # extra command-line arguments that don't set config values
- extra_args = List(Unicode())
-
+
+
+ # subcommands for launching other applications
+ # if this is not empty, this will be a parent Application
+ # this must be a dict of two-tuples,
+ # the first element being the application class/import string
+ # and the second being the help string for the subcommand
+ subcommands = Dict()
+ # parse_command_line will initialize a subapp, if requested
+ subapp = Instance('traitlets.config.application.Application', allow_none=True)
+
+ # extra command-line arguments that don't set config values
+ extra_args = List(Unicode())
+
cli_config = Instance(Config, (), {},
help="""The subset of our configuration that came from the command-line
-
+
We re-load this configuration after loading config files,
to ensure that it maintains highest priority.
"""
@@ -269,10 +269,10 @@ class Application(SingletonConfigurable):
_loaded_config_files = List()
- def __init__(self, **kwargs):
- SingletonConfigurable.__init__(self, **kwargs)
- # Ensure my class is in self.classes, so my attributes appear in command line
- # options and config files.
+ def __init__(self, **kwargs):
+ SingletonConfigurable.__init__(self, **kwargs)
+ # Ensure my class is in self.classes, so my attributes appear in command line
+ # options and config files.
cls = self.__class__
if cls not in self.classes:
if self.classes is cls.classes:
@@ -281,302 +281,302 @@ class Application(SingletonConfigurable):
else:
self.classes.insert(0, self.__class__)
- @observe('config')
- @observe_compat
- def _config_changed(self, change):
- super(Application, self)._config_changed(change)
- self.log.debug('Config changed:')
+ @observe('config')
+ @observe_compat
+ def _config_changed(self, change):
+ super(Application, self)._config_changed(change)
+ self.log.debug('Config changed:')
self.log.debug(repr(change.new))
-
- @catch_config_error
- def initialize(self, argv=None):
- """Do the basic steps to configure me.
-
- Override in subclasses.
- """
- self.parse_command_line(argv)
-
-
- def start(self):
- """Start the app mainloop.
-
- Override in subclasses.
- """
- if self.subapp is not None:
- return self.subapp.start()
-
- def print_alias_help(self):
- """Print the alias part of the help."""
- if not self.aliases:
- return
-
- lines = []
- classdict = {}
- for cls in self.classes:
- # include all parents (up to, but excluding Configurable) in available names
- for c in cls.mro()[:-3]:
- classdict[c.__name__] = c
-
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ """Do the basic steps to configure me.
+
+ Override in subclasses.
+ """
+ self.parse_command_line(argv)
+
+
+ def start(self):
+ """Start the app mainloop.
+
+ Override in subclasses.
+ """
+ if self.subapp is not None:
+ return self.subapp.start()
+
+ def print_alias_help(self):
+ """Print the alias part of the help."""
+ if not self.aliases:
+ return
+
+ lines = []
+ classdict = {}
+ for cls in self.classes:
+ # include all parents (up to, but excluding Configurable) in available names
+ for c in cls.mro()[:-3]:
+ classdict[c.__name__] = c
+
for alias, longname in self.aliases.items():
- classname, traitname = longname.split('.',1)
- cls = classdict[classname]
-
- trait = cls.class_traits(config=True)[traitname]
- help = cls.class_get_trait_help(trait).splitlines()
- # reformat first line
- help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
- if len(alias) == 1:
- help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
- lines.extend(help)
- # lines.append('')
- print(os.linesep.join(lines))
-
- def print_flag_help(self):
- """Print the flag part of the help."""
- if not self.flags:
- return
-
- lines = []
+ classname, traitname = longname.split('.',1)
+ cls = classdict[classname]
+
+ trait = cls.class_traits(config=True)[traitname]
+ help = cls.class_get_trait_help(trait).splitlines()
+ # reformat first line
+ help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
+ if len(alias) == 1:
+ help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
+ lines.extend(help)
+ # lines.append('')
+ print(os.linesep.join(lines))
+
+ def print_flag_help(self):
+ """Print the flag part of the help."""
+ if not self.flags:
+ return
+
+ lines = []
for m, (cfg,help) in self.flags.items():
- prefix = '--' if len(m) > 1 else '-'
- lines.append(prefix+m)
- lines.append(indent(dedent(help.strip())))
- # lines.append('')
- print(os.linesep.join(lines))
-
- def print_options(self):
- if not self.flags and not self.aliases:
- return
- lines = ['Options']
- lines.append('-'*len(lines[0]))
- lines.append('')
- for p in wrap_paragraphs(self.option_description):
- lines.append(p)
- lines.append('')
- print(os.linesep.join(lines))
- self.print_flag_help()
- self.print_alias_help()
- print()
-
- def print_subcommands(self):
- """Print the subcommand part of the help."""
- if not self.subcommands:
- return
-
- lines = ["Subcommands"]
- lines.append('-'*len(lines[0]))
- lines.append('')
- for p in wrap_paragraphs(self.subcommand_description.format(
- app=self.name)):
- lines.append(p)
- lines.append('')
+ prefix = '--' if len(m) > 1 else '-'
+ lines.append(prefix+m)
+ lines.append(indent(dedent(help.strip())))
+ # lines.append('')
+ print(os.linesep.join(lines))
+
+ def print_options(self):
+ if not self.flags and not self.aliases:
+ return
+ lines = ['Options']
+ lines.append('-'*len(lines[0]))
+ lines.append('')
+ for p in wrap_paragraphs(self.option_description):
+ lines.append(p)
+ lines.append('')
+ print(os.linesep.join(lines))
+ self.print_flag_help()
+ self.print_alias_help()
+ print()
+
+ def print_subcommands(self):
+ """Print the subcommand part of the help."""
+ if not self.subcommands:
+ return
+
+ lines = ["Subcommands"]
+ lines.append('-'*len(lines[0]))
+ lines.append('')
+ for p in wrap_paragraphs(self.subcommand_description.format(
+ app=self.name)):
+ lines.append(p)
+ lines.append('')
for subc, (cls, help) in self.subcommands.items():
- lines.append(subc)
- if help:
- lines.append(indent(dedent(help.strip())))
- lines.append('')
- print(os.linesep.join(lines))
-
- def print_help(self, classes=False):
- """Print the help for each Configurable class in self.classes.
-
- If classes=False (the default), only flags and aliases are printed.
- """
- self.print_description()
- self.print_subcommands()
- self.print_options()
-
- if classes:
- help_classes = self.classes
- if help_classes:
- print("Class parameters")
- print("----------------")
- print()
- for p in wrap_paragraphs(self.keyvalue_description):
- print(p)
- print()
-
- for cls in help_classes:
- cls.class_print_help()
- print()
- else:
- print("To see all available configurables, use `--help-all`")
- print()
-
- self.print_examples()
-
- def document_config_options(self):
- """Generate rST format documentation for the config options this application
-
- Returns a multiline string.
- """
- return '\n'.join(c.class_config_rst_doc()
- for c in self._classes_inc_parents())
-
-
- def print_description(self):
- """Print the application description."""
- for p in wrap_paragraphs(self.description):
- print(p)
- print()
-
- def print_examples(self):
- """Print usage and examples.
-
- This usage string goes at the end of the command line help string
- and should contain examples of the application's usage.
- """
- if self.examples:
- print("Examples")
- print("--------")
- print()
- print(indent(dedent(self.examples.strip())))
- print()
-
- def print_version(self):
- """Print the version string."""
- print(self.version)
-
- @catch_config_error
- def initialize_subcommand(self, subc, argv=None):
- """Initialize a subcommand with argv."""
- subapp,help = self.subcommands.get(subc)
-
+ lines.append(subc)
+ if help:
+ lines.append(indent(dedent(help.strip())))
+ lines.append('')
+ print(os.linesep.join(lines))
+
+ def print_help(self, classes=False):
+ """Print the help for each Configurable class in self.classes.
+
+ If classes=False (the default), only flags and aliases are printed.
+ """
+ self.print_description()
+ self.print_subcommands()
+ self.print_options()
+
+ if classes:
+ help_classes = self.classes
+ if help_classes:
+ print("Class parameters")
+ print("----------------")
+ print()
+ for p in wrap_paragraphs(self.keyvalue_description):
+ print(p)
+ print()
+
+ for cls in help_classes:
+ cls.class_print_help()
+ print()
+ else:
+ print("To see all available configurables, use `--help-all`")
+ print()
+
+ self.print_examples()
+
+ def document_config_options(self):
+ """Generate rST format documentation for the config options this application
+
+ Returns a multiline string.
+ """
+ return '\n'.join(c.class_config_rst_doc()
+ for c in self._classes_inc_parents())
+
+
+ def print_description(self):
+ """Print the application description."""
+ for p in wrap_paragraphs(self.description):
+ print(p)
+ print()
+
+ def print_examples(self):
+ """Print usage and examples.
+
+ This usage string goes at the end of the command line help string
+ and should contain examples of the application's usage.
+ """
+ if self.examples:
+ print("Examples")
+ print("--------")
+ print()
+ print(indent(dedent(self.examples.strip())))
+ print()
+
+ def print_version(self):
+ """Print the version string."""
+ print(self.version)
+
+ @catch_config_error
+ def initialize_subcommand(self, subc, argv=None):
+ """Initialize a subcommand with argv."""
+ subapp,help = self.subcommands.get(subc)
+
if isinstance(subapp, six.string_types):
- subapp = import_item(subapp)
-
- # clear existing instances
- self.__class__.clear_instance()
- # instantiate
+ subapp = import_item(subapp)
+
+ # clear existing instances
+ self.__class__.clear_instance()
+ # instantiate
self.subapp = subapp.instance(parent=self)
- # and initialize subapp
- self.subapp.initialize(argv)
-
- def flatten_flags(self):
- """flatten flags and aliases, so cl-args override as expected.
-
- This prevents issues such as an alias pointing to InteractiveShell,
- but a config file setting the same trait in TerminalInteractiveShell
- getting inappropriate priority over the command-line arg.
-
- Only aliases with exactly one descendent in the class list
- will be promoted.
-
- """
- # build a tree of classes in our list that inherit from a particular
- # it will be a dict by parent classname of classes in our list
- # that are descendents
- mro_tree = defaultdict(list)
- for cls in self.classes:
- clsname = cls.__name__
- for parent in cls.mro()[1:-3]:
- # exclude cls itself and Configurable,HasTraits,object
- mro_tree[parent.__name__].append(clsname)
- # flatten aliases, which have the form:
- # { 'alias' : 'Class.trait' }
- aliases = {}
+ # and initialize subapp
+ self.subapp.initialize(argv)
+
+ def flatten_flags(self):
+ """flatten flags and aliases, so cl-args override as expected.
+
+ This prevents issues such as an alias pointing to InteractiveShell,
+ but a config file setting the same trait in TerminalInteractiveShell
+ getting inappropriate priority over the command-line arg.
+
+ Only aliases with exactly one descendent in the class list
+ will be promoted.
+
+ """
+ # build a tree of classes in our list that inherit from a particular parent;
+ # it will be a dict by parent classname of classes in our list
+ # that are descendents
+ mro_tree = defaultdict(list)
+ for cls in self.classes:
+ clsname = cls.__name__
+ for parent in cls.mro()[1:-3]:
+ # exclude cls itself and Configurable,HasTraits,object
+ mro_tree[parent.__name__].append(clsname)
+ # flatten aliases, which have the form:
+ # { 'alias' : 'Class.trait' }
+ aliases = {}
for alias, cls_trait in self.aliases.items():
- cls,trait = cls_trait.split('.',1)
- children = mro_tree[cls]
- if len(children) == 1:
- # exactly one descendent, promote alias
- cls = children[0]
- aliases[alias] = '.'.join([cls,trait])
-
- # flatten flags, which are of the form:
- # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
- flags = {}
+ cls,trait = cls_trait.split('.',1)
+ children = mro_tree[cls]
+ if len(children) == 1:
+ # exactly one descendent, promote alias
+ cls = children[0]
+ aliases[alias] = '.'.join([cls,trait])
+
+ # flatten flags, which are of the form:
+ # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
+ flags = {}
for key, (flagdict, help) in self.flags.items():
- newflag = {}
+ newflag = {}
for cls, subdict in flagdict.items():
- children = mro_tree[cls]
- # exactly one descendent, promote flag section
- if len(children) == 1:
- cls = children[0]
- newflag[cls] = subdict
- flags[key] = (newflag, help)
- return flags, aliases
-
- @catch_config_error
- def parse_command_line(self, argv=None):
- """Parse the command line arguments."""
- argv = sys.argv[1:] if argv is None else argv
- self.argv = [ py3compat.cast_unicode(arg) for arg in argv ]
-
- if argv and argv[0] == 'help':
- # turn `ipython help notebook` into `ipython notebook -h`
- argv = argv[1:] + ['-h']
-
- if self.subcommands and len(argv) > 0:
- # we have subcommands, and one may have been specified
- subc, subargv = argv[0], argv[1:]
- if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
- # it's a subcommand, and *not* a flag or class parameter
- return self.initialize_subcommand(subc, subargv)
-
- # Arguments after a '--' argument are for the script IPython may be
-        # about to run, not IPython itself. For arguments parsed here (help and
- # version), we want to only search the arguments up to the first
- # occurrence of '--', which we're calling interpreted_argv.
- try:
- interpreted_argv = argv[:argv.index('--')]
- except ValueError:
- interpreted_argv = argv
-
- if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
- self.print_help('--help-all' in interpreted_argv)
- self.exit(0)
-
- if '--version' in interpreted_argv or '-V' in interpreted_argv:
- self.print_version()
- self.exit(0)
-
- # flatten flags&aliases, so cl-args get appropriate priority:
- flags,aliases = self.flatten_flags()
- loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
- flags=flags, log=self.log)
+ children = mro_tree[cls]
+ # exactly one descendent, promote flag section
+ if len(children) == 1:
+ cls = children[0]
+ newflag[cls] = subdict
+ flags[key] = (newflag, help)
+ return flags, aliases
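
As an illustration of the promotion rule above, here is a minimal standalone sketch (with hypothetical class names, mirroring the loop in ``flatten_flags`` rather than calling it): an alias that points at a base class is rewritten to target the one subclass actually present in the class list::

    from collections import defaultdict

    class InteractiveShell(object): pass
    class TerminalInteractiveShell(InteractiveShell): pass

    classes = [TerminalInteractiveShell]
    aliases = {'colors': 'InteractiveShell.colors'}

    mro_tree = defaultdict(list)
    for cls in classes:
        for parent in cls.mro()[1:-1]:  # exclude cls itself and object
            mro_tree[parent.__name__].append(cls.__name__)

    flat = {}
    for alias, cls_trait in aliases.items():
        cls, trait = cls_trait.split('.', 1)
        children = mro_tree[cls]
        if len(children) == 1:
            # exactly one descendent in the list: promote the alias
            cls = children[0]
        flat[alias] = '.'.join([cls, trait])

    # flat == {'colors': 'TerminalInteractiveShell.colors'}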
+
+ @catch_config_error
+ def parse_command_line(self, argv=None):
+ """Parse the command line arguments."""
+ argv = sys.argv[1:] if argv is None else argv
+ self.argv = [ py3compat.cast_unicode(arg) for arg in argv ]
+
+ if argv and argv[0] == 'help':
+ # turn `ipython help notebook` into `ipython notebook -h`
+ argv = argv[1:] + ['-h']
+
+ if self.subcommands and len(argv) > 0:
+ # we have subcommands, and one may have been specified
+ subc, subargv = argv[0], argv[1:]
+ if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
+ # it's a subcommand, and *not* a flag or class parameter
+ return self.initialize_subcommand(subc, subargv)
+
+ # Arguments after a '--' argument are for the script IPython may be
+        # about to run, not IPython itself. For arguments parsed here (help and
+ # version), we want to only search the arguments up to the first
+ # occurrence of '--', which we're calling interpreted_argv.
+ try:
+ interpreted_argv = argv[:argv.index('--')]
+ except ValueError:
+ interpreted_argv = argv
+
+ if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
+ self.print_help('--help-all' in interpreted_argv)
+ self.exit(0)
+
+ if '--version' in interpreted_argv or '-V' in interpreted_argv:
+ self.print_version()
+ self.exit(0)
+
+ # flatten flags&aliases, so cl-args get appropriate priority:
+ flags,aliases = self.flatten_flags()
+ loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
+ flags=flags, log=self.log)
self.cli_config = deepcopy(loader.load_config())
self.update_config(self.cli_config)
- # store unparsed args in extra_args
- self.extra_args = loader.extra_args
-
- @classmethod
+ # store unparsed args in extra_args
+ self.extra_args = loader.extra_args
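
The ``--`` convention above is easy to check in isolation; this minimal sketch shows why a ``--help`` placed after ``--`` is left for the target script instead of being consumed here::

    argv = ['--Class.trait=1', '--', '--help']
    try:
        interpreted_argv = argv[:argv.index('--')]
    except ValueError:
        interpreted_argv = argv
    # interpreted_argv == ['--Class.trait=1']; the trailing '--help'
    # is passed through to the script rather than handled here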
+
+ @classmethod
def _load_config_files(cls, basefilename, path=None, log=None, raise_config_file_errors=False):
- """Load config files (py,json) by filename and path.
-
- yield each config object in turn.
- """
-
- if not isinstance(path, list):
- path = [path]
- for path in path[::-1]:
- # path list is in descending priority order, so load files backwards:
- pyloader = cls.python_config_loader_class(basefilename+'.py', path=path, log=log)
- if log:
+ """Load config files (py,json) by filename and path.
+
+ yield each config object in turn.
+ """
+
+ if not isinstance(path, list):
+ path = [path]
+ for path in path[::-1]:
+ # path list is in descending priority order, so load files backwards:
+ pyloader = cls.python_config_loader_class(basefilename+'.py', path=path, log=log)
+ if log:
log.debug("Looking for %s in %s", basefilename, path or os.getcwd())
- jsonloader = cls.json_config_loader_class(basefilename+'.json', path=path, log=log)
+ jsonloader = cls.json_config_loader_class(basefilename+'.json', path=path, log=log)
loaded = []
filenames = []
- for loader in [pyloader, jsonloader]:
+ for loader in [pyloader, jsonloader]:
config = None
- try:
- config = loader.load_config()
- except ConfigFileNotFound:
- pass
- except Exception:
- # try to get the full filename, but it will be empty in the
-                    # unlikely event that the error was raised before filefind finished
- filename = loader.full_filename or basefilename
- # problem while running the file
+ try:
+ config = loader.load_config()
+ except ConfigFileNotFound:
+ pass
+ except Exception:
+ # try to get the full filename, but it will be empty in the
+                    # unlikely event that the error was raised before filefind finished
+ filename = loader.full_filename or basefilename
+ # problem while running the file
if raise_config_file_errors:
raise
- if log:
- log.error("Exception while loading config file %s",
- filename, exc_info=True)
- else:
- if log:
- log.debug("Loaded config file: %s", loader.full_filename)
- if config:
+ if log:
+ log.error("Exception while loading config file %s",
+ filename, exc_info=True)
+ else:
+ if log:
+ log.debug("Loaded config file: %s", loader.full_filename)
+ if config:
for filename, earlier_config in zip(filenames, loaded):
collisions = earlier_config.collisions(config)
if collisions and log:
@@ -587,16 +587,16 @@ class Application(SingletonConfigurable):
yield (config, loader.full_filename)
loaded.append(config)
filenames.append(loader.full_filename)
-
+
@property
def loaded_config_files(self):
"""Currently loaded configuration files"""
return self._loaded_config_files[:]
-
- @catch_config_error
- def load_config_file(self, filename, path=None):
- """Load config files by filename and path."""
- filename, ext = os.path.splitext(filename)
+
+ @catch_config_error
+ def load_config_file(self, filename, path=None):
+ """Load config files by filename and path."""
+ filename, ext = os.path.splitext(filename)
new_config = Config()
for (config, filename) in self._load_config_files(filename, path=path, log=self.log,
raise_config_file_errors=self.raise_config_file_errors,
@@ -607,8 +607,8 @@ class Application(SingletonConfigurable):
# add self.cli_config to preserve CLI config priority
new_config.merge(self.cli_config)
self.update_config(new_config)
-
-
+
+
def _classes_in_config_sample(self):
"""
Yields only classes with own traits, and their subclasses.
@@ -641,71 +641,71 @@ class Application(SingletonConfigurable):
if inc_yes:
yield cl
- def generate_config_file(self):
- """generate default config file from Configurables"""
- lines = ["# Configuration file for %s." % self.name]
- lines.append('')
+ def generate_config_file(self):
+ """generate default config file from Configurables"""
+ lines = ["# Configuration file for %s." % self.name]
+ lines.append('')
for cls in self._classes_in_config_sample():
- lines.append(cls.class_config_section())
- return '\n'.join(lines)
-
- def exit(self, exit_status=0):
- self.log.debug("Exiting application: %s" % self.name)
- sys.exit(exit_status)
-
- @classmethod
- def launch_instance(cls, argv=None, **kwargs):
- """Launch a global instance of this Application
-
- If a global instance already exists, this reinitializes and starts it
- """
- app = cls.instance(**kwargs)
- app.initialize(argv)
- app.start()
-
-#-----------------------------------------------------------------------------
-# utility functions, for convenience
-#-----------------------------------------------------------------------------
-
-def boolean_flag(name, configurable, set_help='', unset_help=''):
- """Helper for building basic --trait, --no-trait flags.
-
- Parameters
- ----------
-
- name : str
- The name of the flag.
- configurable : str
- The 'Class.trait' string of the trait to be set/unset with the flag
- set_help : unicode
- help string for --name flag
- unset_help : unicode
- help string for --no-name flag
-
- Returns
- -------
-
- cfg : dict
- A dict with two keys: 'name', and 'no-name', for setting and unsetting
- the trait, respectively.
- """
- # default helpstrings
- set_help = set_help or "set %s=True"%configurable
- unset_help = unset_help or "set %s=False"%configurable
-
- cls,trait = configurable.split('.')
-
- setter = {cls : {trait : True}}
- unsetter = {cls : {trait : False}}
- return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
-
-
-def get_config():
- """Get the config object for the global Application instance, if there is one
-
- otherwise return an empty config object
- """
- if Application.initialized():
- return Application.instance().config
- else:
- return Config()
+ lines.append(cls.class_config_section())
+ return '\n'.join(lines)
+
+ def exit(self, exit_status=0):
+ self.log.debug("Exiting application: %s" % self.name)
+ sys.exit(exit_status)
+
+ @classmethod
+ def launch_instance(cls, argv=None, **kwargs):
+ """Launch a global instance of this Application
+
+ If a global instance already exists, this reinitializes and starts it
+ """
+ app = cls.instance(**kwargs)
+ app.initialize(argv)
+ app.start()
+
+#-----------------------------------------------------------------------------
+# utility functions, for convenience
+#-----------------------------------------------------------------------------
+
+def boolean_flag(name, configurable, set_help='', unset_help=''):
+ """Helper for building basic --trait, --no-trait flags.
+
+ Parameters
+ ----------
+
+ name : str
+ The name of the flag.
+ configurable : str
+ The 'Class.trait' string of the trait to be set/unset with the flag
+ set_help : unicode
+ help string for --name flag
+ unset_help : unicode
+ help string for --no-name flag
+
+ Returns
+ -------
+
+ cfg : dict
+ A dict with two keys: 'name', and 'no-name', for setting and unsetting
+ the trait, respectively.
+ """
+ # default helpstrings
+ set_help = set_help or "set %s=True"%configurable
+ unset_help = unset_help or "set %s=False"%configurable
+
+ cls,trait = configurable.split('.')
+
+ setter = {cls : {trait : True}}
+ unsetter = {cls : {trait : False}}
+ return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
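
For example (the trait name here is chosen for illustration), the helper returns the matched pair of flag entries::

    flags = boolean_flag('autoindent', 'InteractiveShell.autoindent')
    # {'autoindent':    ({'InteractiveShell': {'autoindent': True}},
    #                    'set InteractiveShell.autoindent=True'),
    #  'no-autoindent': ({'InteractiveShell': {'autoindent': False}},
    #                    'set InteractiveShell.autoindent=False')}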
+
+
+def get_config():
+ """Get the config object for the global Application instance, if there is one
+
+ otherwise return an empty config object
+ """
+ if Application.initialized():
+ return Application.instance().config
+ else:
+ return Config()
diff --git a/contrib/python/traitlets/py2/traitlets/config/configurable.py b/contrib/python/traitlets/py2/traitlets/config/configurable.py
index acb81cb208..1174fcf017 100644
--- a/contrib/python/traitlets/py2/traitlets/config/configurable.py
+++ b/contrib/python/traitlets/py2/traitlets/config/configurable.py
@@ -1,191 +1,191 @@
-# encoding: utf-8
-"""A base class for objects that are configurable."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
+# encoding: utf-8
+"""A base class for objects that are configurable."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
from __future__ import print_function, absolute_import
-
-from copy import deepcopy
+
+from copy import deepcopy
import warnings
-
+
from .loader import Config, LazyConfigValue, _is_section_key
-from traitlets.traitlets import HasTraits, Instance, observe, observe_compat, default
-from ipython_genutils.text import indent, dedent, wrap_paragraphs
-
-
-#-----------------------------------------------------------------------------
-# Helper classes for Configurables
-#-----------------------------------------------------------------------------
-
-
-class ConfigurableError(Exception):
- pass
-
-
-class MultipleInstanceError(ConfigurableError):
- pass
-
-#-----------------------------------------------------------------------------
-# Configurable implementation
-#-----------------------------------------------------------------------------
-
-class Configurable(HasTraits):
-
- config = Instance(Config, (), {})
- parent = Instance('traitlets.config.configurable.Configurable', allow_none=True)
-
- def __init__(self, **kwargs):
-        """Create a configurable given a config.
-
- Parameters
- ----------
- config : Config
- If this is empty, default values are used. If config is a
- :class:`Config` instance, it will be used to configure the
- instance.
- parent : Configurable instance, optional
- The parent Configurable instance of this object.
-
- Notes
- -----
- Subclasses of Configurable must call the :meth:`__init__` method of
- :class:`Configurable` *before* doing anything else and using
- :func:`super`::
-
- class MyConfigurable(Configurable):
- def __init__(self, config=None):
- super(MyConfigurable, self).__init__(config=config)
- # Then any other code you need to finish initialization.
-
- This ensures that instances will be configured properly.
- """
- parent = kwargs.pop('parent', None)
- if parent is not None:
- # config is implied from parent
- if kwargs.get('config', None) is None:
- kwargs['config'] = parent.config
- self.parent = parent
-
- config = kwargs.pop('config', None)
-
- # load kwarg traits, other than config
- super(Configurable, self).__init__(**kwargs)
-
- # load config
- if config is not None:
- # We used to deepcopy, but for now we are trying to just save
- # by reference. This *could* have side effects as all components
- # will share config. In fact, I did find such a side effect in
- # _config_changed below. If a config attribute value was a mutable type
- # all instances of a component were getting the same copy, effectively
- # making that a class attribute.
- # self.config = deepcopy(config)
- self.config = config
- else:
- # allow _config_default to return something
- self._load_config(self.config)
-
- # Ensure explicit kwargs are applied after loading config.
- # This is usually redundant, but ensures config doesn't override
- # explicitly assigned values.
- for key, value in kwargs.items():
- setattr(self, key, value)
-
- #-------------------------------------------------------------------------
-    # Static trait notifications
- #-------------------------------------------------------------------------
-
- @classmethod
- def section_names(cls):
- """return section names as a list"""
- return [c.__name__ for c in reversed(cls.__mro__) if
- issubclass(c, Configurable) and issubclass(cls, c)
- ]
-
- def _find_my_config(self, cfg):
- """extract my config from a global Config object
-
- will construct a Config object of only the config values that apply to me
- based on my mro(), as well as those of my parent(s) if they exist.
-
- If I am Bar and my parent is Foo, and their parent is Tim,
-        this will merge the following config sections, in this order::
-
- [Bar, Foo.bar, Tim.Foo.Bar]
-
- With the last item being the highest priority.
- """
- cfgs = [cfg]
- if self.parent:
- cfgs.append(self.parent._find_my_config(cfg))
- my_config = Config()
- for c in cfgs:
- for sname in self.section_names():
- # Don't do a blind getattr as that would cause the config to
- # dynamically create the section with name Class.__name__.
- if c._has_section(sname):
- my_config.merge(c[sname])
- return my_config
-
- def _load_config(self, cfg, section_names=None, traits=None):
- """load traits from a Config object"""
-
- if traits is None:
- traits = self.traits(config=True)
- if section_names is None:
- section_names = self.section_names()
-
- my_config = self._find_my_config(cfg)
-
- # hold trait notifications until after all config has been loaded
- with self.hold_trait_notifications():
+from traitlets.traitlets import HasTraits, Instance, observe, observe_compat, default
+from ipython_genutils.text import indent, dedent, wrap_paragraphs
+
+
+#-----------------------------------------------------------------------------
+# Helper classes for Configurables
+#-----------------------------------------------------------------------------
+
+
+class ConfigurableError(Exception):
+ pass
+
+
+class MultipleInstanceError(ConfigurableError):
+ pass
+
+#-----------------------------------------------------------------------------
+# Configurable implementation
+#-----------------------------------------------------------------------------
+
+class Configurable(HasTraits):
+
+ config = Instance(Config, (), {})
+ parent = Instance('traitlets.config.configurable.Configurable', allow_none=True)
+
+ def __init__(self, **kwargs):
+        """Create a configurable given a config.
+
+ Parameters
+ ----------
+ config : Config
+ If this is empty, default values are used. If config is a
+ :class:`Config` instance, it will be used to configure the
+ instance.
+ parent : Configurable instance, optional
+ The parent Configurable instance of this object.
+
+ Notes
+ -----
+ Subclasses of Configurable must call the :meth:`__init__` method of
+ :class:`Configurable` *before* doing anything else and using
+ :func:`super`::
+
+ class MyConfigurable(Configurable):
+ def __init__(self, config=None):
+ super(MyConfigurable, self).__init__(config=config)
+ # Then any other code you need to finish initialization.
+
+ This ensures that instances will be configured properly.
+ """
+ parent = kwargs.pop('parent', None)
+ if parent is not None:
+ # config is implied from parent
+ if kwargs.get('config', None) is None:
+ kwargs['config'] = parent.config
+ self.parent = parent
+
+ config = kwargs.pop('config', None)
+
+ # load kwarg traits, other than config
+ super(Configurable, self).__init__(**kwargs)
+
+ # load config
+ if config is not None:
+ # We used to deepcopy, but for now we are trying to just save
+ # by reference. This *could* have side effects as all components
+ # will share config. In fact, I did find such a side effect in
+ # _config_changed below. If a config attribute value was a mutable type
+ # all instances of a component were getting the same copy, effectively
+ # making that a class attribute.
+ # self.config = deepcopy(config)
+ self.config = config
+ else:
+ # allow _config_default to return something
+ self._load_config(self.config)
+
+ # Ensure explicit kwargs are applied after loading config.
+ # This is usually redundant, but ensures config doesn't override
+ # explicitly assigned values.
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+
+ #-------------------------------------------------------------------------
+    # Static trait notifications
+ #-------------------------------------------------------------------------
+
+ @classmethod
+ def section_names(cls):
+ """return section names as a list"""
+ return [c.__name__ for c in reversed(cls.__mro__) if
+ issubclass(c, Configurable) and issubclass(cls, c)
+ ]
+
+ def _find_my_config(self, cfg):
+ """extract my config from a global Config object
+
+ will construct a Config object of only the config values that apply to me
+ based on my mro(), as well as those of my parent(s) if they exist.
+
+ If I am Bar and my parent is Foo, and their parent is Tim,
+        this will merge the following config sections, in this order::
+
+ [Bar, Foo.bar, Tim.Foo.Bar]
+
+ With the last item being the highest priority.
+ """
+ cfgs = [cfg]
+ if self.parent:
+ cfgs.append(self.parent._find_my_config(cfg))
+ my_config = Config()
+ for c in cfgs:
+ for sname in self.section_names():
+ # Don't do a blind getattr as that would cause the config to
+ # dynamically create the section with name Class.__name__.
+ if c._has_section(sname):
+ my_config.merge(c[sname])
+ return my_config
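
A small sketch of the section lookup (hypothetical classes; it calls the private helper directly, which is fine for illustration): sections for classes later in the MRO outrank their parents' sections::

    class Foo(Configurable): pass
    class Bar(Foo): pass

    cfg = Config({'Foo': {'timeout': 1}, 'Bar': {'timeout': 2}})
    bar = Bar()
    bar._find_my_config(cfg)   # -> {'timeout': 2}: Bar's section wins over Foo's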
+
+ def _load_config(self, cfg, section_names=None, traits=None):
+ """load traits from a Config object"""
+
+ if traits is None:
+ traits = self.traits(config=True)
+ if section_names is None:
+ section_names = self.section_names()
+
+ my_config = self._find_my_config(cfg)
+
+ # hold trait notifications until after all config has been loaded
+ with self.hold_trait_notifications():
for name, config_value in my_config.items():
- if name in traits:
- if isinstance(config_value, LazyConfigValue):
- # ConfigValue is a wrapper for using append / update on containers
- # without having to copy the initial value
- initial = getattr(self, name)
- config_value = config_value.get_value(initial)
- # We have to do a deepcopy here if we don't deepcopy the entire
- # config object. If we don't, a mutable config_value will be
- # shared by all instances, effectively making it a class attribute.
- setattr(self, name, deepcopy(config_value))
+ if name in traits:
+ if isinstance(config_value, LazyConfigValue):
+ # ConfigValue is a wrapper for using append / update on containers
+ # without having to copy the initial value
+ initial = getattr(self, name)
+ config_value = config_value.get_value(initial)
+ # We have to do a deepcopy here if we don't deepcopy the entire
+ # config object. If we don't, a mutable config_value will be
+ # shared by all instances, effectively making it a class attribute.
+ setattr(self, name, deepcopy(config_value))
elif not _is_section_key(name) and not isinstance(config_value, Config):
- from difflib import get_close_matches
+ from difflib import get_close_matches
if isinstance(self, LoggingConfigurable):
warn = self.log.warning
else:
warn = lambda msg: warnings.warn(msg, stacklevel=9)
- matches = get_close_matches(name, traits)
+ matches = get_close_matches(name, traits)
msg = u"Config option `{option}` not recognized by `{klass}`.".format(
option=name, klass=self.__class__.__name__)
- if len(matches) == 1:
+ if len(matches) == 1:
msg += u" Did you mean `{matches}`?".format(matches=matches[0])
- elif len(matches) >= 1:
+ elif len(matches) >= 1:
                msg += " Did you mean one of: `{matches}`?".format(matches=', '.join(sorted(matches)))
warn(msg)
-
- @observe('config')
- @observe_compat
- def _config_changed(self, change):
- """Update all the class traits having ``config=True`` in metadata.
-
- For any class trait with a ``config`` metadata attribute that is
- ``True``, we update the trait with the value of the corresponding
- config entry.
- """
- # Get all traits with a config metadata entry that is True
- traits = self.traits(config=True)
-
- # We auto-load config section for this class as well as any parent
- # classes that are Configurable subclasses. This starts with Configurable
- # and works down the mro loading the config for each section.
- section_names = self.section_names()
+
+ @observe('config')
+ @observe_compat
+ def _config_changed(self, change):
+ """Update all the class traits having ``config=True`` in metadata.
+
+ For any class trait with a ``config`` metadata attribute that is
+ ``True``, we update the trait with the value of the corresponding
+ config entry.
+ """
+ # Get all traits with a config metadata entry that is True
+ traits = self.traits(config=True)
+
+ # We auto-load config section for this class as well as any parent
+ # classes that are Configurable subclasses. This starts with Configurable
+ # and works down the mro loading the config for each section.
+ section_names = self.section_names()
self._load_config(change.new, traits=traits, section_names=section_names)
-
- def update_config(self, config):
+
+ def update_config(self, config):
"""Update config and load the new values"""
# traitlets prior to 4.2 created a copy of self.config in order to trigger change events.
# Some projects (IPython < 5) relied upon one side effect of this,
@@ -197,236 +197,236 @@ class Configurable(HasTraits):
# load config
self._load_config(config)
# merge it into self.config
- self.config.merge(config)
+ self.config.merge(config)
# TODO: trigger change event if/when dict-update change events take place
# DO NOT trigger full trait-change
-
- @classmethod
- def class_get_help(cls, inst=None):
- """Get the help string for this class in ReST format.
-
-        If `inst` is given, its current trait values will be used in place of
- class defaults.
- """
- assert inst is None or isinstance(inst, cls)
- final_help = []
- final_help.append(u'%s options' % cls.__name__)
- final_help.append(len(final_help[0])*u'-')
- for k, v in sorted(cls.class_traits(config=True).items()):
- help = cls.class_get_trait_help(v, inst)
- final_help.append(help)
- return '\n'.join(final_help)
-
- @classmethod
- def class_get_trait_help(cls, trait, inst=None):
- """Get the help string for a single trait.
-
-        If `inst` is given, its current trait values will be used in place of
- the class default.
- """
- assert inst is None or isinstance(inst, cls)
- lines = []
- header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
- lines.append(header)
- if inst is not None:
- lines.append(indent('Current: %r' % getattr(inst, trait.name), 4))
- else:
- try:
- dvr = trait.default_value_repr()
- except Exception:
- dvr = None # ignore defaults we can't construct
- if dvr is not None:
- if len(dvr) > 64:
- dvr = dvr[:61]+'...'
- lines.append(indent('Default: %s' % dvr, 4))
- if 'Enum' in trait.__class__.__name__:
- # include Enum choices
- lines.append(indent('Choices: %r' % (trait.values,)))
-
- help = trait.help
- if help != '':
- help = '\n'.join(wrap_paragraphs(help, 76))
- lines.append(indent(help, 4))
- return '\n'.join(lines)
-
- @classmethod
- def class_print_help(cls, inst=None):
- """Get the help string for a single trait and print it."""
- print(cls.class_get_help(inst))
-
- @classmethod
- def class_config_section(cls):
- """Get the config class config section"""
- def c(s):
- """return a commented, wrapped block."""
- s = '\n\n'.join(wrap_paragraphs(s, 78))
-
+
+ @classmethod
+ def class_get_help(cls, inst=None):
+ """Get the help string for this class in ReST format.
+
+        If `inst` is given, its current trait values will be used in place of
+ class defaults.
+ """
+ assert inst is None or isinstance(inst, cls)
+ final_help = []
+ final_help.append(u'%s options' % cls.__name__)
+ final_help.append(len(final_help[0])*u'-')
+ for k, v in sorted(cls.class_traits(config=True).items()):
+ help = cls.class_get_trait_help(v, inst)
+ final_help.append(help)
+ return '\n'.join(final_help)
+
+ @classmethod
+ def class_get_trait_help(cls, trait, inst=None):
+ """Get the help string for a single trait.
+
+        If `inst` is given, its current trait values will be used in place of
+ the class default.
+ """
+ assert inst is None or isinstance(inst, cls)
+ lines = []
+ header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
+ lines.append(header)
+ if inst is not None:
+ lines.append(indent('Current: %r' % getattr(inst, trait.name), 4))
+ else:
+ try:
+ dvr = trait.default_value_repr()
+ except Exception:
+ dvr = None # ignore defaults we can't construct
+ if dvr is not None:
+ if len(dvr) > 64:
+ dvr = dvr[:61]+'...'
+ lines.append(indent('Default: %s' % dvr, 4))
+ if 'Enum' in trait.__class__.__name__:
+ # include Enum choices
+ lines.append(indent('Choices: %r' % (trait.values,)))
+
+ help = trait.help
+ if help != '':
+ help = '\n'.join(wrap_paragraphs(help, 76))
+ lines.append(indent(help, 4))
+ return '\n'.join(lines)
+
+ @classmethod
+ def class_print_help(cls, inst=None):
+ """Get the help string for a single trait and print it."""
+ print(cls.class_get_help(inst))
+
+ @classmethod
+ def class_config_section(cls):
+ """Get the config class config section"""
+ def c(s):
+ """return a commented, wrapped block."""
+ s = '\n\n'.join(wrap_paragraphs(s, 78))
+
return '## ' + s.replace('\n', '\n# ')
-
- # section header
- breaker = '#' + '-'*78
+
+ # section header
+ breaker = '#' + '-'*78
parent_classes = ','.join(p.__name__ for p in cls.__bases__)
s = "# %s(%s) configuration" % (cls.__name__, parent_classes)
- lines = [breaker, s, breaker, '']
- # get the description trait
- desc = cls.class_traits().get('description')
- if desc:
- desc = desc.default_value
+ lines = [breaker, s, breaker, '']
+ # get the description trait
+ desc = cls.class_traits().get('description')
+ if desc:
+ desc = desc.default_value
if not desc:
# no description from trait, use __doc__
- desc = getattr(cls, '__doc__', '')
- if desc:
- lines.append(c(desc))
- lines.append('')
-
- for name, trait in sorted(cls.class_own_traits(config=True).items()):
- lines.append(c(trait.help))
+ desc = getattr(cls, '__doc__', '')
+ if desc:
+ lines.append(c(desc))
+ lines.append('')
+
+ for name, trait in sorted(cls.class_own_traits(config=True).items()):
+ lines.append(c(trait.help))
lines.append('#c.%s.%s = %s' % (cls.__name__, name, trait.default_value_repr()))
- lines.append('')
- return '\n'.join(lines)
-
- @classmethod
- def class_config_rst_doc(cls):
- """Generate rST documentation for this class' config options.
-
- Excludes traits defined on parent classes.
- """
- lines = []
- classname = cls.__name__
- for k, trait in sorted(cls.class_own_traits(config=True).items()):
- ttype = trait.__class__.__name__
-
- termline = classname + '.' + trait.name
-
- # Choices or type
- if 'Enum' in ttype:
- # include Enum choices
- termline += ' : ' + '|'.join(repr(x) for x in trait.values)
- else:
- termline += ' : ' + ttype
- lines.append(termline)
-
- # Default value
- try:
- dvr = trait.default_value_repr()
- except Exception:
- dvr = None # ignore defaults we can't construct
- if dvr is not None:
- if len(dvr) > 64:
- dvr = dvr[:61]+'...'
- # Double up backslashes, so they get to the rendered docs
- dvr = dvr.replace('\\n', '\\\\n')
- lines.append(' Default: ``%s``' % dvr)
- lines.append('')
-
- help = trait.help or 'No description'
- lines.append(indent(dedent(help), 4))
-
- # Blank line
- lines.append('')
-
- return '\n'.join(lines)
-
-
-
-class LoggingConfigurable(Configurable):
- """A parent class for Configurables that log.
-
- Subclasses have a log trait, and the default behavior
- is to get the logger from the currently running Application.
- """
-
- log = Instance('logging.Logger')
- @default('log')
- def _log_default(self):
- from traitlets import log
- return log.get_logger()
-
-
-class SingletonConfigurable(LoggingConfigurable):
- """A configurable that only allows one instance.
-
- This class is for classes that should only have one instance of itself
- or *any* subclass. To create and retrieve such a class use the
- :meth:`SingletonConfigurable.instance` method.
- """
-
- _instance = None
-
- @classmethod
- def _walk_mro(cls):
- """Walk the cls.mro() for parent classes that are also singletons
-
- For use in instance()
- """
-
- for subclass in cls.mro():
- if issubclass(cls, subclass) and \
- issubclass(subclass, SingletonConfigurable) and \
- subclass != SingletonConfigurable:
- yield subclass
-
- @classmethod
- def clear_instance(cls):
- """unset _instance for this class and singleton parents.
- """
- if not cls.initialized():
- return
- for subclass in cls._walk_mro():
- if isinstance(subclass._instance, cls):
- # only clear instances that are instances
- # of the calling class
- subclass._instance = None
-
- @classmethod
- def instance(cls, *args, **kwargs):
- """Returns a global instance of this class.
-
-        This method creates a new instance if none has previously been created
-        and returns a previously created instance if one already exists.
-
- The arguments and keyword arguments passed to this method are passed
- on to the :meth:`__init__` method of the class upon instantiation.
-
- Examples
- --------
-
- Create a singleton class using instance, and retrieve it::
-
- >>> from traitlets.config.configurable import SingletonConfigurable
- >>> class Foo(SingletonConfigurable): pass
- >>> foo = Foo.instance()
- >>> foo == Foo.instance()
- True
-
-        Create a subclass that is retrieved using the base class instance::
-
- >>> class Bar(SingletonConfigurable): pass
- >>> class Bam(Bar): pass
- >>> bam = Bam.instance()
- >>> bam == Bar.instance()
- True
- """
- # Create and save the instance
- if cls._instance is None:
- inst = cls(*args, **kwargs)
- # Now make sure that the instance will also be returned by
- # parent classes' _instance attribute.
- for subclass in cls._walk_mro():
- subclass._instance = inst
-
- if isinstance(cls._instance, cls):
- return cls._instance
- else:
- raise MultipleInstanceError(
- 'Multiple incompatible subclass instances of '
- '%s are being created.' % cls.__name__
- )
-
- @classmethod
- def initialized(cls):
- """Has an instance been created?"""
- return hasattr(cls, "_instance") and cls._instance is not None
-
-
-
+ lines.append('')
+ return '\n'.join(lines)
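
The generated section is plain commented Python, roughly of this shape (hypothetical class and trait; the real breaker line is 79 characters wide)::

    #------------------------------------------------------------------------------
    # MyTool(Configurable) configuration
    #------------------------------------------------------------------------------

    ## Help text for the trait, wrapped to 78 columns.
    #c.MyTool.timeout = 0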
+
+ @classmethod
+ def class_config_rst_doc(cls):
+ """Generate rST documentation for this class' config options.
+
+ Excludes traits defined on parent classes.
+ """
+ lines = []
+ classname = cls.__name__
+ for k, trait in sorted(cls.class_own_traits(config=True).items()):
+ ttype = trait.__class__.__name__
+
+ termline = classname + '.' + trait.name
+
+ # Choices or type
+ if 'Enum' in ttype:
+ # include Enum choices
+ termline += ' : ' + '|'.join(repr(x) for x in trait.values)
+ else:
+ termline += ' : ' + ttype
+ lines.append(termline)
+
+ # Default value
+ try:
+ dvr = trait.default_value_repr()
+ except Exception:
+ dvr = None # ignore defaults we can't construct
+ if dvr is not None:
+ if len(dvr) > 64:
+ dvr = dvr[:61]+'...'
+ # Double up backslashes, so they get to the rendered docs
+ dvr = dvr.replace('\\n', '\\\\n')
+ lines.append(' Default: ``%s``' % dvr)
+ lines.append('')
+
+ help = trait.help or 'No description'
+ lines.append(indent(dedent(help), 4))
+
+ # Blank line
+ lines.append('')
+
+ return '\n'.join(lines)
+
+
+
+class LoggingConfigurable(Configurable):
+ """A parent class for Configurables that log.
+
+ Subclasses have a log trait, and the default behavior
+ is to get the logger from the currently running Application.
+ """
+
+ log = Instance('logging.Logger')
+ @default('log')
+ def _log_default(self):
+ from traitlets import log
+ return log.get_logger()
+
+
+class SingletonConfigurable(LoggingConfigurable):
+ """A configurable that only allows one instance.
+
+ This class is for classes that should only have one instance of itself
+ or *any* subclass. To create and retrieve such a class use the
+ :meth:`SingletonConfigurable.instance` method.
+ """
+
+ _instance = None
+
+ @classmethod
+ def _walk_mro(cls):
+ """Walk the cls.mro() for parent classes that are also singletons
+
+ For use in instance()
+ """
+
+ for subclass in cls.mro():
+ if issubclass(cls, subclass) and \
+ issubclass(subclass, SingletonConfigurable) and \
+ subclass != SingletonConfigurable:
+ yield subclass
+
+ @classmethod
+ def clear_instance(cls):
+ """unset _instance for this class and singleton parents.
+ """
+ if not cls.initialized():
+ return
+ for subclass in cls._walk_mro():
+ if isinstance(subclass._instance, cls):
+ # only clear instances that are instances
+ # of the calling class
+ subclass._instance = None
+
+ @classmethod
+ def instance(cls, *args, **kwargs):
+ """Returns a global instance of this class.
+
+        This method creates a new instance if none has previously been created
+        and returns a previously created instance if one already exists.
+
+ The arguments and keyword arguments passed to this method are passed
+ on to the :meth:`__init__` method of the class upon instantiation.
+
+ Examples
+ --------
+
+ Create a singleton class using instance, and retrieve it::
+
+ >>> from traitlets.config.configurable import SingletonConfigurable
+ >>> class Foo(SingletonConfigurable): pass
+ >>> foo = Foo.instance()
+ >>> foo == Foo.instance()
+ True
+
+        Create a subclass that is retrieved using the base class instance::
+
+ >>> class Bar(SingletonConfigurable): pass
+ >>> class Bam(Bar): pass
+ >>> bam = Bam.instance()
+ >>> bam == Bar.instance()
+ True
+ """
+ # Create and save the instance
+ if cls._instance is None:
+ inst = cls(*args, **kwargs)
+ # Now make sure that the instance will also be returned by
+ # parent classes' _instance attribute.
+ for subclass in cls._walk_mro():
+ subclass._instance = inst
+
+ if isinstance(cls._instance, cls):
+ return cls._instance
+ else:
+ raise MultipleInstanceError(
+ 'Multiple incompatible subclass instances of '
+ '%s are being created.' % cls.__name__
+ )
+
+ @classmethod
+ def initialized(cls):
+ """Has an instance been created?"""
+ return hasattr(cls, "_instance") and cls._instance is not None
+
+
+
diff --git a/contrib/python/traitlets/py2/traitlets/config/loader.py b/contrib/python/traitlets/py2/traitlets/config/loader.py
index 883ef695ac..803b36276f 100644
--- a/contrib/python/traitlets/py2/traitlets/config/loader.py
+++ b/contrib/python/traitlets/py2/traitlets/config/loader.py
@@ -1,392 +1,392 @@
-# encoding: utf-8
-"""A simple configuration system."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import argparse
-import copy
-import logging
-import os
-import re
-import sys
-import json
-from ast import literal_eval
-
-from ipython_genutils.path import filefind
-from ipython_genutils import py3compat
-from ipython_genutils.encoding import DEFAULT_ENCODING
+# encoding: utf-8
+"""A simple configuration system."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import argparse
+import copy
+import logging
+import os
+import re
+import sys
+import json
+from ast import literal_eval
+
+from ipython_genutils.path import filefind
+from ipython_genutils import py3compat
+from ipython_genutils.encoding import DEFAULT_ENCODING
from six import text_type
-from traitlets.traitlets import HasTraits, List, Any
-
-#-----------------------------------------------------------------------------
-# Exceptions
-#-----------------------------------------------------------------------------
-
-
-class ConfigError(Exception):
- pass
-
-class ConfigLoaderError(ConfigError):
- pass
-
-class ConfigFileNotFound(ConfigError):
- pass
-
-class ArgumentError(ConfigLoaderError):
- pass
-
-#-----------------------------------------------------------------------------
-# Argparse fix
-#-----------------------------------------------------------------------------
-
-# Unfortunately argparse by default prints help messages to stderr instead of
-# stdout. This makes it annoying to capture long help screens at the command
-# line, since one must know how to pipe stderr, which many users don't know how
-# to do. So we override the print_help method with one that defaults to
-# stdout and use our class instead.
-
-class ArgumentParser(argparse.ArgumentParser):
- """Simple argparse subclass that prints help to stdout by default."""
-
- def print_help(self, file=None):
- if file is None:
- file = sys.stdout
- return super(ArgumentParser, self).print_help(file)
-
- print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
-
-#-----------------------------------------------------------------------------
-# Config class for holding config information
-#-----------------------------------------------------------------------------
-
-class LazyConfigValue(HasTraits):
- """Proxy object for exposing methods on configurable containers
-
- Exposes:
-
- - append, extend, insert on lists
- - update on dicts
- - update, add on sets
- """
-
- _value = None
-
- # list methods
- _extend = List()
- _prepend = List()
-
- def append(self, obj):
- self._extend.append(obj)
-
- def extend(self, other):
- self._extend.extend(other)
-
- def prepend(self, other):
- """like list.extend, but for the front"""
- self._prepend[:0] = other
-
- _inserts = List()
- def insert(self, index, other):
- if not isinstance(index, int):
- raise TypeError("An integer is required")
- self._inserts.append((index, other))
-
- # dict methods
- # update is used for both dict and set
- _update = Any()
- def update(self, other):
- if self._update is None:
- if isinstance(other, dict):
- self._update = {}
- else:
- self._update = set()
- self._update.update(other)
-
- # set methods
- def add(self, obj):
- self.update({obj})
-
- def get_value(self, initial):
- """construct the value from the initial one
-
- after applying any insert / extend / update changes
- """
- if self._value is not None:
- return self._value
- value = copy.deepcopy(initial)
- if isinstance(value, list):
- for idx, obj in self._inserts:
- value.insert(idx, obj)
- value[:0] = self._prepend
- value.extend(self._extend)
-
- elif isinstance(value, dict):
- if self._update:
- value.update(self._update)
- elif isinstance(value, set):
- if self._update:
- value.update(self._update)
- self._value = value
- return value
-
- def to_dict(self):
- """return JSONable dict form of my data
-
-        Currently: update as a dict or set; extend and prepend as lists; inserts as a list of tuples.
- """
- d = {}
- if self._update:
- d['update'] = self._update
- if self._extend:
- d['extend'] = self._extend
- if self._prepend:
- d['prepend'] = self._prepend
- elif self._inserts:
- d['inserts'] = self._inserts
- return d
-
-
-def _is_section_key(key):
- """Is a Config key a section name (does it start with a capital)?"""
- if key and key[0].upper()==key[0] and not key.startswith('_'):
- return True
- else:
- return False
-
-
-class Config(dict):
- """An attribute based dict that can do smart merges."""
-
- def __init__(self, *args, **kwds):
- dict.__init__(self, *args, **kwds)
- self._ensure_subconfig()
-
- def _ensure_subconfig(self):
-        """ensure that sub-dicts that should be Config objects are Config objects
-
- casts dicts that are under section keys to Config objects,
- which is necessary for constructing Config objects from dict literals.
- """
- for key in self:
- obj = self[key]
- if _is_section_key(key) \
- and isinstance(obj, dict) \
- and not isinstance(obj, Config):
- setattr(self, key, Config(obj))
-
- def _merge(self, other):
- """deprecated alias, use Config.merge()"""
- self.merge(other)
-
- def merge(self, other):
- """merge another config object into this one"""
- to_update = {}
+from traitlets.traitlets import HasTraits, List, Any
+
+#-----------------------------------------------------------------------------
+# Exceptions
+#-----------------------------------------------------------------------------
+
+
+class ConfigError(Exception):
+ pass
+
+class ConfigLoaderError(ConfigError):
+ pass
+
+class ConfigFileNotFound(ConfigError):
+ pass
+
+class ArgumentError(ConfigLoaderError):
+ pass
+
+#-----------------------------------------------------------------------------
+# Argparse fix
+#-----------------------------------------------------------------------------
+
+# Unfortunately argparse by default prints help messages to stderr instead of
+# stdout. This makes it annoying to capture long help screens at the command
+# line, since one must know how to pipe stderr, which many users don't know how
+# to do. So we override the print_help method with one that defaults to
+# stdout and use our class instead.
+
+class ArgumentParser(argparse.ArgumentParser):
+ """Simple argparse subclass that prints help to stdout by default."""
+
+ def print_help(self, file=None):
+ if file is None:
+ file = sys.stdout
+ return super(ArgumentParser, self).print_help(file)
+
+ print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
+
+#-----------------------------------------------------------------------------
+# Config class for holding config information
+#-----------------------------------------------------------------------------
+
+class LazyConfigValue(HasTraits):
+ """Proxy object for exposing methods on configurable containers
+
+ Exposes:
+
+ - append, extend, insert on lists
+ - update on dicts
+ - update, add on sets
+ """
+
+ _value = None
+
+ # list methods
+ _extend = List()
+ _prepend = List()
+
+ def append(self, obj):
+ self._extend.append(obj)
+
+ def extend(self, other):
+ self._extend.extend(other)
+
+ def prepend(self, other):
+ """like list.extend, but for the front"""
+ self._prepend[:0] = other
+
+ _inserts = List()
+ def insert(self, index, other):
+ if not isinstance(index, int):
+ raise TypeError("An integer is required")
+ self._inserts.append((index, other))
+
+ # dict methods
+ # update is used for both dict and set
+ _update = Any()
+ def update(self, other):
+ if self._update is None:
+ if isinstance(other, dict):
+ self._update = {}
+ else:
+ self._update = set()
+ self._update.update(other)
+
+ # set methods
+ def add(self, obj):
+ self.update({obj})
+
+ def get_value(self, initial):
+ """construct the value from the initial one
+
+ after applying any insert / extend / update changes
+ """
+ if self._value is not None:
+ return self._value
+ value = copy.deepcopy(initial)
+ if isinstance(value, list):
+ for idx, obj in self._inserts:
+ value.insert(idx, obj)
+ value[:0] = self._prepend
+ value.extend(self._extend)
+
+ elif isinstance(value, dict):
+ if self._update:
+ value.update(self._update)
+ elif isinstance(value, set):
+ if self._update:
+ value.update(self._update)
+ self._value = value
+ return value
+
+ def to_dict(self):
+ """return JSONable dict form of my data
+
+        Currently: update as a dict or set; extend and prepend as lists; inserts as a list of tuples.
+ """
+ d = {}
+ if self._update:
+ d['update'] = self._update
+ if self._extend:
+ d['extend'] = self._extend
+ if self._prepend:
+ d['prepend'] = self._prepend
+ elif self._inserts:
+ d['inserts'] = self._inserts
+ return d
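
The deferred operations replay against the real initial value only when ``get_value`` runs, e.g.::

    lzy = LazyConfigValue()
    lzy.prepend([0])
    lzy.append(4)
    lzy.get_value([1, 2, 3])   # -> [0, 1, 2, 3, 4]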
+
+
+def _is_section_key(key):
+ """Is a Config key a section name (does it start with a capital)?"""
+ if key and key[0].upper()==key[0] and not key.startswith('_'):
+ return True
+ else:
+ return False
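
Concretely::

    _is_section_key('InteractiveShell')   # True: capitalized, so a class section
    _is_section_key('autoindent')         # False: a plain trait name
    _is_section_key('_private')           # False: leading underscore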
+
+
+class Config(dict):
+ """An attribute based dict that can do smart merges."""
+
+ def __init__(self, *args, **kwds):
+ dict.__init__(self, *args, **kwds)
+ self._ensure_subconfig()
+
+ def _ensure_subconfig(self):
+        """ensure that sub-dicts that should be Config objects are Config objects
+
+ casts dicts that are under section keys to Config objects,
+ which is necessary for constructing Config objects from dict literals.
+ """
+ for key in self:
+ obj = self[key]
+ if _is_section_key(key) \
+ and isinstance(obj, dict) \
+ and not isinstance(obj, Config):
+ setattr(self, key, Config(obj))
+
+ def _merge(self, other):
+ """deprecated alias, use Config.merge()"""
+ self.merge(other)
+
+ def merge(self, other):
+ """merge another config object into this one"""
+ to_update = {}
for k, v in other.items():
- if k not in self:
- to_update[k] = v
- else: # I have this key
- if isinstance(v, Config) and isinstance(self[k], Config):
- # Recursively merge common sub Configs
- self[k].merge(v)
- else:
- # Plain updates for non-Configs
- to_update[k] = v
-
- self.update(to_update)
-
- def collisions(self, other):
- """Check for collisions between two config objects.
-
-        Returns a dict of the form {"Class": {"trait": "collision message"}},
- indicating which values have been ignored.
-
- An empty dict indicates no collisions.
- """
- collisions = {}
- for section in self:
- if section not in other:
- continue
- mine = self[section]
- theirs = other[section]
- for key in mine:
- if key in theirs and mine[key] != theirs[key]:
- collisions.setdefault(section, {})
- collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
- return collisions
-
- def __contains__(self, key):
- # allow nested contains of the form `"Section.key" in config`
- if '.' in key:
- first, remainder = key.split('.', 1)
- if first not in self:
- return False
- return remainder in self[first]
-
- return super(Config, self).__contains__(key)
-
- # .has_key is deprecated for dictionaries.
- has_key = __contains__
-
- def _has_section(self, key):
- return _is_section_key(key) and key in self
-
- def copy(self):
- return type(self)(dict.copy(self))
-
- def __copy__(self):
- return self.copy()
-
- def __deepcopy__(self, memo):
- new_config = type(self)()
- for key, value in self.items():
- if isinstance(value, (Config, LazyConfigValue)):
- # deep copy config objects
- value = copy.deepcopy(value, memo)
- elif type(value) in {dict, list, set, tuple}:
- # shallow copy plain container traits
- value = copy.copy(value)
- new_config[key] = value
- return new_config
-
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- if _is_section_key(key):
- c = Config()
- dict.__setitem__(self, key, c)
- return c
- elif not key.startswith('_'):
- # undefined, create lazy value, used for container methods
- v = LazyConfigValue()
- dict.__setitem__(self, key, v)
- return v
- else:
- raise KeyError
-
- def __setitem__(self, key, value):
- if _is_section_key(key):
- if not isinstance(value, Config):
- raise ValueError('values whose keys begin with an uppercase '
- 'char must be Config instances: %r, %r' % (key, value))
- dict.__setitem__(self, key, value)
-
- def __getattr__(self, key):
- if key.startswith('__'):
- return dict.__getattr__(self, key)
- try:
- return self.__getitem__(key)
- except KeyError as e:
- raise AttributeError(e)
-
- def __setattr__(self, key, value):
- if key.startswith('__'):
- return dict.__setattr__(self, key, value)
- try:
- self.__setitem__(key, value)
- except KeyError as e:
- raise AttributeError(e)
-
- def __delattr__(self, key):
- if key.startswith('__'):
- return dict.__delattr__(self, key)
- try:
- dict.__delitem__(self, key)
- except KeyError as e:
- raise AttributeError(e)
-
-
-#-----------------------------------------------------------------------------
-# Config loading classes
-#-----------------------------------------------------------------------------
-
-
-class ConfigLoader(object):
-    """An object for loading configurations from just about anywhere.
-
- The resulting configuration is packaged as a :class:`Config`.
-
- Notes
- -----
- A :class:`ConfigLoader` does one thing: load a config from a source
-    (file, command line arguments) and return the data as a :class:`Config` object.
- There are lots of things that :class:`ConfigLoader` does not do. It does
- not implement complex logic for finding config files. It does not handle
- default values or merge multiple configs. These things need to be
- handled elsewhere.
- """
-
- def _log_default(self):
- from traitlets.log import get_logger
- return get_logger()
-
- def __init__(self, log=None):
- """A base class for config loaders.
-
- log : instance of :class:`logging.Logger` to use.
-        By default, the logger of :meth:`traitlets.config.application.Application.instance()`
-        will be used.
-
- Examples
- --------
-
- >>> cl = ConfigLoader()
- >>> config = cl.load_config()
- >>> config
- {}
- """
- self.clear()
- if log is None:
- self.log = self._log_default()
- self.log.debug('Using default logger')
- else:
- self.log = log
-
- def clear(self):
- self.config = Config()
-
- def load_config(self):
- """Load a config from somewhere, return a :class:`Config` instance.
-
- Usually, this will cause self.config to be set and then returned.
- However, in most cases, :meth:`ConfigLoader.clear` should be called
- to erase any previous state.
- """
- self.clear()
- return self.config
-
-
-class FileConfigLoader(ConfigLoader):
- """A base class for file based configurations.
-
- As we add more file based config loaders, the common logic should go
- here.
- """
-
- def __init__(self, filename, path=None, **kw):
- """Build a config loader for a filename and path.
-
- Parameters
- ----------
- filename : str
- The file name of the config file.
- path : str, list, tuple
- The path to search for the config file on, or a sequence of
- paths to try in order.
- """
- super(FileConfigLoader, self).__init__(**kw)
- self.filename = filename
- self.path = path
- self.full_filename = ''
-
- def _find_file(self):
- """Try to find the file by searching the paths."""
- self.full_filename = filefind(self.filename, self.path)
-
-class JSONFileConfigLoader(FileConfigLoader):
+ if k not in self:
+ to_update[k] = v
+ else: # I have this key
+ if isinstance(v, Config) and isinstance(self[k], Config):
+ # Recursively merge common sub Configs
+ self[k].merge(v)
+ else:
+ # Plain updates for non-Configs
+ to_update[k] = v
+
+ self.update(to_update)
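
A short example of the merge semantics (made-up section names): common sub-Configs merge recursively, everything else is a plain overwrite::

    c1 = Config({'Foo': {'a': 1, 'b': 2}})
    c2 = Config({'Foo': {'b': 3}, 'Bar': {'c': 4}})
    c1.merge(c2)
    # c1.Foo.a == 1, c1.Foo.b == 3, c1.Bar.c == 4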
+
+ def collisions(self, other):
+ """Check for collisions between two config objects.
+
+        Returns a dict of the form {"Class": {"trait": "collision message"}},
+ indicating which values have been ignored.
+
+ An empty dict indicates no collisions.
+ """
+ collisions = {}
+ for section in self:
+ if section not in other:
+ continue
+ mine = self[section]
+ theirs = other[section]
+ for key in mine:
+ if key in theirs and mine[key] != theirs[key]:
+ collisions.setdefault(section, {})
+ collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
+ return collisions
+
+ def __contains__(self, key):
+ # allow nested contains of the form `"Section.key" in config`
+ if '.' in key:
+ first, remainder = key.split('.', 1)
+ if first not in self:
+ return False
+ return remainder in self[first]
+
+ return super(Config, self).__contains__(key)
+
+ # .has_key is deprecated for dictionaries.
+ has_key = __contains__
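
The dotted form means membership tests can reach into sections::

    cfg = Config({'Foo': {'bar': 1}})
    'Foo' in cfg        # True: plain dict containment
    'Foo.bar' in cfg    # True: nested lookup
    'Foo.baz' in cfg    # False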
+
+ def _has_section(self, key):
+ return _is_section_key(key) and key in self
+
+ def copy(self):
+ return type(self)(dict.copy(self))
+
+ def __copy__(self):
+ return self.copy()
+
+ def __deepcopy__(self, memo):
+ new_config = type(self)()
+ for key, value in self.items():
+ if isinstance(value, (Config, LazyConfigValue)):
+ # deep copy config objects
+ value = copy.deepcopy(value, memo)
+ elif type(value) in {dict, list, set, tuple}:
+ # shallow copy plain container traits
+ value = copy.copy(value)
+ new_config[key] = value
+ return new_config
+
+ def __getitem__(self, key):
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ if _is_section_key(key):
+ c = Config()
+ dict.__setitem__(self, key, c)
+ return c
+ elif not key.startswith('_'):
+ # undefined, create lazy value, used for container methods
+ v = LazyConfigValue()
+ dict.__setitem__(self, key, v)
+ return v
+ else:
+ raise KeyError
+
+ def __setitem__(self, key, value):
+ if _is_section_key(key):
+ if not isinstance(value, Config):
+ raise ValueError('values whose keys begin with an uppercase '
+ 'char must be Config instances: %r, %r' % (key, value))
+ dict.__setitem__(self, key, value)
+
+ def __getattr__(self, key):
+ if key.startswith('__'):
+ return dict.__getattr__(self, key)
+ try:
+ return self.__getitem__(key)
+ except KeyError as e:
+ raise AttributeError(e)
+
+ def __setattr__(self, key, value):
+ if key.startswith('__'):
+ return dict.__setattr__(self, key, value)
+ try:
+ self.__setitem__(key, value)
+ except KeyError as e:
+ raise AttributeError(e)
+
+ def __delattr__(self, key):
+ if key.startswith('__'):
+ return dict.__delattr__(self, key)
+ try:
+ dict.__delitem__(self, key)
+ except KeyError as e:
+ raise AttributeError(e)
+
+
+#-----------------------------------------------------------------------------
+# Config loading classes
+#-----------------------------------------------------------------------------
+
+
+class ConfigLoader(object):
+    """An object for loading configurations from just about anywhere.
+
+ The resulting configuration is packaged as a :class:`Config`.
+
+ Notes
+ -----
+ A :class:`ConfigLoader` does one thing: load a config from a source
+    (file, command line arguments) and return the data as a :class:`Config` object.
+ There are lots of things that :class:`ConfigLoader` does not do. It does
+ not implement complex logic for finding config files. It does not handle
+ default values or merge multiple configs. These things need to be
+ handled elsewhere.
+ """
+
+ def _log_default(self):
+ from traitlets.log import get_logger
+ return get_logger()
+
+ def __init__(self, log=None):
+ """A base class for config loaders.
+
+ log : instance of :class:`logging.Logger` to use.
+        By default, the logger of :meth:`traitlets.config.application.Application.instance()`
+        will be used.
+
+ Examples
+ --------
+
+ >>> cl = ConfigLoader()
+ >>> config = cl.load_config()
+ >>> config
+ {}
+ """
+ self.clear()
+ if log is None:
+ self.log = self._log_default()
+ self.log.debug('Using default logger')
+ else:
+ self.log = log
+
+ def clear(self):
+ self.config = Config()
+
+ def load_config(self):
+ """Load a config from somewhere, return a :class:`Config` instance.
+
+ Usually, this will cause self.config to be set and then returned.
+ However, in most cases, :meth:`ConfigLoader.clear` should be called
+ to erase any previous state.
+ """
+ self.clear()
+ return self.config
+
+
+class FileConfigLoader(ConfigLoader):
+ """A base class for file based configurations.
+
+ As we add more file based config loaders, the common logic should go
+ here.
+ """
+
+ def __init__(self, filename, path=None, **kw):
+ """Build a config loader for a filename and path.
+
+ Parameters
+ ----------
+ filename : str
+ The file name of the config file.
+ path : str, list, tuple
+ The path to search for the config file on, or a sequence of
+ paths to try in order.
+ """
+ super(FileConfigLoader, self).__init__(**kw)
+ self.filename = filename
+ self.path = path
+ self.full_filename = ''
+
+ def _find_file(self):
+ """Try to find the file by searching the paths."""
+ self.full_filename = filefind(self.filename, self.path)
+
+class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config
-
+
 Can also act as a context manager that rewrites the configuration file to disk on exit.
Example::
@@ -396,36 +396,36 @@ class JSONFileConfigLoader(FileConfigLoader):
"""
- def load_config(self):
- """Load the config from a file and return it as a Config object."""
- self.clear()
- try:
- self._find_file()
- except IOError as e:
- raise ConfigFileNotFound(str(e))
- dct = self._read_file_as_dict()
- self.config = self._convert_to_config(dct)
- return self.config
-
- def _read_file_as_dict(self):
- with open(self.full_filename) as f:
- return json.load(f)
-
- def _convert_to_config(self, dictionary):
- if 'version' in dictionary:
- version = dictionary.pop('version')
- else:
- version = 1
-
- if version == 1:
- return Config(dictionary)
- else:
- raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
-
+ def load_config(self):
+ """Load the config from a file and return it as a Config object."""
+ self.clear()
+ try:
+ self._find_file()
+ except IOError as e:
+ raise ConfigFileNotFound(str(e))
+ dct = self._read_file_as_dict()
+ self.config = self._convert_to_config(dct)
+ return self.config
+
+ def _read_file_as_dict(self):
+ with open(self.full_filename) as f:
+ return json.load(f)
+
+ def _convert_to_config(self, dictionary):
+ if 'version' in dictionary:
+ version = dictionary.pop('version')
+ else:
+ version = 1
+
+ if version == 1:
+ return Config(dictionary)
+ else:
+ raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
+
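+ # Sketch of the context-manager round trip (file name hypothetical,
+ # and assumed to exist on disk already):
+ #
+ # with JSONFileConfigLoader('myapp.json', path='.') as cfg:
+ # cfg.MyApp.verbose = True
+ # # on exit, the mutated Config is written back to myapp.json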
def __enter__(self):
self.load_config()
return self.config
-
+
def __exit__(self, exc_type, exc_value, traceback):
"""
Exit the context manager but do not handle any errors.
@@ -440,418 +440,418 @@ class JSONFileConfigLoader(FileConfigLoader):
-class PyFileConfigLoader(FileConfigLoader):
- """A config loader for pure python files.
-
- This is responsible for locating a Python config file by filename and
- path, then executing it to construct a Config object.
- """
-
- def load_config(self):
- """Load the config from a file and return it as a Config object."""
- self.clear()
- try:
- self._find_file()
- except IOError as e:
- raise ConfigFileNotFound(str(e))
- self._read_file_as_dict()
- return self.config
-
- def load_subconfig(self, fname, path=None):
- """Injected into config file namespace as load_subconfig"""
- if path is None:
- path = self.path
-
- loader = self.__class__(fname, path)
- try:
- sub_config = loader.load_config()
- except ConfigFileNotFound:
- # Pass silently if the sub config is not there,
- # treat it as an empty config file.
- pass
- else:
- self.config.merge(sub_config)
-
- def _read_file_as_dict(self):
- """Load the config file into self.config, with recursive loading."""
- def get_config():
- """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
- return self.config
-
- namespace = dict(
- c=self.config,
- load_subconfig=self.load_subconfig,
- get_config=get_config,
- __file__=self.full_filename,
- )
- fs_encoding = sys.getfilesystemencoding() or 'ascii'
- conf_filename = self.full_filename.encode(fs_encoding)
- py3compat.execfile(conf_filename, namespace)
-
-
-class CommandLineConfigLoader(ConfigLoader):
- """A config loader for command line arguments.
-
- As we add more command line based loaders, the common logic should go
- here.
- """
-
- def _exec_config_str(self, lhs, rhs):
- """execute self.config.<lhs> = <rhs>
-
- * expands ~ with expanduser
- * tries to assign with literal_eval, otherwise assigns with just the string,
- allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
- equivalent are `--C.a=4` and `--C.a='4'`.
- """
- rhs = os.path.expanduser(rhs)
- try:
- # Try to see if regular Python syntax will work. This
- # won't handle strings as the quote marks are removed
- # by the system shell.
- value = literal_eval(rhs)
- except (NameError, SyntaxError, ValueError):
- # This case happens if the rhs is a string.
- value = rhs
-
- exec(u'self.config.%s = value' % lhs)
-
- def _load_flag(self, cfg):
- """update self.config from a flag, which can be a dict or Config"""
- if isinstance(cfg, (dict, Config)):
- # don't clobber whole config sections, update
- # each section from config:
+class PyFileConfigLoader(FileConfigLoader):
+ """A config loader for pure python files.
+
+ This is responsible for locating a Python config file by filename and
+ path, then executing it to construct a Config object.
+ """
+
+ def load_config(self):
+ """Load the config from a file and return it as a Config object."""
+ self.clear()
+ try:
+ self._find_file()
+ except IOError as e:
+ raise ConfigFileNotFound(str(e))
+ self._read_file_as_dict()
+ return self.config
+
+ def load_subconfig(self, fname, path=None):
+ """Injected into config file namespace as load_subconfig"""
+ if path is None:
+ path = self.path
+
+ loader = self.__class__(fname, path)
+ try:
+ sub_config = loader.load_config()
+ except ConfigFileNotFound:
+ # Pass silently if the sub config is not there,
+ # treat it as an empty config file.
+ pass
+ else:
+ self.config.merge(sub_config)
+
+ def _read_file_as_dict(self):
+ """Load the config file into self.config, with recursive loading."""
+ def get_config():
+ """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
+ return self.config
+
+ namespace = dict(
+ c=self.config,
+ load_subconfig=self.load_subconfig,
+ get_config=get_config,
+ __file__=self.full_filename,
+ )
+ fs_encoding = sys.getfilesystemencoding() or 'ascii'
+ conf_filename = self.full_filename.encode(fs_encoding)
+ py3compat.execfile(conf_filename, namespace)
+
+
+class CommandLineConfigLoader(ConfigLoader):
+ """A config loader for command line arguments.
+
+ As we add more command line based loaders, the common logic should go
+ here.
+ """
+
+ def _exec_config_str(self, lhs, rhs):
+ """execute self.config.<lhs> = <rhs>
+
+ * expands ~ with expanduser
+ * tries to assign with literal_eval, otherwise assigns with just the string,
+ allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
+ equivalent are `--C.a=4` and `--C.a='4'`.
+ """
+ rhs = os.path.expanduser(rhs)
+ try:
+ # Try to see if regular Python syntax will work. This
+ # won't handle strings as the quote marks are removed
+ # by the system shell.
+ value = literal_eval(rhs)
+ except (NameError, SyntaxError, ValueError):
+ # This case happens if the rhs is a string.
+ value = rhs
+
+ exec(u'self.config.%s = value' % lhs)
+
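+ # Illustrative effect of _exec_config_str (loader stands for any
+ # CommandLineConfigLoader instance; values hypothetical):
+ #
+ # loader._exec_config_str('A.x', '4') # literal_eval -> config.A.x == 4
+ # loader._exec_config_str('A.y', 'four') # falls back -> config.A.y == 'four'
+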
+ def _load_flag(self, cfg):
+ """update self.config from a flag, which can be a dict or Config"""
+ if isinstance(cfg, (dict, Config)):
+ # don't clobber whole config sections, update
+ # each section from config:
for sec,c in cfg.items():
- self.config[sec].update(c)
- else:
- raise TypeError("Invalid flag: %r" % cfg)
-
-# raw --identifier=value pattern
-# but *also* accept '-' as wordsep, for aliases
-# accepts: --foo=a
-# --Class.trait=value
-# --alias-name=value
-# rejects: -foo=value
-# --foo
-# --Class.trait
-kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
-
-# just flags, no assignments, with two *or one* leading '-'
-# accepts: --foo
-# -foo-bar-again
-# rejects: --anything=anything
-# --two.word
-
-flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
-
-class KeyValueConfigLoader(CommandLineConfigLoader):
- """A config loader that loads key value pairs from the command line.
-
- This allows command line options to be given in the following form::
-
- ipython --profile="foo" --InteractiveShell.autocall=False
- """
-
- def __init__(self, argv=None, aliases=None, flags=None, **kw):
- """Create a key value pair config loader.
-
- Parameters
- ----------
- argv : list
- A list that has the form of sys.argv[1:] which has unicode
- elements of the form u"key=value". If this is None (default),
- then sys.argv[1:] will be used.
- aliases : dict
- A dict of aliases for configurable traits.
- Keys are the short aliases, Values are the resolved trait.
- Of the form: `{'alias' : 'Configurable.trait'}`
- flags : dict
- A dict of flags, keyed by str name. Values can be Config objects,
- dicts, or "key=value" strings. If Config or dict, when the flag
- is triggered, the flag is loaded as `self.config.update(cfg)`.
-
- Returns
- -------
- config : Config
- The resulting Config object.
-
- Examples
- --------
-
- >>> from traitlets.config.loader import KeyValueConfigLoader
- >>> cl = KeyValueConfigLoader()
- >>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
- >>> sorted(d.items())
- [('A', {'name': 'brian'}), ('B', {'number': 0})]
- """
- super(KeyValueConfigLoader, self).__init__(**kw)
- if argv is None:
- argv = sys.argv[1:]
- self.argv = argv
- self.aliases = aliases or {}
- self.flags = flags or {}
-
-
- def clear(self):
- super(KeyValueConfigLoader, self).clear()
- self.extra_args = []
-
-
- def _decode_argv(self, argv, enc=None):
- """decode argv if bytes, using stdin.encoding, falling back on default enc"""
- uargv = []
- if enc is None:
- enc = DEFAULT_ENCODING
- for arg in argv:
+ self.config[sec].update(c)
+ else:
+ raise TypeError("Invalid flag: %r" % cfg)
+
+# raw --identifier=value pattern
+# but *also* accept '-' as wordsep, for aliases
+# accepts: --foo=a
+# --Class.trait=value
+# --alias-name=value
+# rejects: -foo=value
+# --foo
+# --Class.trait
+kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
+
+# just flags, no assignments, with two *or one* leading '-'
+# accepts: --foo
+# -foo-bar-again
+# rejects: --anything=anything
+# --two.word
+
+flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
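+# Quick sanity checks for the two patterns above (illustrative only):
+# kv_pattern.match('--InteractiveShell.autocall=False') # matches
+# kv_pattern.match('--log-level=20') # matches ('-' allowed for aliases)
+# kv_pattern.match('--debug') # no match: no '=' present
+# flag_pattern.match('--debug') # matches
+# flag_pattern.match('--two.word') # no match: '.' not allowed in flags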
+
+class KeyValueConfigLoader(CommandLineConfigLoader):
+ """A config loader that loads key value pairs from the command line.
+
+ This allows command line options to be given in the following form::
+
+ ipython --profile="foo" --InteractiveShell.autocall=False
+ """
+
+ def __init__(self, argv=None, aliases=None, flags=None, **kw):
+ """Create a key value pair config loader.
+
+ Parameters
+ ----------
+ argv : list
+ A list that has the form of sys.argv[1:] which has unicode
+ elements of the form u"key=value". If this is None (default),
+ then sys.argv[1:] will be used.
+ aliases : dict
+ A dict of aliases for configurable traits.
+ Keys are the short aliases, Values are the resolved trait.
+ Of the form: `{'alias' : 'Configurable.trait'}`
+ flags : dict
+ A dict of flags, keyed by str name. Values can be Config objects,
+ dicts, or "key=value" strings. If Config or dict, when the flag
+ is triggered, the flag is loaded as `self.config.update(cfg)`.
+
+ Returns
+ -------
+ config : Config
+ The resulting Config object.
+
+ Examples
+ --------
+
+ >>> from traitlets.config.loader import KeyValueConfigLoader
+ >>> cl = KeyValueConfigLoader()
+ >>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
+ >>> sorted(d.items())
+ [('A', {'name': 'brian'}), ('B', {'number': 0})]
+ """
+ super(KeyValueConfigLoader, self).__init__(**kw)
+ if argv is None:
+ argv = sys.argv[1:]
+ self.argv = argv
+ self.aliases = aliases or {}
+ self.flags = flags or {}
+
+
+ def clear(self):
+ super(KeyValueConfigLoader, self).clear()
+ self.extra_args = []
+
+
+ def _decode_argv(self, argv, enc=None):
+ """decode argv if bytes, using stdin.encoding, falling back on default enc"""
+ uargv = []
+ if enc is None:
+ enc = DEFAULT_ENCODING
+ for arg in argv:
if not isinstance(arg, text_type):
- # only decode if not already decoded
- arg = arg.decode(enc)
- uargv.append(arg)
- return uargv
-
-
- def load_config(self, argv=None, aliases=None, flags=None):
- """Parse the configuration and generate the Config object.
-
- After loading, any arguments that are not key-value or
- flags will be stored in self.extra_args - a list of
- unparsed command-line arguments. This is used for
- arguments such as input files or subcommands.
-
- Parameters
- ----------
- argv : list, optional
- A list that has the form of sys.argv[1:] which has unicode
- elements of the form u"key=value". If this is None (default),
- then self.argv will be used.
- aliases : dict
- A dict of aliases for configurable traits.
- Keys are the short aliases, Values are the resolved trait.
- Of the form: `{'alias' : 'Configurable.trait'}`
- flags : dict
- A dict of flags, keyed by str name. Values can be Config objects
- or dicts. When the flag is triggered, The config is loaded as
- `self.config.update(cfg)`.
- """
- self.clear()
- if argv is None:
- argv = self.argv
- if aliases is None:
- aliases = self.aliases
- if flags is None:
- flags = self.flags
-
- # ensure argv is a list of unicode strings:
- uargv = self._decode_argv(argv)
- for idx,raw in enumerate(uargv):
- # strip leading '-'
- item = raw.lstrip('-')
-
- if raw == '--':
- # don't parse arguments after '--'
- # this is useful for relaying arguments to scripts, e.g.
- # ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
- self.extra_args.extend(uargv[idx+1:])
- break
-
- if kv_pattern.match(raw):
- lhs,rhs = item.split('=',1)
- # Substitute longnames for aliases.
- if lhs in aliases:
- lhs = aliases[lhs]
- if '.' not in lhs:
- # probably a mistyped alias, but not technically illegal
- self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
- try:
- self._exec_config_str(lhs, rhs)
- except Exception:
- raise ArgumentError("Invalid argument: '%s'" % raw)
-
- elif flag_pattern.match(raw):
- if item in flags:
- cfg,help = flags[item]
- self._load_flag(cfg)
- else:
- raise ArgumentError("Unrecognized flag: '%s'"%raw)
- elif raw.startswith('-'):
- kv = '--'+item
- if kv_pattern.match(kv):
- raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
- else:
- raise ArgumentError("Invalid argument: '%s'"%raw)
- else:
- # keep all args that aren't valid in a list,
- # in case our parent knows what to do with them.
- self.extra_args.append(item)
- return self.config
-
-class ArgParseConfigLoader(CommandLineConfigLoader):
- """A loader that uses the argparse module to load from the command line."""
-
- def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
- """Create a config loader for use with argparse.
-
- Parameters
- ----------
-
- argv : list, optional
- If given, used to read command-line arguments from, otherwise
- sys.argv[1:] is used.
-
- parser_args : tuple
- A tuple of positional arguments that will be passed to the
- constructor of :class:`argparse.ArgumentParser`.
-
- parser_kw : dict
- A dict of keyword arguments that will be passed to the
- constructor of :class:`argparse.ArgumentParser`.
-
- Returns
- -------
- config : Config
- The resulting Config object.
- """
- super(CommandLineConfigLoader, self).__init__(log=log)
- self.clear()
- if argv is None:
- argv = sys.argv[1:]
- self.argv = argv
- self.aliases = aliases or {}
- self.flags = flags or {}
-
- self.parser_args = parser_args
- self.version = parser_kw.pop("version", None)
- kwargs = dict(argument_default=argparse.SUPPRESS)
- kwargs.update(parser_kw)
- self.parser_kw = kwargs
-
- def load_config(self, argv=None, aliases=None, flags=None):
- """Parse command line arguments and return as a Config object.
-
- Parameters
- ----------
-
- argv : list, optional
- If given, a list with the structure of sys.argv[1:] to parse
- arguments from. If not given, the instance's self.argv attribute
- (given at construction time) is used."""
- self.clear()
- if argv is None:
- argv = self.argv
- if aliases is None:
- aliases = self.aliases
- if flags is None:
- flags = self.flags
- self._create_parser(aliases, flags)
- self._parse_args(argv)
- self._convert_to_config()
- return self.config
-
- def get_extra_args(self):
- if hasattr(self, 'extra_args'):
- return self.extra_args
- else:
- return []
-
- def _create_parser(self, aliases=None, flags=None):
- self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
- self._add_arguments(aliases, flags)
-
- def _add_arguments(self, aliases=None, flags=None):
- raise NotImplementedError("subclasses must implement _add_arguments")
-
- def _parse_args(self, args):
- """self.parser->self.parsed_data"""
- # decode sys.argv to support unicode command-line options
- enc = DEFAULT_ENCODING
- uargs = [py3compat.cast_unicode(a, enc) for a in args]
- self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
-
- def _convert_to_config(self):
- """self.parsed_data->self.config"""
+ # only decode if not already decoded
+ arg = arg.decode(enc)
+ uargv.append(arg)
+ return uargv
+
+
+ def load_config(self, argv=None, aliases=None, flags=None):
+ """Parse the configuration and generate the Config object.
+
+ After loading, any arguments that are not key-value or
+ flags will be stored in self.extra_args - a list of
+ unparsed command-line arguments. This is used for
+ arguments such as input files or subcommands.
+
+ Parameters
+ ----------
+ argv : list, optional
+ A list that has the form of sys.argv[1:] which has unicode
+ elements of the form u"key=value". If this is None (default),
+ then self.argv will be used.
+ aliases : dict
+ A dict of aliases for configurable traits.
+ Keys are the short aliases, Values are the resolved trait.
+ Of the form: `{'alias' : 'Configurable.trait'}`
+ flags : dict
+ A dict of flags, keyed by str name. Values can be Config objects
+ or dicts. When the flag is triggered, the config is loaded as
+ `self.config.update(cfg)`.
+ """
+ self.clear()
+ if argv is None:
+ argv = self.argv
+ if aliases is None:
+ aliases = self.aliases
+ if flags is None:
+ flags = self.flags
+
+ # ensure argv is a list of unicode strings:
+ uargv = self._decode_argv(argv)
+ for idx,raw in enumerate(uargv):
+ # strip leading '-'
+ item = raw.lstrip('-')
+
+ if raw == '--':
+ # don't parse arguments after '--'
+ # this is useful for relaying arguments to scripts, e.g.
+ # ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
+ self.extra_args.extend(uargv[idx+1:])
+ break
+
+ if kv_pattern.match(raw):
+ lhs,rhs = item.split('=',1)
+ # Substitute longnames for aliases.
+ if lhs in aliases:
+ lhs = aliases[lhs]
+ if '.' not in lhs:
+ # probably a mistyped alias, but not technically illegal
+ self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
+ try:
+ self._exec_config_str(lhs, rhs)
+ except Exception:
+ raise ArgumentError("Invalid argument: '%s'" % raw)
+
+ elif flag_pattern.match(raw):
+ if item in flags:
+ cfg,help = flags[item]
+ self._load_flag(cfg)
+ else:
+ raise ArgumentError("Unrecognized flag: '%s'"%raw)
+ elif raw.startswith('-'):
+ kv = '--'+item
+ if kv_pattern.match(kv):
+ raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
+ else:
+ raise ArgumentError("Invalid argument: '%s'"%raw)
+ else:
+ # keep all args that aren't valid in a list,
+ # in case our parent knows what to do with them.
+ self.extra_args.append(item)
+ return self.config
+
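+# Sketch of extra_args handling (argument values hypothetical):
+#
+# cl = KeyValueConfigLoader()
+# cl.load_config(['--A.x=1', 'script.py', '--', '--B.y=2'])
+# cl.extra_args # -> ['script.py', '--B.y=2'] ('--' stops parsing)
+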
+class ArgParseConfigLoader(CommandLineConfigLoader):
+ """A loader that uses the argparse module to load from the command line."""
+
+ def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
+ """Create a config loader for use with argparse.
+
+ Parameters
+ ----------
+
+ argv : list, optional
+ If given, used to read command-line arguments from, otherwise
+ sys.argv[1:] is used.
+
+ parser_args : tuple
+ A tuple of positional arguments that will be passed to the
+ constructor of :class:`argparse.ArgumentParser`.
+
+ parser_kw : dict
+ A dict of keyword arguments that will be passed to the
+ constructor of :class:`argparse.ArgumentParser`.
+
+ Returns
+ -------
+ config : Config
+ The resulting Config object.
+ """
+ super(CommandLineConfigLoader, self).__init__(log=log)
+ self.clear()
+ if argv is None:
+ argv = sys.argv[1:]
+ self.argv = argv
+ self.aliases = aliases or {}
+ self.flags = flags or {}
+
+ self.parser_args = parser_args
+ self.version = parser_kw.pop("version", None)
+ kwargs = dict(argument_default=argparse.SUPPRESS)
+ kwargs.update(parser_kw)
+ self.parser_kw = kwargs
+
+ def load_config(self, argv=None, aliases=None, flags=None):
+ """Parse command line arguments and return as a Config object.
+
+ Parameters
+ ----------
+
+ argv : list, optional
+ If given, a list with the structure of sys.argv[1:] to parse
+ arguments from. If not given, the instance's self.argv attribute
+ (given at construction time) is used."""
+ self.clear()
+ if argv is None:
+ argv = self.argv
+ if aliases is None:
+ aliases = self.aliases
+ if flags is None:
+ flags = self.flags
+ self._create_parser(aliases, flags)
+ self._parse_args(argv)
+ self._convert_to_config()
+ return self.config
+
+ def get_extra_args(self):
+ if hasattr(self, 'extra_args'):
+ return self.extra_args
+ else:
+ return []
+
+ def _create_parser(self, aliases=None, flags=None):
+ self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
+ self._add_arguments(aliases, flags)
+
+ def _add_arguments(self, aliases=None, flags=None):
+ raise NotImplementedError("subclasses must implement _add_arguments")
+
+ def _parse_args(self, args):
+ """self.parser->self.parsed_data"""
+ # decode sys.argv to support unicode command-line options
+ enc = DEFAULT_ENCODING
+ uargs = [py3compat.cast_unicode(a, enc) for a in args]
+ self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
+
+ def _convert_to_config(self):
+ """self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).items():
- exec("self.config.%s = v"%k, locals(), globals())
-
-class KVArgParseConfigLoader(ArgParseConfigLoader):
- """A config loader that loads aliases and flags with argparse,
- but will use KVLoader for the rest. This allows better parsing
- of common args, such as `ipython -c 'print 5'`, but still gets
- arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
-
- def _add_arguments(self, aliases=None, flags=None):
- self.alias_flags = {}
- # print aliases, flags
- if aliases is None:
- aliases = self.aliases
- if flags is None:
- flags = self.flags
- paa = self.parser.add_argument
+ exec("self.config.%s = v"%k, locals(), globals())
+
+class KVArgParseConfigLoader(ArgParseConfigLoader):
+ """A config loader that loads aliases and flags with argparse,
+ but will use KVLoader for the rest. This allows better parsing
+ of common args, such as `ipython -c 'print 5'`, but still gets
+ arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
+
+ def _add_arguments(self, aliases=None, flags=None):
+ self.alias_flags = {}
+ # print aliases, flags
+ if aliases is None:
+ aliases = self.aliases
+ if flags is None:
+ flags = self.flags
+ paa = self.parser.add_argument
for key,value in aliases.items():
- if key in flags:
- # flags
- nargs = '?'
- else:
- nargs = None
- if len(key) == 1:
+ if key in flags:
+ # flags
+ nargs = '?'
+ else:
+ nargs = None
+ if len(key) == 1:
paa('-'+key, '--'+key, type=text_type, dest=value, nargs=nargs)
- else:
+ else:
paa('--'+key, type=text_type, dest=value, nargs=nargs)
for key, (value, help) in flags.items():
- if key in self.aliases:
- #
- self.alias_flags[self.aliases[key]] = value
- continue
- if len(key) == 1:
- paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
- else:
- paa('--'+key, action='append_const', dest='_flags', const=value)
-
- def _convert_to_config(self):
- """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
- # remove subconfigs list from namespace before transforming the Namespace
- if '_flags' in self.parsed_data:
- subcs = self.parsed_data._flags
- del self.parsed_data._flags
- else:
- subcs = []
-
+ if key in self.aliases:
+ #
+ self.alias_flags[self.aliases[key]] = value
+ continue
+ if len(key) == 1:
+ paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
+ else:
+ paa('--'+key, action='append_const', dest='_flags', const=value)
+
+ def _convert_to_config(self):
+ """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
+ # remove subconfigs list from namespace before transforming the Namespace
+ if '_flags' in self.parsed_data:
+ subcs = self.parsed_data._flags
+ del self.parsed_data._flags
+ else:
+ subcs = []
+
for k, v in vars(self.parsed_data).items():
- if v is None:
- # it was a flag that shares the name of an alias
- subcs.append(self.alias_flags[k])
- else:
- # eval the KV assignment
- self._exec_config_str(k, v)
-
- for subc in subcs:
- self._load_flag(subc)
-
- if self.extra_args:
- sub_parser = KeyValueConfigLoader(log=self.log)
- sub_parser.load_config(self.extra_args)
- self.config.merge(sub_parser.config)
- self.extra_args = sub_parser.extra_args
-
-
-def load_pyconfig_files(config_files, path):
- """Load multiple Python config files, merging each of them in turn.
-
- Parameters
- ----------
- config_files : list of str
- List of config file names to load and merge into the config.
- path : unicode
- The full path to the location of the config files.
- """
- config = Config()
- for cf in config_files:
- loader = PyFileConfigLoader(cf, path=path)
- try:
- next_config = loader.load_config()
- except ConfigFileNotFound:
- pass
- except:
- raise
- else:
- config.merge(next_config)
- return config
+ if v is None:
+ # it was a flag that shares the name of an alias
+ subcs.append(self.alias_flags[k])
+ else:
+ # eval the KV assignment
+ self._exec_config_str(k, v)
+
+ for subc in subcs:
+ self._load_flag(subc)
+
+ if self.extra_args:
+ sub_parser = KeyValueConfigLoader(log=self.log)
+ sub_parser.load_config(self.extra_args)
+ self.config.merge(sub_parser.config)
+ self.extra_args = sub_parser.extra_args
+
+
+def load_pyconfig_files(config_files, path):
+ """Load multiple Python config files, merging each of them in turn.
+
+ Parameters
+ ----------
+ config_files : list of str
+ List of config file names to load and merge into the config.
+ path : unicode
+ The full path to the location of the config files.
+ """
+ config = Config()
+ for cf in config_files:
+ loader = PyFileConfigLoader(cf, path=path)
+ try:
+ next_config = loader.load_config()
+ except ConfigFileNotFound:
+ pass
+ except:
+ raise
+ else:
+ config.merge(next_config)
+ return config
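+
+# A small usage sketch (directory and file names hypothetical); later
+# files win on conflicting keys because each config is merged in turn:
+#
+# config = load_pyconfig_files(['base_config.py', 'local_config.py'],
+# '/etc/myapp')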
diff --git a/contrib/python/traitlets/py2/traitlets/config/manager.py b/contrib/python/traitlets/py2/traitlets/config/manager.py
index 89dc167943..5e5ebde9af 100644
--- a/contrib/python/traitlets/py2/traitlets/config/manager.py
+++ b/contrib/python/traitlets/py2/traitlets/config/manager.py
@@ -1,88 +1,88 @@
-"""Manager to read and modify config data in JSON files.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-import errno
-import io
-import json
-import os
-
+"""Manager to read and modify config data in JSON files.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import errno
+import io
+import json
+import os
+
from six import PY3
-from traitlets.config import LoggingConfigurable
-from traitlets.traitlets import Unicode
-
-
-def recursive_update(target, new):
- """Recursively update one dictionary using another.
-
- None values will delete their keys.
- """
- for k, v in new.items():
- if isinstance(v, dict):
- if k not in target:
- target[k] = {}
- recursive_update(target[k], v)
- if not target[k]:
- # Prune empty subdicts
- del target[k]
-
- elif v is None:
- target.pop(k, None)
-
- else:
- target[k] = v
-
-
-class BaseJSONConfigManager(LoggingConfigurable):
- """General JSON config manager
-
- Deals with persisting/storing config in a json file
- """
-
- config_dir = Unicode('.')
-
- def ensure_config_dir_exists(self):
- try:
- os.makedirs(self.config_dir, 0o755)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
- def file_name(self, section_name):
- return os.path.join(self.config_dir, section_name+'.json')
-
- def get(self, section_name):
- """Retrieve the config data for the specified section.
-
- Returns the data as a dictionary, or an empty dictionary if the file
- doesn't exist.
- """
- filename = self.file_name(section_name)
- if os.path.isfile(filename):
- with io.open(filename, encoding='utf-8') as f:
- return json.load(f)
- else:
- return {}
-
- def set(self, section_name, data):
- """Store the given config data.
- """
- filename = self.file_name(section_name)
- self.ensure_config_dir_exists()
-
- if PY3:
- f = io.open(filename, 'w', encoding='utf-8')
- else:
- f = open(filename, 'wb')
- with f:
- json.dump(data, f, indent=2)
-
- def update(self, section_name, new_data):
- """Modify the config section by recursively updating it with new_data.
-
- Returns the modified config data as a dictionary.
- """
- data = self.get(section_name)
- recursive_update(data, new_data)
- self.set(section_name, data)
- return data
+from traitlets.config import LoggingConfigurable
+from traitlets.traitlets import Unicode
+
+
+def recursive_update(target, new):
+ """Recursively update one dictionary using another.
+
+ None values will delete their keys.
+ """
+ for k, v in new.items():
+ if isinstance(v, dict):
+ if k not in target:
+ target[k] = {}
+ recursive_update(target[k], v)
+ if not target[k]:
+ # Prune empty subdicts
+ del target[k]
+
+ elif v is None:
+ target.pop(k, None)
+
+ else:
+ target[k] = v
+
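+# Behavior sketch for recursive_update (values chosen for illustration):
+#
+# target = {'a': {'x': 1, 'y': 2}, 'b': 3}
+# recursive_update(target, {'a': {'y': None}, 'b': 4})
+# target # -> {'a': {'x': 1}, 'b': 4} (None deleted 'y')
+# recursive_update(target, {'a': {'x': None}})
+# target # -> {'b': 4} ('a' pruned once empty)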
+
+class BaseJSONConfigManager(LoggingConfigurable):
+ """General JSON config manager
+
+ Deals with persisting/storing config in a json file
+ """
+
+ config_dir = Unicode('.')
+
+ def ensure_config_dir_exists(self):
+ try:
+ os.makedirs(self.config_dir, 0o755)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def file_name(self, section_name):
+ return os.path.join(self.config_dir, section_name+'.json')
+
+ def get(self, section_name):
+ """Retrieve the config data for the specified section.
+
+ Returns the data as a dictionary, or an empty dictionary if the file
+ doesn't exist.
+ """
+ filename = self.file_name(section_name)
+ if os.path.isfile(filename):
+ with io.open(filename, encoding='utf-8') as f:
+ return json.load(f)
+ else:
+ return {}
+
+ def set(self, section_name, data):
+ """Store the given config data.
+ """
+ filename = self.file_name(section_name)
+ self.ensure_config_dir_exists()
+
+ if PY3:
+ f = io.open(filename, 'w', encoding='utf-8')
+ else:
+ f = open(filename, 'wb')
+ with f:
+ json.dump(data, f, indent=2)
+
+ def update(self, section_name, new_data):
+ """Modify the config section by recursively updating it with new_data.
+
+ Returns the modified config data as a dictionary.
+ """
+ data = self.get(section_name)
+ recursive_update(data, new_data)
+ self.set(section_name, data)
+ return data
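+
+# Illustrative round trip (section name and directory hypothetical):
+#
+# cm = BaseJSONConfigManager(config_dir='/tmp/myapp-config')
+# cm.set('notebook', {'theme': 'dark'})
+# cm.update('notebook', {'fontsize': 12})
+# cm.get('notebook') # -> {'theme': 'dark', 'fontsize': 12}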
diff --git a/contrib/python/traitlets/py2/traitlets/log.py b/contrib/python/traitlets/py2/traitlets/log.py
index 559735bd1a..af86b325f5 100644
--- a/contrib/python/traitlets/py2/traitlets/log.py
+++ b/contrib/python/traitlets/py2/traitlets/log.py
@@ -1,27 +1,27 @@
-"""Grab the global logger instance."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import logging
-
-_logger = None
-
-def get_logger():
- """Grab the global logger instance.
- If a global Application is instantiated, grab its logger.
- Otherwise, grab the root logger.
- """
- global _logger
- if _logger is None:
- from .config import Application
- if Application.initialized():
- _logger = Application.instance().log
- else:
+"""Grab the global logger instance."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import logging
+
+_logger = None
+
+def get_logger():
+ """Grab the global logger instance.
+
+ If a global Application is instantiated, grab its logger.
+ Otherwise, grab the root logger.
+ """
+ global _logger
+
+ if _logger is None:
+ from .config import Application
+ if Application.initialized():
+ _logger = Application.instance().log
+ else:
_logger = logging.getLogger('traitlets')
# Add a NullHandler to silence warnings about not being
# initialized, per best practice for libraries.
_logger.addHandler(logging.NullHandler())
- return _logger
+ return _logger
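+
+# Usage sketch: library code grabs the shared logger lazily, so messages
+# route to the Application's logger once one exists.
+#
+# log = get_logger()
+# log.debug('loaded %d config files', 2) # silent unless configured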
diff --git a/contrib/python/traitlets/py2/traitlets/traitlets.py b/contrib/python/traitlets/py2/traitlets/traitlets.py
index 233c047dc2..c07daf7400 100644
--- a/contrib/python/traitlets/py2/traitlets/traitlets.py
+++ b/contrib/python/traitlets/py2/traitlets/traitlets.py
@@ -1,98 +1,98 @@
-# encoding: utf-8
-"""
-A lightweight Traits-like module.
-
-This is designed to provide a lightweight, simple, pure Python version of
-many of the capabilities of enthought.traits. This includes:
-
-* Validation
-* Type specification with defaults
-* Static and dynamic notification
-* Basic predefined types
-* An API that is similar to enthought.traits
-
-We don't support:
-
-* Delegation
-* Automatic GUI generation
-* A full set of trait types. Most importantly, we don't provide container
- traits (list, dict, tuple) that can trigger notifications if their
- contents change.
-* API compatibility with enthought.traits
-
-There are also some important differences in our design:
-
-* enthought.traits does not validate default values. We do.
-
-We choose to create this module because we need these capabilities, but
-we need them to be pure Python so they work in all Python implementations,
-including Jython and IronPython.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: traitlets.traitlets
- :parts: 3
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-#
-# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
-# also under the terms of the Modified BSD License.
-
-import contextlib
-import inspect
+# encoding: utf-8
+"""
+A lightweight Traits-like module.
+
+This is designed to provide a lightweight, simple, pure Python version of
+many of the capabilities of enthought.traits. This includes:
+
+* Validation
+* Type specification with defaults
+* Static and dynamic notification
+* Basic predefined types
+* An API that is similar to enthought.traits
+
+We don't support:
+
+* Delegation
+* Automatic GUI generation
+* A full set of trait types. Most importantly, we don't provide container
+ traits (list, dict, tuple) that can trigger notifications if their
+ contents change.
+* API compatibility with enthought.traits
+
+There are also some important differences in our design:
+
+* enthought.traits does not validate default values. We do.
+
+We choose to create this module because we need these capabilities, but
+we need them to be pure Python so they work in all Python implementations,
+including Jython and IronPython.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: traitlets.traitlets
+ :parts: 3
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+#
+# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
+# also under the terms of the Modified BSD License.
+
+import contextlib
+import inspect
import os
-import re
-import sys
-import types
+import re
+import sys
+import types
import enum
-try:
- from types import ClassType, InstanceType
- ClassTypes = (ClassType, type)
-except:
- ClassTypes = (type,)
-from warnings import warn, warn_explicit
-
+try:
+ from types import ClassType, InstanceType
+ ClassTypes = (ClassType, type)
+except:
+ ClassTypes = (type,)
+from warnings import warn, warn_explicit
+
import six
-
-from .utils.getargspec import getargspec
-from .utils.importstring import import_item
-from .utils.sentinel import Sentinel
+
+from .utils.getargspec import getargspec
+from .utils.importstring import import_item
+from .utils.sentinel import Sentinel
from .utils.bunch import Bunch
-
-SequenceTypes = (list, tuple, set, frozenset)
-
-#-----------------------------------------------------------------------------
-# Basic classes
-#-----------------------------------------------------------------------------
-
-
-Undefined = Sentinel('Undefined', 'traitlets',
-'''
-Used in Traitlets to specify that no defaults are set in kwargs
-'''
-)
-
-All = Sentinel('All', 'traitlets',
-'''
-Used in Traitlets to listen to all types of notification or to notifications
-from all trait attributes.
-'''
-)
-
-# Deprecated alias
-NoDefaultSpecified = Undefined
-
-class TraitError(Exception):
- pass
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
+
+SequenceTypes = (list, tuple, set, frozenset)
+
+#-----------------------------------------------------------------------------
+# Basic classes
+#-----------------------------------------------------------------------------
+
+
+Undefined = Sentinel('Undefined', 'traitlets',
+'''
+Used in Traitlets to specify that no defaults are set in kwargs
+'''
+)
+
+All = Sentinel('All', 'traitlets',
+'''
+Used in Traitlets to listen to all types of notification or to notifications
+from all trait attributes.
+'''
+)
+
+# Deprecated alias
+NoDefaultSpecified = Undefined
+
+class TraitError(Exception):
+ pass
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
from ipython_genutils.py3compat import cast_unicode_py2
-
+
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
def isidentifier(s):
@@ -117,331 +117,331 @@ def _should_warn(key):
else:
return False
-def _deprecated_method(method, cls, method_name, msg):
- """Show deprecation warning about a magic method definition.
-
- Uses warn_explicit to bind warning to method definition instead of triggering code,
- which isn't relevant.
- """
+def _deprecated_method(method, cls, method_name, msg):
+ """Show deprecation warning about a magic method definition.
+
+ Uses warn_explicit to bind warning to method definition instead of triggering code,
+ which isn't relevant.
+ """
warn_msg = "{classname}.{method_name} is deprecated in traitlets 4.1: {msg}".format(
- classname=cls.__name__, method_name=method_name, msg=msg
- )
-
- for parent in inspect.getmro(cls):
- if method_name in parent.__dict__:
- cls = parent
- break
+ classname=cls.__name__, method_name=method_name, msg=msg
+ )
+
+ for parent in inspect.getmro(cls):
+ if method_name in parent.__dict__:
+ cls = parent
+ break
# limit deprecation messages to once per package
package_name = cls.__module__.split('.', 1)[0]
key = (package_name, msg)
if not _should_warn(key):
return
- try:
- fname = inspect.getsourcefile(method) or "<unknown>"
- lineno = inspect.getsourcelines(method)[1] or 0
+ try:
+ fname = inspect.getsourcefile(method) or "<unknown>"
+ lineno = inspect.getsourcelines(method)[1] or 0
except (IOError, TypeError) as e:
- # Failed to inspect for some reason
- warn(warn_msg + ('\n(inspection failed) %s' % e), DeprecationWarning)
- else:
- warn_explicit(warn_msg, DeprecationWarning, fname, lineno)
-
-def class_of(object):
- """ Returns a string containing the class name of an object with the
- correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
- 'a PlotValue').
- """
+ # Failed to inspect for some reason
+ warn(warn_msg + ('\n(inspection failed) %s' % e), DeprecationWarning)
+ else:
+ warn_explicit(warn_msg, DeprecationWarning, fname, lineno)
+
+def class_of(object):
+ """ Returns a string containing the class name of an object with the
+ correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
+ 'a PlotValue').
+ """
if isinstance( object, six.string_types ):
- return add_article( object )
-
- return add_article( object.__class__.__name__ )
-
-
-def add_article(name):
- """ Returns a string containing the correct indefinite article ('a' or 'an')
- prefixed to the specified string.
- """
- if name[:1].lower() in 'aeiou':
- return 'an ' + name
-
- return 'a ' + name
-
-
-def repr_type(obj):
- """ Return a string representation of a value and its type for readable
- error messages.
- """
- the_type = type(obj)
+ return add_article( object )
+
+ return add_article( object.__class__.__name__ )
+
+
+def add_article(name):
+ """ Returns a string containing the correct indefinite article ('a' or 'an')
+ prefixed to the specified string.
+ """
+ if name[:1].lower() in 'aeiou':
+ return 'an ' + name
+
+ return 'a ' + name
+
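+# Illustrative results (inputs hypothetical):
+#
+# add_article('Image') # -> 'an Image'
+# class_of(5) # -> 'an int'
+# class_of('Image') # -> 'an Image' (strings are treated as names)
+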
+
+def repr_type(obj):
+ """ Return a string representation of a value and its type for readable
+ error messages.
+ """
+ the_type = type(obj)
if six.PY2 and the_type is InstanceType:
- # Old-style class.
- the_type = obj.__class__
- msg = '%r %r' % (obj, the_type)
- return msg
-
-
-def is_trait(t):
- """ Returns whether the given value is an instance or subclass of TraitType.
- """
- return (isinstance(t, TraitType) or
- (isinstance(t, type) and issubclass(t, TraitType)))
-
-
-def parse_notifier_name(names):
- """Convert the name argument to a list of names.
-
- Examples
- --------
-
- >>> parse_notifier_name([])
- [All]
- >>> parse_notifier_name('a')
- ['a']
- >>> parse_notifier_name(['a', 'b'])
- ['a', 'b']
- >>> parse_notifier_name(All)
- [All]
- """
+ # Old-style class.
+ the_type = obj.__class__
+ msg = '%r %r' % (obj, the_type)
+ return msg
+
+
+def is_trait(t):
+ """ Returns whether the given value is an instance or subclass of TraitType.
+ """
+ return (isinstance(t, TraitType) or
+ (isinstance(t, type) and issubclass(t, TraitType)))
+
+
+def parse_notifier_name(names):
+ """Convert the name argument to a list of names.
+
+ Examples
+ --------
+
+ >>> parse_notifier_name([])
+ [All]
+ >>> parse_notifier_name('a')
+ ['a']
+ >>> parse_notifier_name(['a', 'b'])
+ ['a', 'b']
+ >>> parse_notifier_name(All)
+ [All]
+ """
if names is All or isinstance(names, six.string_types):
- return [names]
+ return [names]
else:
- if not names or All in names:
- return [All]
- for n in names:
+ if not names or All in names:
+ return [All]
+ for n in names:
if not isinstance(n, six.string_types):
raise TypeError("names must be strings, not %r" % n)
- return names
-
-
-class _SimpleTest:
- def __init__ ( self, value ): self.value = value
- def __call__ ( self, test ):
- return test == self.value
- def __repr__(self):
- return "<SimpleTest(%r)" % self.value
- def __str__(self):
- return self.__repr__()
-
-
-def getmembers(object, predicate=None):
- """A safe version of inspect.getmembers that handles missing attributes.
-
- This is useful when there are descriptor based attributes that for
- some reason raise AttributeError even though they exist. This happens
- in zope.interface with the __provides__ attribute.
- """
- results = []
- for key in dir(object):
- try:
- value = getattr(object, key)
- except AttributeError:
- pass
- else:
- if not predicate or predicate(value):
- results.append((key, value))
- results.sort()
- return results
-
-def _validate_link(*tuples):
- """Validate arguments for traitlet link functions"""
- for t in tuples:
- if not len(t) == 2:
- raise TypeError("Each linked traitlet must be specified as (HasTraits, 'trait_name'), not %r" % t)
- obj, trait_name = t
- if not isinstance(obj, HasTraits):
- raise TypeError("Each object must be HasTraits, not %r" % type(obj))
- if not trait_name in obj.traits():
- raise TypeError("%r has no trait %r" % (obj, trait_name))
-
-class link(object):
- """Link traits from different objects together so they remain in sync.
-
- Parameters
- ----------
- source : (object / attribute name) pair
- target : (object / attribute name) pair
-
- Examples
- --------
-
- >>> c = link((src, 'value'), (tgt, 'value'))
- >>> src.value = 5 # updates other objects as well
- """
- updating = False
-
- def __init__(self, source, target):
- _validate_link(source, target)
- self.source, self.target = source, target
- try:
- setattr(target[0], target[1], getattr(source[0], source[1]))
- finally:
- source[0].observe(self._update_target, names=source[1])
- target[0].observe(self._update_source, names=target[1])
-
- @contextlib.contextmanager
- def _busy_updating(self):
- self.updating = True
- try:
- yield
- finally:
- self.updating = False
-
- def _update_target(self, change):
- if self.updating:
- return
- with self._busy_updating():
+ return names
+
+
+class _SimpleTest:
+ def __init__ ( self, value ): self.value = value
+ def __call__ ( self, test ):
+ return test == self.value
+ def __repr__(self):
+ return "<SimpleTest(%r)" % self.value
+ def __str__(self):
+ return self.__repr__()
+
+
+def getmembers(object, predicate=None):
+ """A safe version of inspect.getmembers that handles missing attributes.
+
+ This is useful when there are descriptor based attributes that for
+ some reason raise AttributeError even though they exist. This happens
+ in zope.interface with the __provides__ attribute.
+ """
+ results = []
+ for key in dir(object):
+ try:
+ value = getattr(object, key)
+ except AttributeError:
+ pass
+ else:
+ if not predicate or predicate(value):
+ results.append((key, value))
+ results.sort()
+ return results
+
+def _validate_link(*tuples):
+ """Validate arguments for traitlet link functions"""
+ for t in tuples:
+ if not len(t) == 2:
+ raise TypeError("Each linked traitlet must be specified as (HasTraits, 'trait_name'), not %r" % t)
+ obj, trait_name = t
+ if not isinstance(obj, HasTraits):
+ raise TypeError("Each object must be HasTraits, not %r" % type(obj))
+ if not trait_name in obj.traits():
+ raise TypeError("%r has no trait %r" % (obj, trait_name))
+
+class link(object):
+ """Link traits from different objects together so they remain in sync.
+
+ Parameters
+ ----------
+ source : (object / attribute name) pair
+ target : (object / attribute name) pair
+
+ Examples
+ --------
+
+ >>> c = link((src, 'value'), (tgt, 'value'))
+ >>> src.value = 5 # updates other objects as well
+ """
+ updating = False
+
+ def __init__(self, source, target):
+ _validate_link(source, target)
+ self.source, self.target = source, target
+ try:
+ setattr(target[0], target[1], getattr(source[0], source[1]))
+ finally:
+ source[0].observe(self._update_target, names=source[1])
+ target[0].observe(self._update_source, names=target[1])
+
+ @contextlib.contextmanager
+ def _busy_updating(self):
+ self.updating = True
+ try:
+ yield
+ finally:
+ self.updating = False
+
+ def _update_target(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
setattr(self.target[0], self.target[1], change.new)
-
- def _update_source(self, change):
- if self.updating:
- return
- with self._busy_updating():
+
+ def _update_source(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
setattr(self.source[0], self.source[1], change.new)
-
- def unlink(self):
- self.source[0].unobserve(self._update_target, names=self.source[1])
- self.target[0].unobserve(self._update_source, names=self.target[1])
- self.source, self.target = None, None
-
-
-class directional_link(object):
- """Link the trait of a source object with traits of target objects.
-
- Parameters
- ----------
- source : (object, attribute name) pair
- target : (object, attribute name) pair
- transform: callable (optional)
- Data transformation between source and target.
-
- Examples
- --------
-
- >>> c = directional_link((src, 'value'), (tgt, 'value'))
- >>> src.value = 5 # updates target objects
- >>> tgt.value = 6 # does not update source object
- """
- updating = False
-
- def __init__(self, source, target, transform=None):
- self._transform = transform if transform else lambda x: x
- _validate_link(source, target)
- self.source, self.target = source, target
- try:
- setattr(target[0], target[1],
- self._transform(getattr(source[0], source[1])))
- finally:
- self.source[0].observe(self._update, names=self.source[1])
-
- @contextlib.contextmanager
- def _busy_updating(self):
- self.updating = True
- try:
- yield
- finally:
- self.updating = False
-
- def _update(self, change):
- if self.updating:
- return
- with self._busy_updating():
- setattr(self.target[0], self.target[1],
+
+ def unlink(self):
+ self.source[0].unobserve(self._update_target, names=self.source[1])
+ self.target[0].unobserve(self._update_source, names=self.target[1])
+ self.source, self.target = None, None
+
+
+class directional_link(object):
+ """Link the trait of a source object with traits of target objects.
+
+ Parameters
+ ----------
+ source : (object, attribute name) pair
+ target : (object, attribute name) pair
+ transform: callable (optional)
+ Data transformation between source and target.
+
+ Examples
+ --------
+
+ >>> c = directional_link((src, 'value'), (tgt, 'value'))
+ >>> src.value = 5 # updates target objects
+ >>> tgt.value = 6 # does not update source object
+ """
+ updating = False
+
+ def __init__(self, source, target, transform=None):
+ self._transform = transform if transform else lambda x: x
+ _validate_link(source, target)
+ self.source, self.target = source, target
+ try:
+ setattr(target[0], target[1],
+ self._transform(getattr(source[0], source[1])))
+ finally:
+ self.source[0].observe(self._update, names=self.source[1])
+
+ @contextlib.contextmanager
+ def _busy_updating(self):
+ self.updating = True
+ try:
+ yield
+ finally:
+ self.updating = False
+
+ def _update(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
+ setattr(self.target[0], self.target[1],
self._transform(change.new))
-
- def unlink(self):
- self.source[0].unobserve(self._update, names=self.source[1])
- self.source, self.target = None, None
-
-dlink = directional_link
-
-
-#-----------------------------------------------------------------------------
+
+ def unlink(self):
+ self.source[0].unobserve(self._update, names=self.source[1])
+ self.source, self.target = None, None
+
+dlink = directional_link
+
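+# Sketch: a one-way link with a transform (src/tgt hypothetical):
+#
+# c = directional_link((src, 'value'), (tgt, 'value'),
+# transform=lambda x: 2 * x)
+# src.value = 3 # tgt.value becomes 6; setting tgt.value leaves src alone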
+
+#-----------------------------------------------------------------------------
# Base Descriptor Class
-#-----------------------------------------------------------------------------
-
-
-class BaseDescriptor(object):
- """Base descriptor class
-
- Notes
- -----
- This implements Python's descriptor protocol.
-
- This class is the base class for all such descriptors. The
- only magic we use is a custom metaclass for the main :class:`HasTraits`
- class that does the following:
-
- 1. Sets the :attr:`name` attribute of every :class:`BaseDescriptor`
- instance in the class dict to the name of the attribute.
- 2. Sets the :attr:`this_class` attribute of every :class:`BaseDescriptor`
- instance in the class dict to the *class* that declared the trait.
- This is used by the :class:`This` trait to allow subclasses to
- accept superclasses for :class:`This` values.
- """
-
- name = None
- this_class = None
-
- def class_init(self, cls, name):
- """Part of the initialization which may depend on the underlying
- HasDescriptors class.
-
- It is typically overloaded for specific types.
-
- This method is called by :meth:`MetaHasDescriptors.__init__`
- passing the class (`cls`) and `name` under which the descriptor
- has been assigned.
- """
- self.this_class = cls
- self.name = name
-
- def instance_init(self, obj):
- """Part of the initialization which may depend on the underlying
- HasDescriptors instance.
-
- It is typically overloaded for specific types.
-
- This method is called by :meth:`HasTraits.__new__` and in the
- :meth:`BaseDescriptor.instance_init` method of descriptors holding
- other descriptors.
- """
- pass
-
-
-class TraitType(BaseDescriptor):
- """A base class for all trait types.
- """
-
- metadata = {}
- default_value = Undefined
- allow_none = False
- read_only = False
- info_text = 'any value'
-
+#-----------------------------------------------------------------------------
+
+
+class BaseDescriptor(object):
+ """Base descriptor class
+
+ Notes
+ -----
+ This implements Python's descriptor protocol.
+
+ This class is the base class for all such descriptors. The
+ only magic we use is a custom metaclass for the main :class:`HasTraits`
+ class that does the following:
+
+ 1. Sets the :attr:`name` attribute of every :class:`BaseDescriptor`
+ instance in the class dict to the name of the attribute.
+ 2. Sets the :attr:`this_class` attribute of every :class:`BaseDescriptor`
+ instance in the class dict to the *class* that declared the trait.
+ This is used by the :class:`This` trait to allow subclasses to
+ accept superclasses for :class:`This` values.
+ """
+
+ name = None
+ this_class = None
+
+ def class_init(self, cls, name):
+ """Part of the initialization which may depend on the underlying
+ HasDescriptors class.
+
+ It is typically overloaded for specific types.
+
+ This method is called by :meth:`MetaHasDescriptors.__init__`
+ passing the class (`cls`) and `name` under which the descriptor
+ has been assigned.
+ """
+ self.this_class = cls
+ self.name = name
+
+ def instance_init(self, obj):
+ """Part of the initialization which may depend on the underlying
+ HasDescriptors instance.
+
+ It is typically overloaded for specific types.
+
+ This method is called by :meth:`HasTraits.__new__` and in the
+ :meth:`BaseDescriptor.instance_init` method of descriptors holding
+ other descriptors.
+ """
+ pass
+
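+# A minimal, hypothetical subclass showing the two hooks in use:
+#
+# class LoggedDescriptor(BaseDescriptor):
+# def class_init(self, cls, name):
+# super(LoggedDescriptor, self).class_init(cls, name) # records name/this_class
+# def instance_init(self, obj):
+# print('attached %s to %r' % (self.name, obj))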
+
+class TraitType(BaseDescriptor):
+ """A base class for all trait types.
+ """
+
+ metadata = {}
+ default_value = Undefined
+ allow_none = False
+ read_only = False
+ info_text = 'any value'
+
def __init__(self, default_value=Undefined, allow_none=False, read_only=None, help=None,
config=None, **kwargs):
- """Declare a traitlet.
-
- If *allow_none* is True, None is a valid value in addition to any
- values that are normally valid. The default is up to the subclass.
- For most trait types, the default value for ``allow_none`` is False.
-
- Extra metadata can be associated with the traitlet using the .tag() convenience method
- or by using the traitlet instance's .metadata dictionary.
- """
- if default_value is not Undefined:
- self.default_value = default_value
+ """Declare a traitlet.
+
+ If *allow_none* is True, None is a valid value in addition to any
+ values that are normally valid. The default is up to the subclass.
+ For most trait types, the default value for ``allow_none`` is False.
+
+ Extra metadata can be associated with the traitlet using the .tag() convenience method
+ or by using the traitlet instance's .metadata dictionary.
+ """
+ if default_value is not Undefined:
+ self.default_value = default_value
if allow_none:
- self.allow_none = allow_none
- if read_only is not None:
- self.read_only = read_only
- self.help = help if help is not None else ''
-
+ self.allow_none = allow_none
+ if read_only is not None:
+ self.read_only = read_only
+ self.help = help if help is not None else ''
+
if len(kwargs) > 0:
- stacklevel = 1
- f = inspect.currentframe()
- # count supers to determine stacklevel for warning
- while f.f_code.co_name == '__init__':
- stacklevel += 1
- f = f.f_back
+ stacklevel = 1
+ f = inspect.currentframe()
+ # count supers to determine stacklevel for warning
+ while f.f_code.co_name == '__init__':
+ stacklevel += 1
+ f = f.f_back
mod = f.f_globals.get('__name__') or ''
pkg = mod.split('.', 1)[0]
key = tuple(['metadata-tag', pkg] + sorted(kwargs))
@@ -450,365 +450,365 @@ class TraitType(BaseDescriptor):
"With traitlets 4.1, metadata should be set using the .tag() method, "
"e.g., Int().tag(key1='value1', key2='value2')" % (kwargs,),
DeprecationWarning, stacklevel=stacklevel)
- if len(self.metadata) > 0:
- self.metadata = self.metadata.copy()
+ if len(self.metadata) > 0:
+ self.metadata = self.metadata.copy()
self.metadata.update(kwargs)
- else:
+ else:
self.metadata = kwargs
- else:
- self.metadata = self.metadata.copy()
+ else:
+ self.metadata = self.metadata.copy()
if config is not None:
self.metadata['config'] = config
-
- # We add help to the metadata during a deprecation period so that
- # code that looks for the help string there can find it.
- if help is not None:
- self.metadata['help'] = help
-
- def get_default_value(self):
- """DEPRECATED: Retrieve the static default value for this trait.
-
- Use self.default_value instead
- """
+
+ # We add help to the metadata during a deprecation period so that
+ # code that looks for the help string there can find it.
+ if help is not None:
+ self.metadata['help'] = help
+
+ def get_default_value(self):
+ """DEPRECATED: Retrieve the static default value for this trait.
+
+ Use self.default_value instead
+ """
warn("get_default_value is deprecated in traitlets 4.0: use the .default_value attribute", DeprecationWarning,
- stacklevel=2)
- return self.default_value
-
- def init_default_value(self, obj):
- """DEPRECATED: Set the static default value for the trait type.
- """
+ stacklevel=2)
+ return self.default_value
+
+ def init_default_value(self, obj):
+ """DEPRECATED: Set the static default value for the trait type.
+ """
warn("init_default_value is deprecated in traitlets 4.0, and may be removed in the future", DeprecationWarning,
- stacklevel=2)
- value = self._validate(obj, self.default_value)
- obj._trait_values[self.name] = value
- return value
-
- def _dynamic_default_callable(self, obj):
- """Retrieve a callable to calculate the default for this traitlet.
-
- This looks for:
-
+ stacklevel=2)
+ value = self._validate(obj, self.default_value)
+ obj._trait_values[self.name] = value
+ return value
+
+ def _dynamic_default_callable(self, obj):
+ """Retrieve a callable to calculate the default for this traitlet.
+
+ This looks for:
+
* default generators registered with the @default descriptor.
- * obj._{name}_default() on the class with the traitlet, or a subclass
- that obj belongs to.
- * trait.make_dynamic_default, which is defined by Instance
-
- If none of these exist, it returns None.
- """
- # Traitlets without a name are not on the instance, e.g. in List or Union
- if self.name:
-
- # Only look for default handlers in classes derived from self.this_class.
- mro = type(obj).mro()
- meth_name = '_%s_default' % self.name
- for cls in mro[:mro.index(self.this_class) + 1]:
- if hasattr(cls, '_trait_default_generators'):
- default_handler = cls._trait_default_generators.get(self.name)
- if default_handler is not None and default_handler.this_class == cls:
- return types.MethodType(default_handler.func, obj)
-
- if meth_name in cls.__dict__:
- method = getattr(obj, meth_name)
- return method
-
- return getattr(self, 'make_dynamic_default', None)
-
- def instance_init(self, obj):
- # If no dynamic initialiser is present, and the trait implementation or
- # its use provides a static default, transfer that to obj._trait_values.
+ * obj._{name}_default() on the class with the traitlet, or a subclass
+ that obj belongs to.
+ * trait.make_dynamic_default, which is defined by Instance
+
+ If none of these exist, it returns None.
+ """
+ # Traitlets without a name are not on the instance, e.g. in List or Union
+ if self.name:
+
+ # Only look for default handlers in classes derived from self.this_class.
+ mro = type(obj).mro()
+ meth_name = '_%s_default' % self.name
+ for cls in mro[:mro.index(self.this_class) + 1]:
+ if hasattr(cls, '_trait_default_generators'):
+ default_handler = cls._trait_default_generators.get(self.name)
+ if default_handler is not None and default_handler.this_class == cls:
+ return types.MethodType(default_handler.func, obj)
+
+ if meth_name in cls.__dict__:
+ method = getattr(obj, meth_name)
+ return method
+
+ return getattr(self, 'make_dynamic_default', None)
+
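# --- Illustrative sketch (editor's example): the two hooks that
# _dynamic_default_callable resolves, assuming the standalone `traitlets`
# package.
from traitlets import HasTraits, Int, default

class Bar(HasTraits):
    x = Int()
    y = Int()

    @default('x')           # registered in _trait_default_generators
    def _x_gen(self):
        return 10

    def _y_default(self):    # legacy _{name}_default spelling, found by the mro scan
        return 20

b = Bar()
assert (b.x, b.y) == (10, 20)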
+ def instance_init(self, obj):
+ # If no dynamic initialiser is present, and the trait implementation or
+ # its use provides a static default, transfer that to obj._trait_values.
with obj.cross_validation_lock:
if (self._dynamic_default_callable(obj) is None) \
and (self.default_value is not Undefined):
v = self._validate(obj, self.default_value)
if self.name is not None:
obj._trait_values[self.name] = v
-
+
def get(self, obj, cls=None):
- try:
- value = obj._trait_values[self.name]
- except KeyError:
- # Check for a dynamic initializer.
- dynamic_default = self._dynamic_default_callable(obj)
- if dynamic_default is None:
- raise TraitError("No default value found for %s trait of %r"
- % (self.name, obj))
- value = self._validate(obj, dynamic_default())
- obj._trait_values[self.name] = value
- return value
- except Exception:
- # This should never be reached.
- raise TraitError('Unexpected error in TraitType: '
- 'default value not set properly')
- else:
- return value
-
- def __get__(self, obj, cls=None):
- """Get the value of the trait by self.name for the instance.
-
- Default values are instantiated when :meth:`HasTraits.__new__`
- is called. Thus by the time this method gets called either the
- default value or a user defined value (they called :meth:`__set__`)
- is in the :class:`HasTraits` instance.
- """
- if obj is None:
- return self
- else:
- return self.get(obj, cls)
-
- def set(self, obj, value):
- new_value = self._validate(obj, value)
- try:
- old_value = obj._trait_values[self.name]
- except KeyError:
- old_value = self.default_value
-
- obj._trait_values[self.name] = new_value
- try:
- silent = bool(old_value == new_value)
- except:
- # if there is an error in comparing, default to notify
- silent = False
- if silent is not True:
- # we explicitly compare silent to True just in case the equality
- # comparison above returns something other than True/False
- obj._notify_trait(self.name, old_value, new_value)
-
- def __set__(self, obj, value):
- """Set the value of the trait by self.name for the instance.
-
- Values pass through a validation stage where errors are raised when
- improper types, or types that cannot be coerced, are encountered.
- """
- if self.read_only:
- raise TraitError('The "%s" trait is read-only.' % self.name)
- else:
- self.set(obj, value)
-
- def _validate(self, obj, value):
- if value is None and self.allow_none:
- return value
- if hasattr(self, 'validate'):
- value = self.validate(obj, value)
- if obj._cross_validation_lock is False:
- value = self._cross_validate(obj, value)
- return value
-
- def _cross_validate(self, obj, value):
- if self.name in obj._trait_validators:
+ try:
+ value = obj._trait_values[self.name]
+ except KeyError:
+ # Check for a dynamic initializer.
+ dynamic_default = self._dynamic_default_callable(obj)
+ if dynamic_default is None:
+ raise TraitError("No default value found for %s trait of %r"
+ % (self.name, obj))
+ value = self._validate(obj, dynamic_default())
+ obj._trait_values[self.name] = value
+ return value
+ except Exception:
+ # This should never be reached.
+ raise TraitError('Unexpected error in TraitType: '
+ 'default value not set properly')
+ else:
+ return value
+
+ def __get__(self, obj, cls=None):
+ """Get the value of the trait by self.name for the instance.
+
+ Default values are instantiated when :meth:`HasTraits.__new__`
+ is called. Thus by the time this method gets called either the
+ default value or a user defined value (they called :meth:`__set__`)
+ is in the :class:`HasTraits` instance.
+ """
+ if obj is None:
+ return self
+ else:
+ return self.get(obj, cls)
+
+ def set(self, obj, value):
+ new_value = self._validate(obj, value)
+ try:
+ old_value = obj._trait_values[self.name]
+ except KeyError:
+ old_value = self.default_value
+
+ obj._trait_values[self.name] = new_value
+ try:
+ silent = bool(old_value == new_value)
+ except:
+ # if there is an error in comparing, default to notify
+ silent = False
+ if silent is not True:
+ # we explicitly compare silent to True just in case the equality
+ # comparison above returns something other than True/False
+ obj._notify_trait(self.name, old_value, new_value)
+
+ def __set__(self, obj, value):
+ """Set the value of the trait by self.name for the instance.
+
+ Values pass through a validation stage where errors are raised when
+ improper types, or types that cannot be coerced, are encountered.
+ """
+ if self.read_only:
+ raise TraitError('The "%s" trait is read-only.' % self.name)
+ else:
+ self.set(obj, value)
+
+ def _validate(self, obj, value):
+ if value is None and self.allow_none:
+ return value
+ if hasattr(self, 'validate'):
+ value = self.validate(obj, value)
+ if obj._cross_validation_lock is False:
+ value = self._cross_validate(obj, value)
+ return value
+
+ def _cross_validate(self, obj, value):
+ if self.name in obj._trait_validators:
proposal = Bunch({'trait': self, 'value': value, 'owner': obj})
- value = obj._trait_validators[self.name](obj, proposal)
- elif hasattr(obj, '_%s_validate' % self.name):
- meth_name = '_%s_validate' % self.name
- cross_validate = getattr(obj, meth_name)
- _deprecated_method(cross_validate, obj.__class__, meth_name,
- "use @validate decorator instead.")
- value = cross_validate(value, self)
- return value
-
- def __or__(self, other):
- if isinstance(other, Union):
- return Union([self] + other.trait_types)
- else:
- return Union([self, other])
-
- def info(self):
- return self.info_text
-
- def error(self, obj, value):
- if obj is not None:
- e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
- % (self.name, class_of(obj),
- self.info(), repr_type(value))
- else:
- e = "The '%s' trait must be %s, but a value of %r was specified." \
- % (self.name, self.info(), repr_type(value))
- raise TraitError(e)
-
- def get_metadata(self, key, default=None):
- """DEPRECATED: Get a metadata value.
-
- Use .metadata[key] or .metadata.get(key, default) instead.
- """
- if key == 'help':
- msg = "use the instance .help string directly, like x.help"
- else:
- msg = "use the instance .metadata dictionary directly, like x.metadata[key] or x.metadata.get(key, default)"
+ value = obj._trait_validators[self.name](obj, proposal)
+ elif hasattr(obj, '_%s_validate' % self.name):
+ meth_name = '_%s_validate' % self.name
+ cross_validate = getattr(obj, meth_name)
+ _deprecated_method(cross_validate, obj.__class__, meth_name,
+ "use @validate decorator instead.")
+ value = cross_validate(value, self)
+ return value
+
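# --- Illustrative sketch (editor's example): set() validates the assignment
# and only notifies observers when the compared values differ.
from traitlets import HasTraits, Int

class Counter(HasTraits):
    n = Int(0)

seen = []
c = Counter()
c.observe(lambda change: seen.append((change.old, change.new)), names='n')
c.n = 0   # old == new, so the assignment is "silent"
c.n = 1   # a real change: observers fire after validation
assert seen == [(0, 1)]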
+ def __or__(self, other):
+ if isinstance(other, Union):
+ return Union([self] + other.trait_types)
+ else:
+ return Union([self, other])
+
+ def info(self):
+ return self.info_text
+
+ def error(self, obj, value):
+ if obj is not None:
+ e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
+ % (self.name, class_of(obj),
+ self.info(), repr_type(value))
+ else:
+ e = "The '%s' trait must be %s, but a value of %r was specified." \
+ % (self.name, self.info(), repr_type(value))
+ raise TraitError(e)
+
+ def get_metadata(self, key, default=None):
+ """DEPRECATED: Get a metadata value.
+
+ Use .metadata[key] or .metadata.get(key, default) instead.
+ """
+ if key == 'help':
+ msg = "use the instance .help string directly, like x.help"
+ else:
+ msg = "use the instance .metadata dictionary directly, like x.metadata[key] or x.metadata.get(key, default)"
warn("Deprecated in traitlets 4.1, " + msg, DeprecationWarning, stacklevel=2)
- return self.metadata.get(key, default)
-
- def set_metadata(self, key, value):
- """DEPRECATED: Set a metadata key/value.
-
- Use .metadata[key] = value instead.
- """
- if key == 'help':
- msg = "use the instance .help string directly, like x.help = value"
- else:
- msg = "use the instance .metadata dictionary directly, like x.metadata[key] = value"
+ return self.metadata.get(key, default)
+
+ def set_metadata(self, key, value):
+ """DEPRECATED: Set a metadata key/value.
+
+ Use .metadata[key] = value instead.
+ """
+ if key == 'help':
+ msg = "use the instance .help string directly, like x.help = value"
+ else:
+ msg = "use the instance .metadata dictionary directly, like x.metadata[key] = value"
warn("Deprecated in traitlets 4.1, " + msg, DeprecationWarning, stacklevel=2)
- self.metadata[key] = value
-
- def tag(self, **metadata):
- """Sets metadata and returns self.
-
- This allows convenient metadata tagging when initializing the trait, such as:
-
- >>> Int(0).tag(config=True, sync=True)
- """
+ self.metadata[key] = value
+
+ def tag(self, **metadata):
+ """Sets metadata and returns self.
+
+ This allows convenient metadata tagging when initializing the trait, such as:
+
+ >>> Int(0).tag(config=True, sync=True)
+ """
maybe_constructor_keywords = set(metadata.keys()).intersection({'help','allow_none', 'read_only', 'default_value'})
if maybe_constructor_keywords:
warn('The following attributes are set using `tag`, but appear to be constructor keyword arguments: %s ' %
maybe_constructor_keywords, UserWarning, stacklevel=2)
- self.metadata.update(metadata)
- return self
-
- def default_value_repr(self):
- return repr(self.default_value)
-
-#-----------------------------------------------------------------------------
-# The HasTraits implementation
-#-----------------------------------------------------------------------------
-
-class _CallbackWrapper(object):
- """An object adapting a on_trait_change callback into an observe callback.
-
- The comparison operator __eq__ is implemented to enable removal of wrapped
- callbacks.
- """
-
- def __init__(self, cb):
- self.cb = cb
- # Bound methods have an additional 'self' argument.
- offset = -1 if isinstance(self.cb, types.MethodType) else 0
- self.nargs = len(getargspec(cb)[0]) + offset
- if (self.nargs > 4):
- raise TraitError('a trait changed callback must have 0-4 arguments.')
-
- def __eq__(self, other):
- # The wrapper is equal to the wrapped element
- if isinstance(other, _CallbackWrapper):
- return self.cb == other.cb
- else:
- return self.cb == other
-
- def __call__(self, change):
- # The wrapper is callable
- if self.nargs == 0:
- self.cb()
- elif self.nargs == 1:
+ self.metadata.update(metadata)
+ return self
+
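# --- Illustrative sketch (editor's example): .tag() attaches metadata and
# returns the trait itself, so it chains off the constructor.
from traitlets import HasTraits, Int

class Server(HasTraits):
    port = Int(8080).tag(config=True, sync=True)

assert Server.port.metadata == {'config': True, 'sync': True}
# Passing constructor keywords such as help= or allow_none= to .tag() would
# only set metadata and trigger the UserWarning emitted above.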
+ def default_value_repr(self):
+ return repr(self.default_value)
+
+#-----------------------------------------------------------------------------
+# The HasTraits implementation
+#-----------------------------------------------------------------------------
+
+class _CallbackWrapper(object):
+ """An object adapting a on_trait_change callback into an observe callback.
+
+ The comparison operator __eq__ is implemented to enable removal of wrapped
+ callbacks.
+ """
+
+ def __init__(self, cb):
+ self.cb = cb
+ # Bound methods have an additional 'self' argument.
+ offset = -1 if isinstance(self.cb, types.MethodType) else 0
+ self.nargs = len(getargspec(cb)[0]) + offset
+ if (self.nargs > 4):
+ raise TraitError('a trait changed callback must have 0-4 arguments.')
+
+ def __eq__(self, other):
+ # The wrapper is equal to the wrapped element
+ if isinstance(other, _CallbackWrapper):
+ return self.cb == other.cb
+ else:
+ return self.cb == other
+
+ def __call__(self, change):
+ # The wrapper is callable
+ if self.nargs == 0:
+ self.cb()
+ elif self.nargs == 1:
self.cb(change.name)
- elif self.nargs == 2:
+ elif self.nargs == 2:
self.cb(change.name, change.new)
- elif self.nargs == 3:
+ elif self.nargs == 3:
self.cb(change.name, change.old, change.new)
- elif self.nargs == 4:
+ elif self.nargs == 4:
self.cb(change.name, change.old, change.new, change.owner)
-
-def _callback_wrapper(cb):
- if isinstance(cb, _CallbackWrapper):
- return cb
- else:
- return _CallbackWrapper(cb)
-
-
-class MetaHasDescriptors(type):
- """A metaclass for HasDescriptors.
-
- This metaclass makes sure that any TraitType class attributes are
- instantiated and sets their name attribute.
- """
-
- def __new__(mcls, name, bases, classdict):
- """Create the HasDescriptors class."""
+
+def _callback_wrapper(cb):
+ if isinstance(cb, _CallbackWrapper):
+ return cb
+ else:
+ return _CallbackWrapper(cb)
+
+
+class MetaHasDescriptors(type):
+ """A metaclass for HasDescriptors.
+
+ This metaclass makes sure that any TraitType class attributes are
+ instantiated and sets their name attribute.
+ """
+
+ def __new__(mcls, name, bases, classdict):
+ """Create the HasDescriptors class."""
for k, v in classdict.items():
- # ----------------------------------------------------------------
- # Support of deprecated behavior allowing for TraitType types
- # to be used instead of TraitType instances.
- if inspect.isclass(v) and issubclass(v, TraitType):
+ # ----------------------------------------------------------------
+ # Support of deprecated behavior allowing for TraitType types
+ # to be used instead of TraitType instances.
+ if inspect.isclass(v) and issubclass(v, TraitType):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)."
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=2)
- classdict[k] = v()
- # ----------------------------------------------------------------
-
- return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict)
-
- def __init__(cls, name, bases, classdict):
- """Finish initializing the HasDescriptors class."""
- super(MetaHasDescriptors, cls).__init__(name, bases, classdict)
- cls.setup_class(classdict)
-
- def setup_class(cls, classdict):
- """Setup descriptor instance on the class
-
- This sets the :attr:`this_class` and :attr:`name` attributes of each
- BaseDescriptor in the class dict of the newly created ``cls`` before
- calling their :attr:`class_init` method.
- """
+ DeprecationWarning, stacklevel=2)
+ classdict[k] = v()
+ # ----------------------------------------------------------------
+
+ return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict)
+
+ def __init__(cls, name, bases, classdict):
+ """Finish initializing the HasDescriptors class."""
+ super(MetaHasDescriptors, cls).__init__(name, bases, classdict)
+ cls.setup_class(classdict)
+
+ def setup_class(cls, classdict):
+ """Setup descriptor instance on the class
+
+ This sets the :attr:`this_class` and :attr:`name` attributes of each
+ BaseDescriptor in the class dict of the newly created ``cls`` before
+ calling their :attr:`class_init` method.
+ """
for k, v in classdict.items():
- if isinstance(v, BaseDescriptor):
- v.class_init(cls, k)
-
-
-class MetaHasTraits(MetaHasDescriptors):
- """A metaclass for HasTraits."""
-
- def setup_class(cls, classdict):
- cls._trait_default_generators = {}
- super(MetaHasTraits, cls).setup_class(classdict)
-
-
-def observe(*names, **kwargs):
- """A decorator which can be used to observe Traits on a class.
-
+ if isinstance(v, BaseDescriptor):
+ v.class_init(cls, k)
+
+
+class MetaHasTraits(MetaHasDescriptors):
+ """A metaclass for HasTraits."""
+
+ def setup_class(cls, classdict):
+ cls._trait_default_generators = {}
+ super(MetaHasTraits, cls).setup_class(classdict)
+
+
+def observe(*names, **kwargs):
+ """A decorator which can be used to observe Traits on a class.
+
The handler passed to the decorator will be called with one ``change``
dict argument. The change dictionary at least holds a 'type' key and a
'name' key, corresponding respectively to the type of notification and the
name of the attribute that triggered the notification.
-
- Other keys may be passed depending on the value of 'type'. In the case
- where type is 'change', we also have the following keys:
- * ``owner`` : the HasTraits instance
- * ``old`` : the old value of the modified trait attribute
- * ``new`` : the new value of the modified trait attribute
- * ``name`` : the name of the modified trait attribute.
-
- Parameters
- ----------
- *names
- The str names of the Traits to observe on the object.
+
+ Other keys may be passed depending on the value of 'type'. In the case
+ where type is 'change', we also have the following keys:
+ * ``owner`` : the HasTraits instance
+ * ``old`` : the old value of the modified trait attribute
+ * ``new`` : the new value of the modified trait attribute
+ * ``name`` : the name of the modified trait attribute.
+
+ Parameters
+ ----------
+ *names
+ The str names of the Traits to observe on the object.
type: str, kwarg-only
The type of event to observe (e.g. 'change')
- """
+ """
if not names:
raise TypeError("Please specify at least one trait name to observe.")
for name in names:
if name is not All and not isinstance(name, six.string_types):
raise TypeError("trait names to observe must be strings or All, not %r" % name)
- return ObserveHandler(names, type=kwargs.get('type', 'change'))
-
-
-def observe_compat(func):
- """Backward-compatibility shim decorator for observers
-
- Use with:
-
- @observe('name')
- @observe_compat
- def _foo_changed(self, change):
- ...
-
- With this, `super()._foo_changed(self, name, old, new)` in subclasses will still work.
- Allows adoption of the new observer API without breaking subclasses that override the handler and call super().
- """
- def compatible_observer(self, change_or_name, old=Undefined, new=Undefined):
- if isinstance(change_or_name, dict):
- change = change_or_name
- else:
- clsname = self.__class__.__name__
+ return ObserveHandler(names, type=kwargs.get('type', 'change'))
+
+
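# --- Illustrative sketch (editor's example): declaring an observer with the
# decorator above.
from traitlets import HasTraits, Int, observe

class Temperature(HasTraits):
    celsius = Int()

    @observe('celsius')
    def _report(self, change):
        # change is a Bunch: type='change', plus name, old, new and owner
        print("%s: %r -> %r" % (change.name, change.old, change.new))

t = Temperature()
t.celsius = 21   # prints "celsius: 0 -> 21"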
+def observe_compat(func):
+ """Backward-compatibility shim decorator for observers
+
+ Use with:
+
+ @observe('name')
+ @observe_compat
+ def _foo_changed(self, change):
+ ...
+
+ With this, `super()._foo_changed(self, name, old, new)` in subclasses will still work.
+ Allows adoption of the new observer API without breaking subclasses that override the handler and call super().
+ """
+ def compatible_observer(self, change_or_name, old=Undefined, new=Undefined):
+ if isinstance(change_or_name, dict):
+ change = change_or_name
+ else:
+ clsname = self.__class__.__name__
warn("A parent of %s._%s_changed has adopted the new (traitlets 4.1) @observe(change) API" % (
- clsname, change_or_name), DeprecationWarning)
+ clsname, change_or_name), DeprecationWarning)
change = Bunch(
type='change',
old=old,
@@ -816,182 +816,182 @@ def observe_compat(func):
name=change_or_name,
owner=self,
)
- return func(self, change)
- return compatible_observer
-
-
-def validate(*names):
- """A decorator to register cross validator of HasTraits object's state
- when a Trait is set.
-
- The handler passed to the decorator must have one ``proposal`` dict argument.
- The proposal dictionary must hold the following keys:
- * ``owner`` : the HasTraits instance
- * ``value`` : the proposed value for the modified trait attribute
- * ``trait`` : the TraitType instance associated with the attribute
-
- Parameters
- ----------
- names
- The str names of the Traits to validate.
-
- Notes
- -----
+ return func(self, change)
+ return compatible_observer
+
+
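# --- Illustrative sketch (editor's example): the shim keeps an old-style
# handler signature working under the new API. Assumes `observe_compat` is
# importable from the standalone traitlets package, as in traitlets 4.1+.
from traitlets import HasTraits, Int, observe, observe_compat

class Legacy(HasTraits):
    x = Int()

    @observe('x')
    @observe_compat
    def _x_changed(self, change):
        print("new value:", change.new)

Legacy().x = 3   # new-style call; a subclass calling
                 # super()._x_changed(name, old, new) is adapted too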
+def validate(*names):
+ """A decorator to register cross validator of HasTraits object's state
+ when a Trait is set.
+
+ The handler passed to the decorator must have one ``proposal`` dict argument.
+ The proposal dictionary must hold the following keys:
+ * ``owner`` : the HasTraits instance
+ * ``value`` : the proposed value for the modified trait attribute
+ * ``trait`` : the TraitType instance associated with the attribute
+
+ Parameters
+ ----------
+ names
+ The str names of the Traits to validate.
+
+ Notes
+ -----
Since the owner has access to the ``HasTraits`` instance via the 'owner' key,
- the registered cross validator could potentially make changes to attributes
- of the ``HasTraits`` instance. However, we recommend against doing so. The reason
- is that the cross-validation of attributes may run in arbitrary order when
+ the registered cross validator could potentially make changes to attributes
+ of the ``HasTraits`` instance. However, we recommend against doing so. The reason
+ is that the cross-validation of attributes may run in arbitrary order when
exiting the ``hold_trait_notifications`` context, and such changes may not
- commute.
- """
+ commute.
+ """
if not names:
raise TypeError("Please specify at least one trait name to validate.")
for name in names:
if name is not All and not isinstance(name, six.string_types):
raise TypeError("trait names to validate must be strings or All, not %r" % name)
- return ValidateHandler(names)
-
-
-def default(name):
- """ A decorator which assigns a dynamic default for a Trait on a HasTraits object.
-
- Parameters
- ----------
- name
- The str name of the Trait on the object whose default should be generated.
-
- Notes
- -----
- Unlike observers and validators which are properties of the HasTraits
- instance, default value generators are class-level properties.
-
- Moreover, default generators are only invoked if they are registered in
- subclasses of `this_type`.
-
- ::
-
- class A(HasTraits):
- bar = Int()
-
- @default('bar')
- def get_bar_default(self):
- return 11
-
-
- class B(A):
- bar = Float() # This trait ignores the default generator defined in
- # the base class A
-
-
- class C(B):
-
- @default('bar')
- def some_other_default(self): # This default generator should not be
- return 3.0 # ignored since it is defined in a
- # class derived from B.bar.this_class.
- """
+ return ValidateHandler(names)
+
+
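# --- Illustrative sketch (editor's example): a cross-validator clamping a
# proposed value.
from traitlets import HasTraits, Int, validate

class Window(HasTraits):
    width = Int(100)

    @validate('width')
    def _clamp_width(self, proposal):
        # proposal is a Bunch with 'owner', 'value' and 'trait' keys
        return min(max(proposal.value, 1), 4096)

w = Window()
w.width = 10000
assert w.width == 4096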
+def default(name):
+ """ A decorator which assigns a dynamic default for a Trait on a HasTraits object.
+
+ Parameters
+ ----------
+ name
+ The str name of the Trait on the object whose default should be generated.
+
+ Notes
+ -----
+ Unlike observers and validators which are properties of the HasTraits
+ instance, default value generators are class-level properties.
+
+ Moreover, default generators are only invoked if they are registered in
+ subclasses of `this_type`.
+
+ ::
+
+ class A(HasTraits):
+ bar = Int()
+
+ @default('bar')
+ def get_bar_default(self):
+ return 11
+
+
+ class B(A):
+ bar = Float() # This trait ignores the default generator defined in
+ # the base class A
+
+
+ class C(B):
+
+ @default('bar')
+ def some_other_default(self): # This default generator should not be
+ return 3.0 # ignored since it is defined in a
+ # class derived from B.bar.this_class.
+ """
if not isinstance(name, six.string_types):
raise TypeError("Trait name must be a string or All, not %r" % name)
- return DefaultHandler(name)
-
-
-class EventHandler(BaseDescriptor):
-
- def _init_call(self, func):
- self.func = func
- return self
-
- def __call__(self, *args, **kwargs):
+ return DefaultHandler(name)
+
+
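# --- Illustrative sketch (editor's example): a runnable version of the A/B/C
# example from the docstring above.
from traitlets import HasTraits, Int, Float, default

class A(HasTraits):
    bar = Int()

    @default('bar')
    def get_bar_default(self):
        return 11

class B(A):
    bar = Float()   # redefining the trait discards A's generator

class C(B):
    @default('bar')
    def some_other_default(self):
        return 3.0

assert A().bar == 11
assert B().bar == 0.0   # falls back to Float's static default
assert C().bar == 3.0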
+class EventHandler(BaseDescriptor):
+
+ def _init_call(self, func):
+ self.func = func
+ return self
+
+ def __call__(self, *args, **kwargs):
"""Pass `*args` and `**kwargs` to the handler's function if it exists."""
- if hasattr(self, 'func'):
- return self.func(*args, **kwargs)
- else:
- return self._init_call(*args, **kwargs)
-
- def __get__(self, inst, cls=None):
- if inst is None:
- return self
- return types.MethodType(self.func, inst)
-
-
-class ObserveHandler(EventHandler):
-
- def __init__(self, names, type):
- self.trait_names = names
- self.type = type
-
- def instance_init(self, inst):
- inst.observe(self, self.trait_names, type=self.type)
-
-
-class ValidateHandler(EventHandler):
-
- def __init__(self, names):
- self.trait_names = names
-
- def instance_init(self, inst):
- inst._register_validator(self, self.trait_names)
-
-
-class DefaultHandler(EventHandler):
-
- def __init__(self, name):
- self.trait_name = name
-
- def class_init(self, cls, name):
- super(DefaultHandler, self).class_init(cls, name)
- cls._trait_default_generators[self.trait_name] = self
-
-
+ if hasattr(self, 'func'):
+ return self.func(*args, **kwargs)
+ else:
+ return self._init_call(*args, **kwargs)
+
+ def __get__(self, inst, cls=None):
+ if inst is None:
+ return self
+ return types.MethodType(self.func, inst)
+
+
+class ObserveHandler(EventHandler):
+
+ def __init__(self, names, type):
+ self.trait_names = names
+ self.type = type
+
+ def instance_init(self, inst):
+ inst.observe(self, self.trait_names, type=self.type)
+
+
+class ValidateHandler(EventHandler):
+
+ def __init__(self, names):
+ self.trait_names = names
+
+ def instance_init(self, inst):
+ inst._register_validator(self, self.trait_names)
+
+
+class DefaultHandler(EventHandler):
+
+ def __init__(self, name):
+ self.trait_name = name
+
+ def class_init(self, cls, name):
+ super(DefaultHandler, self).class_init(cls, name)
+ cls._trait_default_generators[self.trait_name] = self
+
+
class HasDescriptors(six.with_metaclass(MetaHasDescriptors, object)):
- """The base class for all classes that have descriptors.
- """
-
+ """The base class for all classes that have descriptors.
+ """
+
def __new__(cls, *args, **kwargs):
- # This is needed because object.__new__ only accepts
- # the cls argument.
- new_meth = super(HasDescriptors, cls).__new__
- if new_meth is object.__new__:
- inst = new_meth(cls)
- else:
+ # This is needed because object.__new__ only accepts
+ # the cls argument.
+ new_meth = super(HasDescriptors, cls).__new__
+ if new_meth is object.__new__:
+ inst = new_meth(cls)
+ else:
inst = new_meth(cls, *args, **kwargs)
inst.setup_instance(*args, **kwargs)
- return inst
-
+ return inst
+
def setup_instance(self, *args, **kwargs):
"""
This is called **before** self.__init__ is called.
"""
self._cross_validation_lock = False
- cls = self.__class__
- for key in dir(cls):
- # Some descriptors raise AttributeError like zope.interface's
- # __provides__ attributes even though they exist. This causes
- # AttributeErrors even though they are listed in dir(cls).
- try:
- value = getattr(cls, key)
- except AttributeError:
- pass
- else:
- if isinstance(value, BaseDescriptor):
- value.instance_init(self)
-
-
+ cls = self.__class__
+ for key in dir(cls):
+ # Some descriptors raise AttributeError like zope.interface's
+ # __provides__ attributes even though they exist. This causes
+ # AttributeErrors even though they are listed in dir(cls).
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ pass
+ else:
+ if isinstance(value, BaseDescriptor):
+ value.instance_init(self)
+
+
class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
-
+
def setup_instance(self, *args, **kwargs):
- self._trait_values = {}
- self._trait_notifiers = {}
- self._trait_validators = {}
+ self._trait_values = {}
+ self._trait_notifiers = {}
+ self._trait_validators = {}
super(HasTraits, self).setup_instance(*args, **kwargs)
-
+
def __init__(self, *args, **kwargs):
- # Allow trait values to be set using keyword arguments.
- # We need to use setattr for this to trigger validation and
- # notifications.
+ # Allow trait values to be set using keyword arguments.
+ # We need to use setattr for this to trigger validation and
+ # notifications.
super_args = args
super_kwargs = {}
- with self.hold_trait_notifications():
+ with self.hold_trait_notifications():
for key, value in kwargs.items():
if self.has_trait(key):
setattr(self, key, value)
@@ -1017,35 +1017,35 @@ class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
DeprecationWarning,
stacklevel=2,
)
-
- def __getstate__(self):
- d = self.__dict__.copy()
- # event handlers stored on an instance are
- # expected to be reinstantiated during a
- # recall of instance_init during __setstate__
- d['_trait_notifiers'] = {}
- d['_trait_validators'] = {}
- return d
-
- def __setstate__(self, state):
- self.__dict__ = state.copy()
-
- # event handlers are reassigned to self
- cls = self.__class__
- for key in dir(cls):
- # Some descriptors raise AttributeError like zope.interface's
- # __provides__ attributes even though they exist. This causes
- # AttributeErrors even though they are listed in dir(cls).
- try:
- value = getattr(cls, key)
- except AttributeError:
- pass
- else:
- if isinstance(value, EventHandler):
- value.instance_init(self)
-
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # event handlers stored on an instance are
+ # expected to be reinstantiated during a
+ # recall of instance_init during __setstate__
+ d['_trait_notifiers'] = {}
+ d['_trait_validators'] = {}
+ return d
+
+ def __setstate__(self, state):
+ self.__dict__ = state.copy()
+
+ # event handlers are reassigned to self
+ cls = self.__class__
+ for key in dir(cls):
+ # Some descriptors raise AttributeError like zope.interface's
+ # __provides__ attributes even though they exist. This causes
+ # AttributeErrors even though they are listed in dir(cls).
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ pass
+ else:
+ if isinstance(value, EventHandler):
+ value.instance_init(self)
+
@property
- @contextlib.contextmanager
+ @contextlib.contextmanager
def cross_validation_lock(self):
"""
A contextmanager for running a block with our cross validation lock set
@@ -1065,72 +1065,72 @@ class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
self._cross_validation_lock = False
@contextlib.contextmanager
- def hold_trait_notifications(self):
- """Context manager for bundling trait change notifications and cross
- validation.
-
- Use this when doing multiple trait assignments (init, config), to avoid
- race conditions in trait notifiers requesting other trait values.
- All trait notifications will fire after all values have been assigned.
- """
+ def hold_trait_notifications(self):
+ """Context manager for bundling trait change notifications and cross
+ validation.
+
+ Use this when doing multiple trait assignments (init, config), to avoid
+ race conditions in trait notifiers requesting other trait values.
+ All trait notifications will fire after all values have been assigned.
+ """
if self._cross_validation_lock:
- yield
- return
- else:
- cache = {}
- notify_change = self.notify_change
-
- def compress(past_changes, change):
- """Merges the provided change with the last if possible."""
- if past_changes is None:
- return [change]
- else:
+ yield
+ return
+ else:
+ cache = {}
+ notify_change = self.notify_change
+
+ def compress(past_changes, change):
+ """Merges the provided change with the last if possible."""
+ if past_changes is None:
+ return [change]
+ else:
if past_changes[-1]['type'] == 'change' and change.type == 'change':
past_changes[-1]['new'] = change.new
- else:
- # In case of changes other than 'change', append the notification.
- past_changes.append(change)
- return past_changes
-
- def hold(change):
+ else:
+ # In case of changes other than 'change', append the notification.
+ past_changes.append(change)
+ return past_changes
+
+ def hold(change):
name = change.name
- cache[name] = compress(cache.get(name), change)
-
- try:
- # Replace notify_change with `hold`, caching and compressing
- # notifications, disable cross validation and yield.
- self.notify_change = hold
- self._cross_validation_lock = True
- yield
- # Cross validate final values when context is released.
- for name in list(cache.keys()):
- trait = getattr(self.__class__, name)
- value = trait._cross_validate(self, getattr(self, name))
+ cache[name] = compress(cache.get(name), change)
+
+ try:
+ # Replace notify_change with `hold`, caching and compressing
+ # notifications, disable cross validation and yield.
+ self.notify_change = hold
+ self._cross_validation_lock = True
+ yield
+ # Cross validate final values when context is released.
+ for name in list(cache.keys()):
+ trait = getattr(self.__class__, name)
+ value = trait._cross_validate(self, getattr(self, name))
self.set_trait(name, value)
- except TraitError as e:
- # Roll back in case of TraitError during final cross validation.
- self.notify_change = lambda x: None
- for name, changes in cache.items():
- for change in changes[::-1]:
- # TODO: Separate in a rollback function per notification type.
+ except TraitError as e:
+ # Roll back in case of TraitError during final cross validation.
+ self.notify_change = lambda x: None
+ for name, changes in cache.items():
+ for change in changes[::-1]:
+ # TODO: Separate in a rollback function per notification type.
if change.type == 'change':
if change.old is not Undefined:
self.set_trait(name, change.old)
- else:
- self._trait_values.pop(name)
- cache = {}
- raise e
- finally:
- self._cross_validation_lock = False
+ else:
+ self._trait_values.pop(name)
+ cache = {}
+ raise e
+ finally:
+ self._cross_validation_lock = False
# Restore method retrieval from class
del self.notify_change
-
- # trigger delayed notifications
- for changes in cache.values():
- for change in changes:
- self.notify_change(change)
-
- def _notify_trait(self, name, old_value, new_value):
+
+ # trigger delayed notifications
+ for changes in cache.values():
+ for change in changes:
+ self.notify_change(change)
+
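# --- Illustrative sketch (editor's example): notifications are held,
# compressed, and fired once the context exits.
from traitlets import HasTraits, Int

class Point(HasTraits):
    x = Int()
    y = Int()

fired = []
p = Point()
p.observe(lambda change: fired.append(change.name), names=['x', 'y'])
with p.hold_trait_notifications():
    p.x = 1
    p.y = 2
    assert fired == []               # nothing delivered inside the block
assert sorted(fired) == ['x', 'y']   # delivered once the context exits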
+ def _notify_trait(self, name, old_value, new_value):
self.notify_change(Bunch(
name=name,
old=old_value,
@@ -1138,194 +1138,194 @@ class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
owner=self,
type='change',
))
-
- def notify_change(self, change):
+
+ def notify_change(self, change):
if not isinstance(change, Bunch):
# cast to bunch if given a dict
change = Bunch(change)
name, type = change.name, change.type
-
- callables = []
- callables.extend(self._trait_notifiers.get(name, {}).get(type, []))
- callables.extend(self._trait_notifiers.get(name, {}).get(All, []))
- callables.extend(self._trait_notifiers.get(All, {}).get(type, []))
- callables.extend(self._trait_notifiers.get(All, {}).get(All, []))
-
- # Now static ones
- magic_name = '_%s_changed' % name
- if hasattr(self, magic_name):
- class_value = getattr(self.__class__, magic_name)
- if not isinstance(class_value, ObserveHandler):
- _deprecated_method(class_value, self.__class__, magic_name,
- "use @observe and @unobserve instead.")
- cb = getattr(self, magic_name)
- # Only append the magic method if it was not manually registered
- if cb not in callables:
- callables.append(_callback_wrapper(cb))
-
- # Call them all now
- # Traits catches and logs errors here. I allow them to raise
- for c in callables:
- # Bound methods have an additional 'self' argument.
-
- if isinstance(c, _CallbackWrapper):
- c = c.__call__
+
+ callables = []
+ callables.extend(self._trait_notifiers.get(name, {}).get(type, []))
+ callables.extend(self._trait_notifiers.get(name, {}).get(All, []))
+ callables.extend(self._trait_notifiers.get(All, {}).get(type, []))
+ callables.extend(self._trait_notifiers.get(All, {}).get(All, []))
+
+ # Now static ones
+ magic_name = '_%s_changed' % name
+ if hasattr(self, magic_name):
+ class_value = getattr(self.__class__, magic_name)
+ if not isinstance(class_value, ObserveHandler):
+ _deprecated_method(class_value, self.__class__, magic_name,
+ "use @observe and @unobserve instead.")
+ cb = getattr(self, magic_name)
+ # Only append the magic method if it was not manually registered
+ if cb not in callables:
+ callables.append(_callback_wrapper(cb))
+
+ # Call them all now
+ # Traits catches and logs errors here. I allow them to raise
+ for c in callables:
+ # Bound methods have an additional 'self' argument.
+
+ if isinstance(c, _CallbackWrapper):
+ c = c.__call__
elif isinstance(c, EventHandler) and c.name is not None:
- c = getattr(self, c.name)
-
- c(change)
-
- def _add_notifiers(self, handler, name, type):
- if name not in self._trait_notifiers:
- nlist = []
- self._trait_notifiers[name] = {type: nlist}
- else:
- if type not in self._trait_notifiers[name]:
- nlist = []
- self._trait_notifiers[name][type] = nlist
- else:
- nlist = self._trait_notifiers[name][type]
- if handler not in nlist:
- nlist.append(handler)
-
- def _remove_notifiers(self, handler, name, type):
- try:
- if handler is None:
- del self._trait_notifiers[name][type]
- else:
- self._trait_notifiers[name][type].remove(handler)
- except KeyError:
- pass
-
- def on_trait_change(self, handler=None, name=None, remove=False):
- """DEPRECATED: Setup a handler to be called when a trait changes.
-
- This is used to setup dynamic notifications of trait changes.
-
- Static handlers can be created by creating methods on a HasTraits
- subclass with the naming convention '_[traitname]_changed'. Thus,
- to create static handler for the trait 'a', create the method
- _a_changed(self, name, old, new) (fewer arguments can be used, see
- below).
-
- If `remove` is True and `handler` is not specified, all change
- handlers for the specified name are uninstalled.
-
- Parameters
- ----------
- handler : callable, None
- A callable that is called when a trait changes. Its
- signature can be handler(), handler(name), handler(name, new),
- handler(name, old, new), or handler(name, old, new, self).
- name : list, str, None
- If None, the handler will apply to all traits. If a list
- of str, handler will apply to all names in the list. If a
- str, the handler will apply just to that name.
- remove : bool
- If False (the default), then install the handler. If True
- then uninstall it.
- """
+ c = getattr(self, c.name)
+
+ c(change)
+
+ def _add_notifiers(self, handler, name, type):
+ if name not in self._trait_notifiers:
+ nlist = []
+ self._trait_notifiers[name] = {type: nlist}
+ else:
+ if type not in self._trait_notifiers[name]:
+ nlist = []
+ self._trait_notifiers[name][type] = nlist
+ else:
+ nlist = self._trait_notifiers[name][type]
+ if handler not in nlist:
+ nlist.append(handler)
+
+ def _remove_notifiers(self, handler, name, type):
+ try:
+ if handler is None:
+ del self._trait_notifiers[name][type]
+ else:
+ self._trait_notifiers[name][type].remove(handler)
+ except KeyError:
+ pass
+
+ def on_trait_change(self, handler=None, name=None, remove=False):
+ """DEPRECATED: Setup a handler to be called when a trait changes.
+
+ This is used to setup dynamic notifications of trait changes.
+
+ Static handlers can be created by creating methods on a HasTraits
+ subclass with the naming convention '_[traitname]_changed'. Thus,
+ to create static handler for the trait 'a', create the method
+ _a_changed(self, name, old, new) (fewer arguments can be used, see
+ below).
+
+ If `remove` is True and `handler` is not specified, all change
+ handlers for the specified name are uninstalled.
+
+ Parameters
+ ----------
+ handler : callable, None
+ A callable that is called when a trait changes. Its
+ signature can be handler(), handler(name), handler(name, new),
+ handler(name, old, new), or handler(name, old, new, self).
+ name : list, str, None
+ If None, the handler will apply to all traits. If a list
+ of str, handler will apply to all names in the list. If a
+ str, the handler will apply just to that name.
+ remove : bool
+ If False (the default), then install the handler. If True
+ then uninstall it.
+ """
warn("on_trait_change is deprecated in traitlets 4.1: use observe instead",
- DeprecationWarning, stacklevel=2)
- if name is None:
- name = All
- if remove:
- self.unobserve(_callback_wrapper(handler), names=name)
- else:
- self.observe(_callback_wrapper(handler), names=name)
-
- def observe(self, handler, names=All, type='change'):
- """Setup a handler to be called when a trait changes.
-
- This is used to setup dynamic notifications of trait changes.
-
- Parameters
- ----------
- handler : callable
- A callable that is called when a trait changes. Its
+ DeprecationWarning, stacklevel=2)
+ if name is None:
+ name = All
+ if remove:
+ self.unobserve(_callback_wrapper(handler), names=name)
+ else:
+ self.observe(_callback_wrapper(handler), names=name)
+
+ def observe(self, handler, names=All, type='change'):
+ """Setup a handler to be called when a trait changes.
+
+ This is used to setup dynamic notifications of trait changes.
+
+ Parameters
+ ----------
+ handler : callable
+ A callable that is called when a trait changes. Its
signature should be ``handler(change)``, where ``change`` is a
dictionary. The change dictionary at least holds a 'type' key.
- * ``type``: the type of notification.
- Other keys may be passed depending on the value of 'type'. In the
- case where type is 'change', we also have the following keys:
- * ``owner`` : the HasTraits instance
- * ``old`` : the old value of the modified trait attribute
- * ``new`` : the new value of the modified trait attribute
- * ``name`` : the name of the modified trait attribute.
- names : list, str, All
- If names is All, the handler will apply to all traits. If a list
- of str, handler will apply to all names in the list. If a
- str, the handler will apply just to that name.
- type : str, All (default: 'change')
- The type of notification to filter by. If equal to All, then all
- notifications are passed to the observe handler.
- """
- names = parse_notifier_name(names)
- for n in names:
- self._add_notifiers(handler, n, type)
-
- def unobserve(self, handler, names=All, type='change'):
- """Remove a trait change handler.
-
+ * ``type``: the type of notification.
+ Other keys may be passed depending on the value of 'type'. In the
+ case where type is 'change', we also have the following keys:
+ * ``owner`` : the HasTraits instance
+ * ``old`` : the old value of the modified trait attribute
+ * ``new`` : the new value of the modified trait attribute
+ * ``name`` : the name of the modified trait attribute.
+ names : list, str, All
+ If names is All, the handler will apply to all traits. If a list
+ of str, handler will apply to all names in the list. If a
+ str, the handler will apply just to that name.
+ type : str, All (default: 'change')
+ The type of notification to filter by. If equal to All, then all
+ notifications are passed to the observe handler.
+ """
+ names = parse_notifier_name(names)
+ for n in names:
+ self._add_notifiers(handler, n, type)
+
+ def unobserve(self, handler, names=All, type='change'):
+ """Remove a trait change handler.
+
This is used to unregister handlers to trait change notifications.
-
- Parameters
- ----------
- handler : callable
- The callable called when a trait attribute changes.
- names : list, str, All (default: All)
- The names of the traits for which the specified handler should be
- uninstalled. If names is All, the specified handler is uninstalled
- from the list of notifiers corresponding to all changes.
- type : str or All (default: 'change')
- The type of notification to filter by. If All, the specified handler
- is uninstalled from the list of notifiers corresponding to all types.
- """
- names = parse_notifier_name(names)
- for n in names:
- self._remove_notifiers(handler, n, type)
-
- def unobserve_all(self, name=All):
- """Remove trait change handlers of any type for the specified name.
- If name is not specified, removes all trait notifiers."""
- if name is All:
- self._trait_notifiers = {}
- else:
- try:
- del self._trait_notifiers[name]
- except KeyError:
- pass
-
- def _register_validator(self, handler, names):
+
+ Parameters
+ ----------
+ handler : callable
+ The callable called when a trait attribute changes.
+ names : list, str, All (default: All)
+ The names of the traits for which the specified handler should be
+ uninstalled. If names is All, the specified handler is uninstalled
+ from the list of notifiers corresponding to all changes.
+ type : str or All (default: 'change')
+ The type of notification to filter by. If All, the specified handler
+ is uninstalled from the list of notifiers corresponding to all types.
+ """
+ names = parse_notifier_name(names)
+ for n in names:
+ self._remove_notifiers(handler, n, type)
+
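# --- Illustrative sketch (editor's example): registering and removing a
# dynamic observer at runtime.
from traitlets import HasTraits, Int

class Model(HasTraits):
    value = Int()

log = []
handler = lambda change: log.append(change.new)
m = Model()
m.observe(handler, names='value')
m.value = 1
m.unobserve(handler, names='value')
m.value = 2        # no longer observed
assert log == [1]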
+ def unobserve_all(self, name=All):
+ """Remove trait change handlers of any type for the specified name.
+ If name is not specified, removes all trait notifiers."""
+ if name is All:
+ self._trait_notifiers = {}
+ else:
+ try:
+ del self._trait_notifiers[name]
+ except KeyError:
+ pass
+
+ def _register_validator(self, handler, names):
"""Setup a handler to be called when a trait should be cross validated.
-
- This is used to set up dynamic notifications for cross-validation.
-
- If a validator is already registered for any of the provided names, a
+
+ This is used to set up dynamic notifications for cross-validation.
+
+ If a validator is already registered for any of the provided names, a
TraitError is raised and no new validator is registered.
-
- Parameters
- ----------
- handler : callable
- A callable that is called when the given trait is cross-validated.
+
+ Parameters
+ ----------
+ handler : callable
+ A callable that is called when the given trait is cross-validated.
Its signature is handler(proposal), where proposal is a Bunch (dictionary with attribute access)
with the following attributes/keys:
- * ``owner`` : the HasTraits instance
- * ``value`` : the proposed value for the modified trait attribute
- * ``trait`` : the TraitType instance associated with the attribute
- names : List of strings
- The names of the traits that should be cross-validated
- """
- for name in names:
- magic_name = '_%s_validate' % name
- if hasattr(self, magic_name):
- class_value = getattr(self.__class__, magic_name)
- if not isinstance(class_value, ValidateHandler):
- _deprecated_method(class_value, self.__class__, magic_name,
- "use @validate decorator instead.")
- for name in names:
- self._trait_validators[name] = handler
-
+ * ``owner`` : the HasTraits instance
+ * ``value`` : the proposed value for the modified trait attribute
+ * ``trait`` : the TraitType instance associated with the attribute
+ names : List of strings
+ The names of the traits that should be cross-validated
+ """
+ for name in names:
+ magic_name = '_%s_validate' % name
+ if hasattr(self, magic_name):
+ class_value = getattr(self.__class__, magic_name)
+ if not isinstance(class_value, ValidateHandler):
+ _deprecated_method(class_value, self.__class__, magic_name,
+ "use @validate decorator instead.")
+ for name in names:
+ self._trait_validators[name] = handler
+
def add_traits(self, **traits):
"""Dynamically add trait attributes to the HasTraits instance."""
self.__class__ = type(self.__class__.__name__, (self.__class__,),
@@ -1342,123 +1342,123 @@ class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
else:
getattr(cls, name).set(self, value)
- @classmethod
- def class_trait_names(cls, **metadata):
- """Get a list of all the names of this class' traits.
-
- This method is just like the :meth:`trait_names` method,
- but is unbound.
- """
+ @classmethod
+ def class_trait_names(cls, **metadata):
+ """Get a list of all the names of this class' traits.
+
+ This method is just like the :meth:`trait_names` method,
+ but is unbound.
+ """
return list(cls.class_traits(**metadata))
-
- @classmethod
- def class_traits(cls, **metadata):
- """Get a ``dict`` of all the traits of this class. The dictionary
- is keyed on the name and the values are the TraitType objects.
-
- This method is just like the :meth:`traits` method, but is unbound.
-
- The TraitTypes returned don't know anything about the values
- that the various HasTraits instances are holding.
-
- The metadata kwargs allow functions to be passed in which
- filter traits based on metadata values. The functions should
- take a single value as an argument and return a boolean. If
- any function returns False, then the trait is not included in
- the output. If a metadata key doesn't exist, None will be passed
- to the function.
- """
- traits = dict([memb for memb in getmembers(cls) if
- isinstance(memb[1], TraitType)])
-
- if len(metadata) == 0:
- return traits
-
- result = {}
- for name, trait in traits.items():
- for meta_name, meta_eval in metadata.items():
- if type(meta_eval) is not types.FunctionType:
- meta_eval = _SimpleTest(meta_eval)
- if not meta_eval(trait.metadata.get(meta_name, None)):
- break
- else:
- result[name] = trait
-
- return result
-
- @classmethod
- def class_own_traits(cls, **metadata):
- """Get a dict of all the traitlets defined on this class, not a parent.
-
- Works like `class_traits`, except for excluding traits from parents.
- """
- sup = super(cls, cls)
- return {n: t for (n, t) in cls.class_traits(**metadata).items()
- if getattr(sup, n, None) is not t}
-
- def has_trait(self, name):
- """Returns True if the object has a trait with the specified name."""
- return isinstance(getattr(self.__class__, name, None), TraitType)
-
- def trait_names(self, **metadata):
- """Get a list of all the names of this class' traits."""
+
+ @classmethod
+ def class_traits(cls, **metadata):
+ """Get a ``dict`` of all the traits of this class. The dictionary
+ is keyed on the name and the values are the TraitType objects.
+
+ This method is just like the :meth:`traits` method, but is unbound.
+
+ The TraitTypes returned don't know anything about the values
+ that the various HasTraits instances are holding.
+
+ The metadata kwargs allow functions to be passed in which
+ filter traits based on metadata values. The functions should
+ take a single value as an argument and return a boolean. If
+ any function returns False, then the trait is not included in
+ the output. If a metadata key doesn't exist, None will be passed
+ to the function.
+ """
+ traits = dict([memb for memb in getmembers(cls) if
+ isinstance(memb[1], TraitType)])
+
+ if len(metadata) == 0:
+ return traits
+
+ result = {}
+ for name, trait in traits.items():
+ for meta_name, meta_eval in metadata.items():
+ if type(meta_eval) is not types.FunctionType:
+ meta_eval = _SimpleTest(meta_eval)
+ if not meta_eval(trait.metadata.get(meta_name, None)):
+ break
+ else:
+ result[name] = trait
+
+ return result
+
+ @classmethod
+ def class_own_traits(cls, **metadata):
+ """Get a dict of all the traitlets defined on this class, not a parent.
+
+ Works like `class_traits`, except for excluding traits from parents.
+ """
+ sup = super(cls, cls)
+ return {n: t for (n, t) in cls.class_traits(**metadata).items()
+ if getattr(sup, n, None) is not t}
+
+ def has_trait(self, name):
+ """Returns True if the object has a trait with the specified name."""
+ return isinstance(getattr(self.__class__, name, None), TraitType)
+
+ def trait_names(self, **metadata):
+ """Get a list of all the names of this class' traits."""
return list(self.traits(**metadata))
-
- def traits(self, **metadata):
- """Get a ``dict`` of all the traits of this class. The dictionary
- is keyed on the name and the values are the TraitType objects.
-
- The TraitTypes returned don't know anything about the values
- that the various HasTraits instances are holding.
-
- The metadata kwargs allow functions to be passed in which
- filter traits based on metadata values. The functions should
- take a single value as an argument and return a boolean. If
- any function returns False, then the trait is not included in
- the output. If a metadata key doesn't exist, None will be passed
- to the function.
- """
- traits = dict([memb for memb in getmembers(self.__class__) if
- isinstance(memb[1], TraitType)])
-
- if len(metadata) == 0:
- return traits
-
- result = {}
- for name, trait in traits.items():
- for meta_name, meta_eval in metadata.items():
- if type(meta_eval) is not types.FunctionType:
- meta_eval = _SimpleTest(meta_eval)
- if not meta_eval(trait.metadata.get(meta_name, None)):
- break
- else:
- result[name] = trait
-
- return result
-
- def trait_metadata(self, traitname, key, default=None):
- """Get metadata values for trait by key."""
- try:
- trait = getattr(self.__class__, traitname)
- except AttributeError:
- raise TraitError("Class %s does not have a trait named %s" %
- (self.__class__.__name__, traitname))
+
+ def traits(self, **metadata):
+ """Get a ``dict`` of all the traits of this class. The dictionary
+ is keyed on the name and the values are the TraitType objects.
+
+ The TraitTypes returned don't know anything about the values
+ that the various HasTraits instances are holding.
+
+ The metadata kwargs allow functions to be passed in which
+ filter traits based on metadata values. The functions should
+ take a single value as an argument and return a boolean. If
+ any function returns False, then the trait is not included in
+ the output. If a metadata key doesn't exist, None will be passed
+ to the function.
+ """
+ traits = dict([memb for memb in getmembers(self.__class__) if
+ isinstance(memb[1], TraitType)])
+
+ if len(metadata) == 0:
+ return traits
+
+ result = {}
+ for name, trait in traits.items():
+ for meta_name, meta_eval in metadata.items():
+ if type(meta_eval) is not types.FunctionType:
+ meta_eval = _SimpleTest(meta_eval)
+ if not meta_eval(trait.metadata.get(meta_name, None)):
+ break
+ else:
+ result[name] = trait
+
+ return result
+
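# --- Illustrative sketch (editor's example): filtering traits by metadata,
# as implemented above.
from traitlets import HasTraits, Int, Unicode

class App(HasTraits):
    host = Unicode('localhost').tag(config=True)
    port = Int(8080).tag(config=True)
    secret = Unicode()

app = App()
assert sorted(app.traits(config=True)) == ['host', 'port']
# A callable receives the metadata value (or None when the key is absent):
assert 'secret' in app.traits(config=lambda v: v is None)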
+ def trait_metadata(self, traitname, key, default=None):
+ """Get metadata values for trait by key."""
+ try:
+ trait = getattr(self.__class__, traitname)
+ except AttributeError:
+ raise TraitError("Class %s does not have a trait named %s" %
+ (self.__class__.__name__, traitname))
metadata_name = '_' + traitname + '_metadata'
if hasattr(self, metadata_name) and key in getattr(self, metadata_name):
return getattr(self, metadata_name).get(key, default)
- else:
- return trait.metadata.get(key, default)
-
+ else:
+ return trait.metadata.get(key, default)
+
@classmethod
def class_own_trait_events(cls, name):
"""Get a dict of all event handlers defined on this class, not a parent.
-
+
Works like ``event_handlers``, except for excluding traits from parents.
"""
sup = super(cls, cls)
return {n: e for (n, e) in cls.events(name).items()
if getattr(sup, n, None) is not e}
-
+
@classmethod
def trait_events(cls, name=None):
"""Get a ``dict`` of all the event handlers of this class.
@@ -1485,329 +1485,329 @@ class HasTraits(six.with_metaclass(MetaHasTraits, HasDescriptors)):
events[k] = v
return events
-#-----------------------------------------------------------------------------
-# Actual TraitTypes implementations/subclasses
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# TraitTypes subclasses for handling classes and instances of classes
-#-----------------------------------------------------------------------------
-
-
-class ClassBasedTraitType(TraitType):
- """
- A trait with error reporting and string -> type resolution for Type,
- Instance and This.
- """
-
- def _resolve_string(self, string):
- """
- Resolve a string supplied for a type into an actual object.
- """
- return import_item(string)
-
- def error(self, obj, value):
- kind = type(value)
+#-----------------------------------------------------------------------------
+# Actual TraitTypes implementations/subclasses
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# TraitTypes subclasses for handling classes and instances of classes
+#-----------------------------------------------------------------------------
+
+
+class ClassBasedTraitType(TraitType):
+ """
+ A trait with error reporting and string -> type resolution for Type,
+ Instance and This.
+ """
+
+ def _resolve_string(self, string):
+ """
+ Resolve a string supplied for a type into an actual object.
+ """
+ return import_item(string)
+
+ def error(self, obj, value):
+ kind = type(value)
if six.PY2 and kind is InstanceType:
- msg = 'class %s' % value.__class__.__name__
- else:
- msg = '%s (i.e. %s)' % ( str( kind )[1:-1], repr( value ) )
-
- if obj is not None:
- e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
- % (self.name, class_of(obj),
- self.info(), msg)
- else:
- e = "The '%s' trait must be %s, but a value of %r was specified." \
- % (self.name, self.info(), msg)
-
- raise TraitError(e)
-
-
-class Type(ClassBasedTraitType):
- """A trait whose value must be a subclass of a specified class."""
-
+ msg = 'class %s' % value.__class__.__name__
+ else:
+ msg = '%s (i.e. %s)' % ( str( kind )[1:-1], repr( value ) )
+
+ if obj is not None:
+ e = "The '%s' trait of %s instance must be %s, but a value of %s was specified." \
+ % (self.name, class_of(obj),
+ self.info(), msg)
+ else:
+ e = "The '%s' trait must be %s, but a value of %r was specified." \
+ % (self.name, self.info(), msg)
+
+ raise TraitError(e)
+
+
+class Type(ClassBasedTraitType):
+ """A trait whose value must be a subclass of a specified class."""
+
def __init__ (self, default_value=Undefined, klass=None, **kwargs):
- """Construct a Type trait
-
- A Type trait specifies that its values must be subclasses of
- a particular class.
-
- If only ``default_value`` is given, it is used for the ``klass`` as
- well. If neither are given, both default to ``object``.
-
- Parameters
- ----------
- default_value : class, str or None
- The default value must be a subclass of klass. If an str,
- the str must be a fully specified class name, like 'foo.bar.Bah'.
-            The string is resolved into a real class when the parent
-            :class:`HasTraits` class is instantiated.
- klass : class, str [ default object ]
- Values of this trait must be a subclass of klass. The klass
- may be specified in a string like: 'foo.bar.MyClass'.
-            The string is resolved into a real class when the parent
-            :class:`HasTraits` class is instantiated.
- allow_none : bool [ default False ]
- Indicates whether None is allowed as an assignable value.
- """
- if default_value is Undefined:
- new_default_value = object if (klass is None) else klass
- else:
- new_default_value = default_value
-
- if klass is None:
- if (default_value is None) or (default_value is Undefined):
- klass = object
- else:
- klass = default_value
-
+ """Construct a Type trait
+
+ A Type trait specifies that its values must be subclasses of
+ a particular class.
+
+ If only ``default_value`` is given, it is used for the ``klass`` as
+ well. If neither are given, both default to ``object``.
+
+ Parameters
+ ----------
+ default_value : class, str or None
+ The default value must be a subclass of klass. If an str,
+ the str must be a fully specified class name, like 'foo.bar.Bah'.
+            The string is resolved into a real class when the parent
+            :class:`HasTraits` class is instantiated.
+ klass : class, str [ default object ]
+ Values of this trait must be a subclass of klass. The klass
+ may be specified in a string like: 'foo.bar.MyClass'.
+            The string is resolved into a real class when the parent
+            :class:`HasTraits` class is instantiated.
+ allow_none : bool [ default False ]
+ Indicates whether None is allowed as an assignable value.
+ """
+ if default_value is Undefined:
+ new_default_value = object if (klass is None) else klass
+ else:
+ new_default_value = default_value
+
+ if klass is None:
+ if (default_value is None) or (default_value is Undefined):
+ klass = object
+ else:
+ klass = default_value
+
if not (inspect.isclass(klass) or isinstance(klass, six.string_types)):
- raise TraitError("A Type trait must specify a class.")
-
- self.klass = klass
-
+ raise TraitError("A Type trait must specify a class.")
+
+ self.klass = klass
+
super(Type, self).__init__(new_default_value, **kwargs)
-
- def validate(self, obj, value):
- """Validates that the value is a valid object instance."""
+
+ def validate(self, obj, value):
+ """Validates that the value is a valid object instance."""
if isinstance(value, six.string_types):
- try:
- value = self._resolve_string(value)
- except ImportError:
- raise TraitError("The '%s' trait of %s instance must be a type, but "
- "%r could not be imported" % (self.name, obj, value))
- try:
- if issubclass(value, self.klass):
- return value
- except:
- pass
-
- self.error(obj, value)
-
- def info(self):
- """ Returns a description of the trait."""
+ try:
+ value = self._resolve_string(value)
+ except ImportError:
+ raise TraitError("The '%s' trait of %s instance must be a type, but "
+ "%r could not be imported" % (self.name, obj, value))
+ try:
+ if issubclass(value, self.klass):
+ return value
+ except:
+ pass
+
+ self.error(obj, value)
+
+ def info(self):
+ """ Returns a description of the trait."""
if isinstance(self.klass, six.string_types):
- klass = self.klass
- else:
+ klass = self.klass
+ else:
klass = self.klass.__module__ + '.' + self.klass.__name__
- result = "a subclass of '%s'" % klass
- if self.allow_none:
- return result + ' or None'
- return result
-
- def instance_init(self, obj):
- self._resolve_classes()
- super(Type, self).instance_init(obj)
-
- def _resolve_classes(self):
+ result = "a subclass of '%s'" % klass
+ if self.allow_none:
+ return result + ' or None'
+ return result
+
+ def instance_init(self, obj):
+ self._resolve_classes()
+ super(Type, self).instance_init(obj)
+
+ def _resolve_classes(self):
if isinstance(self.klass, six.string_types):
- self.klass = self._resolve_string(self.klass)
+ self.klass = self._resolve_string(self.klass)
if isinstance(self.default_value, six.string_types):
- self.default_value = self._resolve_string(self.default_value)
-
- def default_value_repr(self):
- value = self.default_value
+ self.default_value = self._resolve_string(self.default_value)
+
+ def default_value_repr(self):
+ value = self.default_value
if isinstance(value, six.string_types):
- return repr(value)
- else:
- return repr('{}.{}'.format(value.__module__, value.__name__))
-
-
-class Instance(ClassBasedTraitType):
- """A trait whose value must be an instance of a specified class.
-
- The value can also be an instance of a subclass of the specified class.
-
- Subclasses can declare default classes by overriding the klass attribute
- """
-
- klass = None
-
+ return repr(value)
+ else:
+ return repr('{}.{}'.format(value.__module__, value.__name__))
+
+
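A minimal usage sketch for ``Type``, assuming the standard ``traitlets`` import path for this vendored copy:

    from collections import OrderedDict
    from traitlets import HasTraits, Type

    class Registry(HasTraits):
        backend = Type(klass=dict)   # values must be subclasses of dict

    r = Registry()
    r.backend = OrderedDict          # accepted: OrderedDict subclasses dict
    r.backend = int                  # raises TraitError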
+class Instance(ClassBasedTraitType):
+ """A trait whose value must be an instance of a specified class.
+
+ The value can also be an instance of a subclass of the specified class.
+
+ Subclasses can declare default classes by overriding the klass attribute
+ """
+
+ klass = None
+
def __init__(self, klass=None, args=None, kw=None, **kwargs):
- """Construct an Instance trait.
-
- This trait allows values that are instances of a particular
- class or its subclasses. Our implementation is quite different
-        from that of enthought.traits as we don't allow instances to be used
- for klass and we handle the ``args`` and ``kw`` arguments differently.
-
- Parameters
- ----------
- klass : class, str
- The class that forms the basis for the trait. Class names
- can also be specified as strings, like 'foo.bar.Bar'.
- args : tuple
- Positional arguments for generating the default value.
- kw : dict
- Keyword arguments for generating the default value.
- allow_none : bool [ default False ]
- Indicates whether None is allowed as a value.
-
- Notes
- -----
- If both ``args`` and ``kw`` are None, then the default value is None.
- If ``args`` is a tuple and ``kw`` is a dict, then the default is
- created as ``klass(*args, **kw)``. If exactly one of ``args`` or ``kw`` is
- None, the None is replaced by ``()`` or ``{}``, respectively.
- """
- if klass is None:
- klass = self.klass
+ """Construct an Instance trait.
+
+ This trait allows values that are instances of a particular
+ class or its subclasses. Our implementation is quite different
+        from that of enthought.traits as we don't allow instances to be used
+ for klass and we handle the ``args`` and ``kw`` arguments differently.
+
+ Parameters
+ ----------
+ klass : class, str
+ The class that forms the basis for the trait. Class names
+ can also be specified as strings, like 'foo.bar.Bar'.
+ args : tuple
+ Positional arguments for generating the default value.
+ kw : dict
+ Keyword arguments for generating the default value.
+ allow_none : bool [ default False ]
+ Indicates whether None is allowed as a value.
+
+ Notes
+ -----
+ If both ``args`` and ``kw`` are None, then the default value is None.
+ If ``args`` is a tuple and ``kw`` is a dict, then the default is
+ created as ``klass(*args, **kw)``. If exactly one of ``args`` or ``kw`` is
+ None, the None is replaced by ``()`` or ``{}``, respectively.
+ """
+ if klass is None:
+ klass = self.klass
if (klass is not None) and (inspect.isclass(klass) or isinstance(klass, six.string_types)):
- self.klass = klass
- else:
- raise TraitError('The klass attribute must be a class'
- ' not: %r' % klass)
-
- if (kw is not None) and not isinstance(kw, dict):
- raise TraitError("The 'kw' argument must be a dict or None.")
- if (args is not None) and not isinstance(args, tuple):
- raise TraitError("The 'args' argument must be a tuple or None.")
-
- self.default_args = args
- self.default_kwargs = kw
-
+ self.klass = klass
+ else:
+ raise TraitError('The klass attribute must be a class'
+ ' not: %r' % klass)
+
+ if (kw is not None) and not isinstance(kw, dict):
+ raise TraitError("The 'kw' argument must be a dict or None.")
+ if (args is not None) and not isinstance(args, tuple):
+ raise TraitError("The 'args' argument must be a tuple or None.")
+
+ self.default_args = args
+ self.default_kwargs = kw
+
super(Instance, self).__init__(**kwargs)
-
- def validate(self, obj, value):
- if isinstance(value, self.klass):
- return value
- else:
- self.error(obj, value)
-
- def info(self):
+
+ def validate(self, obj, value):
+ if isinstance(value, self.klass):
+ return value
+ else:
+ self.error(obj, value)
+
+ def info(self):
if isinstance(self.klass, six.string_types):
- klass = self.klass
- else:
- klass = self.klass.__name__
- result = class_of(klass)
- if self.allow_none:
- return result + ' or None'
-
- return result
-
- def instance_init(self, obj):
- self._resolve_classes()
- super(Instance, self).instance_init(obj)
-
- def _resolve_classes(self):
+ klass = self.klass
+ else:
+ klass = self.klass.__name__
+ result = class_of(klass)
+ if self.allow_none:
+ return result + ' or None'
+
+ return result
+
+ def instance_init(self, obj):
+ self._resolve_classes()
+ super(Instance, self).instance_init(obj)
+
+ def _resolve_classes(self):
if isinstance(self.klass, six.string_types):
- self.klass = self._resolve_string(self.klass)
-
- def make_dynamic_default(self):
- if (self.default_args is None) and (self.default_kwargs is None):
- return None
- return self.klass(*(self.default_args or ()),
- **(self.default_kwargs or {}))
-
- def default_value_repr(self):
- return repr(self.make_dynamic_default())
-
-
-class ForwardDeclaredMixin(object):
- """
- Mixin for forward-declared versions of Instance and Type.
- """
- def _resolve_string(self, string):
- """
- Find the specified class name by looking for it in the module in which
- our this_class attribute was defined.
- """
- modname = self.this_class.__module__
- return import_item('.'.join([modname, string]))
-
-
-class ForwardDeclaredType(ForwardDeclaredMixin, Type):
- """
- Forward-declared version of Type.
- """
- pass
-
-
-class ForwardDeclaredInstance(ForwardDeclaredMixin, Instance):
- """
- Forward-declared version of Instance.
- """
- pass
-
-
-class This(ClassBasedTraitType):
- """A trait for instances of the class containing this trait.
-
-    Because of how and when class bodies are executed, the ``This``
-    trait can only have a default value of None. For this reason, and
-    because we always validate default values, ``allow_none`` is *always* true.
- """
-
- info_text = 'an instance of the same type as the receiver or None'
-
+ self.klass = self._resolve_string(self.klass)
+
+ def make_dynamic_default(self):
+ if (self.default_args is None) and (self.default_kwargs is None):
+ return None
+ return self.klass(*(self.default_args or ()),
+ **(self.default_kwargs or {}))
+
+ def default_value_repr(self):
+ return repr(self.make_dynamic_default())
+
+
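A minimal sketch of ``Instance`` with a dynamic default built as ``klass(*args, **kw)``, per the Notes above; the ``Event`` class is illustrative:

    import datetime
    from traitlets import HasTraits, Instance

    class Event(HasTraits):
        when = Instance(datetime.datetime, args=(2000, 1, 1))

    Event().when   # datetime.datetime(2000, 1, 1, 0, 0), created lazily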
+class ForwardDeclaredMixin(object):
+ """
+ Mixin for forward-declared versions of Instance and Type.
+ """
+ def _resolve_string(self, string):
+ """
+ Find the specified class name by looking for it in the module in which
+ our this_class attribute was defined.
+ """
+ modname = self.this_class.__module__
+ return import_item('.'.join([modname, string]))
+
+
+class ForwardDeclaredType(ForwardDeclaredMixin, Type):
+ """
+ Forward-declared version of Type.
+ """
+ pass
+
+
+class ForwardDeclaredInstance(ForwardDeclaredMixin, Instance):
+ """
+ Forward-declared version of Instance.
+ """
+ pass
+
+
+class This(ClassBasedTraitType):
+ """A trait for instances of the class containing this trait.
+
+    Because of how and when class bodies are executed, the ``This``
+    trait can only have a default value of None. For this reason, and
+    because we always validate default values, ``allow_none`` is *always* true.
+ """
+
+ info_text = 'an instance of the same type as the receiver or None'
+
def __init__(self, **kwargs):
super(This, self).__init__(None, **kwargs)
-
- def validate(self, obj, value):
- # What if value is a superclass of obj.__class__? This is
- # complicated if it was the superclass that defined the This
- # trait.
- if isinstance(value, self.this_class) or (value is None):
- return value
- else:
- self.error(obj, value)
-
-
-class Union(TraitType):
- """A trait type representing a Union type."""
-
+
+ def validate(self, obj, value):
+ # What if value is a superclass of obj.__class__? This is
+ # complicated if it was the superclass that defined the This
+ # trait.
+ if isinstance(value, self.this_class) or (value is None):
+ return value
+ else:
+ self.error(obj, value)
+
+
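A minimal sketch of ``This``; the ``Node`` class is illustrative:

    from traitlets import HasTraits, This

    class Node(HasTraits):
        parent = This()   # accepts Node instances (or None)

    root, child = Node(), Node()
    child.parent = root   # accepted
    child.parent = 42     # raises TraitError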
+class Union(TraitType):
+ """A trait type representing a Union type."""
+
def __init__(self, trait_types, **kwargs):
- """Construct a Union trait.
-
- This trait allows values that are allowed by at least one of the
- specified trait types. A Union traitlet cannot have metadata on
- its own, besides the metadata of the listed types.
-
- Parameters
- ----------
- trait_types: sequence
- The list of trait types of length at least 1.
-
- Notes
- -----
- Union([Float(), Bool(), Int()]) attempts to validate the provided values
- with the validation function of Float, then Bool, and finally Int.
- """
- self.trait_types = trait_types
+ """Construct a Union trait.
+
+ This trait allows values that are allowed by at least one of the
+ specified trait types. A Union traitlet cannot have metadata on
+ its own, besides the metadata of the listed types.
+
+ Parameters
+ ----------
+ trait_types: sequence
+ The list of trait types of length at least 1.
+
+ Notes
+ -----
+ Union([Float(), Bool(), Int()]) attempts to validate the provided values
+ with the validation function of Float, then Bool, and finally Int.
+ """
+ self.trait_types = trait_types
self.info_text = " or ".join([tt.info() for tt in self.trait_types])
super(Union, self).__init__(**kwargs)
-
- def class_init(self, cls, name):
- for trait_type in self.trait_types:
- trait_type.class_init(cls, None)
- super(Union, self).class_init(cls, name)
-
- def instance_init(self, obj):
- for trait_type in self.trait_types:
- trait_type.instance_init(obj)
- super(Union, self).instance_init(obj)
-
- def validate(self, obj, value):
+
+ def class_init(self, cls, name):
+ for trait_type in self.trait_types:
+ trait_type.class_init(cls, None)
+ super(Union, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ for trait_type in self.trait_types:
+ trait_type.instance_init(obj)
+ super(Union, self).instance_init(obj)
+
+ def validate(self, obj, value):
with obj.cross_validation_lock:
- for trait_type in self.trait_types:
- try:
- v = trait_type._validate(obj, value)
+ for trait_type in self.trait_types:
+ try:
+ v = trait_type._validate(obj, value)
# In the case of an element trait, the name is None
if self.name is not None:
setattr(obj, '_' + self.name + '_metadata', trait_type.metadata)
- return v
- except TraitError:
- continue
- self.error(obj, value)
-
- def __or__(self, other):
- if isinstance(other, Union):
- return Union(self.trait_types + other.trait_types)
- else:
- return Union(self.trait_types + [other])
-
+ return v
+ except TraitError:
+ continue
+ self.error(obj, value)
+
+ def __or__(self, other):
+ if isinstance(other, Union):
+ return Union(self.trait_types + other.trait_types)
+ else:
+ return Union(self.trait_types + [other])
+
def make_dynamic_default(self):
if self.default_value is not Undefined:
return self.default_value
@@ -1818,17 +1818,17 @@ class Union(TraitType):
return trait_type.make_dynamic_default()
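A minimal sketch of ``Union``; member traits are tried in declaration order, and the ``__or__`` above extends an existing Union with ``|``:

    from traitlets import HasTraits, Int, Unicode, Union

    class Job(HasTraits):
        ident = Union([Int(), Unicode()])

    j = Job()
    j.ident = 7                          # validated by Int
    j.ident = u'batch-7'                 # Int fails, Unicode accepts
    wider = Union([Int()]) | Unicode()   # Union([Int(), Unicode()])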
-#-----------------------------------------------------------------------------
-# Basic TraitTypes implementations/subclasses
-#-----------------------------------------------------------------------------
-
-
-class Any(TraitType):
- """A trait which allows any value."""
- default_value = None
- info_text = 'any value'
-
-
+#-----------------------------------------------------------------------------
+# Basic TraitTypes implementations/subclasses
+#-----------------------------------------------------------------------------
+
+
+class Any(TraitType):
+ """A trait which allows any value."""
+ default_value = None
+ info_text = 'any value'
+
+
def _validate_bounds(trait, obj, value):
"""
Validate that a number to be applied to a trait is between bounds.
@@ -1853,42 +1853,42 @@ def _validate_bounds(trait, obj, value):
return value
-class Int(TraitType):
- """An int trait."""
-
- default_value = 0
- info_text = 'an int'
-
+class Int(TraitType):
+ """An int trait."""
+
+ default_value = 0
+ info_text = 'an int'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
- self.min = kwargs.pop('min', None)
- self.max = kwargs.pop('max', None)
- super(Int, self).__init__(default_value=default_value,
- allow_none=allow_none, **kwargs)
-
- def validate(self, obj, value):
- if not isinstance(value, int):
- self.error(obj, value)
+ self.min = kwargs.pop('min', None)
+ self.max = kwargs.pop('max', None)
+ super(Int, self).__init__(default_value=default_value,
+ allow_none=allow_none, **kwargs)
+
+ def validate(self, obj, value):
+ if not isinstance(value, int):
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
-
-class CInt(Int):
- """A casting version of the int trait."""
-
- def validate(self, obj, value):
- try:
+
+
+class CInt(Int):
+ """A casting version of the int trait."""
+
+ def validate(self, obj, value):
+ try:
value = int(value)
- except:
- self.error(obj, value)
+ except:
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
+
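A minimal sketch of ``Int`` bounds and the casting ``CInt``, assuming ``_validate_bounds`` raises ``TraitError`` for out-of-range values:

    from traitlets import CInt, HasTraits, Int

    class Server(HasTraits):
        port = Int(8080, min=1, max=65535)
        retries = CInt(3)

    s = Server()
    s.retries = '5'   # cast to the int 5
    s.port = 0        # raises TraitError: below min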
if six.PY2:
- class Long(TraitType):
- """A long integer trait."""
-
- default_value = 0
- info_text = 'a long'
-
+ class Long(TraitType):
+ """A long integer trait."""
+
+ default_value = 0
+ info_text = 'a long'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
self.min = kwargs.pop('min', None)
self.max = kwargs.pop('max', None)
@@ -1897,36 +1897,36 @@ if six.PY2:
allow_none=allow_none, **kwargs)
def _validate_long(self, obj, value):
- if isinstance(value, long):
- return value
- if isinstance(value, int):
- return long(value)
- self.error(obj, value)
-
+ if isinstance(value, long):
+ return value
+ if isinstance(value, int):
+ return long(value)
+ self.error(obj, value)
+
def validate(self, obj, value):
value = self._validate_long(obj, value)
return _validate_bounds(self, obj, value)
-
- class CLong(Long):
- """A casting version of the long integer trait."""
-
- def validate(self, obj, value):
- try:
+
+ class CLong(Long):
+ """A casting version of the long integer trait."""
+
+ def validate(self, obj, value):
+ try:
value = long(value)
- except:
- self.error(obj, value)
+ except:
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
-
- class Integer(TraitType):
- """An integer trait.
-
- Longs that are unnecessary (<= sys.maxint) are cast to ints."""
-
- default_value = 0
- info_text = 'an integer'
-
+
+
+ class Integer(TraitType):
+ """An integer trait.
+
+ Longs that are unnecessary (<= sys.maxint) are cast to ints."""
+
+ default_value = 0
+ info_text = 'an integer'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
self.min = kwargs.pop('min', None)
self.max = kwargs.pop('max', None)
@@ -1935,674 +1935,674 @@ if six.PY2:
allow_none=allow_none, **kwargs)
def _validate_int(self, obj, value):
- if isinstance(value, int):
- return value
- if isinstance(value, long):
- # downcast longs that fit in int:
- # note that int(n > sys.maxint) returns a long, so
- # we don't need a condition on this cast
- return int(value)
- if sys.platform == "cli":
- from System import Int64
- if isinstance(value, Int64):
- return int(value)
- self.error(obj, value)
-
+ if isinstance(value, int):
+ return value
+ if isinstance(value, long):
+ # downcast longs that fit in int:
+ # note that int(n > sys.maxint) returns a long, so
+ # we don't need a condition on this cast
+ return int(value)
+ if sys.platform == "cli":
+ from System import Int64
+ if isinstance(value, Int64):
+ return int(value)
+ self.error(obj, value)
+
def validate(self, obj, value):
value = self._validate_int(obj, value)
return _validate_bounds(self, obj, value)
-
+
else:
Long, CLong = Int, CInt
Integer = Int
-class Float(TraitType):
- """A float trait."""
-
- default_value = 0.0
- info_text = 'a float'
-
+class Float(TraitType):
+ """A float trait."""
+
+ default_value = 0.0
+ info_text = 'a float'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
- self.min = kwargs.pop('min', -float('inf'))
- self.max = kwargs.pop('max', float('inf'))
+ self.min = kwargs.pop('min', -float('inf'))
+ self.max = kwargs.pop('max', float('inf'))
super(Float, self).__init__(default_value=default_value,
- allow_none=allow_none, **kwargs)
-
- def validate(self, obj, value):
- if isinstance(value, int):
- value = float(value)
- if not isinstance(value, float):
- self.error(obj, value)
+ allow_none=allow_none, **kwargs)
+
+ def validate(self, obj, value):
+ if isinstance(value, int):
+ value = float(value)
+ if not isinstance(value, float):
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
-
-class CFloat(Float):
- """A casting version of the float trait."""
-
- def validate(self, obj, value):
- try:
+
+
+class CFloat(Float):
+ """A casting version of the float trait."""
+
+ def validate(self, obj, value):
+ try:
value = float(value)
- except:
- self.error(obj, value)
+ except:
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
-
-class Complex(TraitType):
- """A trait for complex numbers."""
-
- default_value = 0.0 + 0.0j
- info_text = 'a complex number'
-
- def validate(self, obj, value):
- if isinstance(value, complex):
- return value
- if isinstance(value, (float, int)):
- return complex(value)
- self.error(obj, value)
-
-
-class CComplex(Complex):
- """A casting version of the complex number trait."""
-
- def validate (self, obj, value):
- try:
- return complex(value)
- except:
- self.error(obj, value)
-
-# We should always be explicit about whether we're using bytes or unicode, both
-# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
-# we don't have a Str type.
-class Bytes(TraitType):
- """A trait for byte strings."""
-
- default_value = b''
- info_text = 'a bytes object'
-
- def validate(self, obj, value):
- if isinstance(value, bytes):
- return value
- self.error(obj, value)
-
-
-class CBytes(Bytes):
- """A casting version of the byte string trait."""
-
- def validate(self, obj, value):
- try:
- return bytes(value)
- except:
- self.error(obj, value)
-
-
-class Unicode(TraitType):
- """A trait for unicode strings."""
-
- default_value = u''
- info_text = 'a unicode string'
-
- def validate(self, obj, value):
+
+
+class Complex(TraitType):
+ """A trait for complex numbers."""
+
+ default_value = 0.0 + 0.0j
+ info_text = 'a complex number'
+
+ def validate(self, obj, value):
+ if isinstance(value, complex):
+ return value
+ if isinstance(value, (float, int)):
+ return complex(value)
+ self.error(obj, value)
+
+
+class CComplex(Complex):
+ """A casting version of the complex number trait."""
+
+ def validate (self, obj, value):
+ try:
+ return complex(value)
+ except:
+ self.error(obj, value)
+
+# We should always be explicit about whether we're using bytes or unicode, both
+# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
+# we don't have a Str type.
+class Bytes(TraitType):
+ """A trait for byte strings."""
+
+ default_value = b''
+ info_text = 'a bytes object'
+
+ def validate(self, obj, value):
+ if isinstance(value, bytes):
+ return value
+ self.error(obj, value)
+
+
+class CBytes(Bytes):
+ """A casting version of the byte string trait."""
+
+ def validate(self, obj, value):
+ try:
+ return bytes(value)
+ except:
+ self.error(obj, value)
+
+
+class Unicode(TraitType):
+ """A trait for unicode strings."""
+
+ default_value = u''
+ info_text = 'a unicode string'
+
+ def validate(self, obj, value):
if isinstance(value, six.text_type):
- return value
- if isinstance(value, bytes):
- try:
- return value.decode('ascii', 'strict')
- except UnicodeDecodeError:
- msg = "Could not decode {!r} for unicode trait '{}' of {} instance."
- raise TraitError(msg.format(value, self.name, class_of(obj)))
- self.error(obj, value)
-
-
-class CUnicode(Unicode):
- """A casting version of the unicode trait."""
-
- def validate(self, obj, value):
- try:
+ return value
+ if isinstance(value, bytes):
+ try:
+ return value.decode('ascii', 'strict')
+ except UnicodeDecodeError:
+ msg = "Could not decode {!r} for unicode trait '{}' of {} instance."
+ raise TraitError(msg.format(value, self.name, class_of(obj)))
+ self.error(obj, value)
+
+
+class CUnicode(Unicode):
+ """A casting version of the unicode trait."""
+
+ def validate(self, obj, value):
+ try:
return six.text_type(value)
- except:
- self.error(obj, value)
-
-
-class ObjectName(TraitType):
- """A string holding a valid object name in this version of Python.
-
- This does not check that the name exists in any scope."""
- info_text = "a valid object identifier in Python"
-
+ except:
+ self.error(obj, value)
+
+
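A minimal sketch contrasting ``Unicode``, which only decodes ascii-clean bytes, with the casting ``CUnicode``:

    from traitlets import CUnicode, HasTraits, Unicode

    class Doc(HasTraits):
        title = Unicode(u'untitled')
        label = CUnicode()

    d = Doc()
    d.title = b'ascii ok'   # decoded via value.decode('ascii', 'strict')
    d.label = 3.14          # cast with six.text_type -> u'3.14'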
+class ObjectName(TraitType):
+ """A string holding a valid object name in this version of Python.
+
+ This does not check that the name exists in any scope."""
+ info_text = "a valid object identifier in Python"
+
if six.PY2:
- # Python 2:
- def coerce_str(self, obj, value):
- "In Python 2, coerce ascii-only unicode to str"
- if isinstance(value, unicode):
- try:
- return str(value)
- except UnicodeEncodeError:
- self.error(obj, value)
- return value
+ # Python 2:
+ def coerce_str(self, obj, value):
+ "In Python 2, coerce ascii-only unicode to str"
+ if isinstance(value, unicode):
+ try:
+ return str(value)
+ except UnicodeEncodeError:
+ self.error(obj, value)
+ return value
else:
coerce_str = staticmethod(lambda _,s: s)
-
- def validate(self, obj, value):
- value = self.coerce_str(obj, value)
-
+
+ def validate(self, obj, value):
+ value = self.coerce_str(obj, value)
+
if isinstance(value, six.string_types) and isidentifier(value):
- return value
- self.error(obj, value)
-
-class DottedObjectName(ObjectName):
- """A string holding a valid dotted object name in Python, such as A.b3._c"""
- def validate(self, obj, value):
- value = self.coerce_str(obj, value)
-
+ return value
+ self.error(obj, value)
+
+class DottedObjectName(ObjectName):
+ """A string holding a valid dotted object name in Python, such as A.b3._c"""
+ def validate(self, obj, value):
+ value = self.coerce_str(obj, value)
+
if isinstance(value, six.string_types) and all(isidentifier(a)
for a in value.split('.')):
- return value
- self.error(obj, value)
-
-
-class Bool(TraitType):
- """A boolean (True, False) trait."""
-
- default_value = False
- info_text = 'a boolean'
-
- def validate(self, obj, value):
- if isinstance(value, bool):
- return value
- self.error(obj, value)
-
-
-class CBool(Bool):
- """A casting version of the boolean trait."""
-
- def validate(self, obj, value):
- try:
- return bool(value)
- except:
- self.error(obj, value)
-
-
-class Enum(TraitType):
- """An enum whose value must be in a given sequence."""
-
+ return value
+ self.error(obj, value)
+
+
+class Bool(TraitType):
+ """A boolean (True, False) trait."""
+
+ default_value = False
+ info_text = 'a boolean'
+
+ def validate(self, obj, value):
+ if isinstance(value, bool):
+ return value
+ self.error(obj, value)
+
+
+class CBool(Bool):
+ """A casting version of the boolean trait."""
+
+ def validate(self, obj, value):
+ try:
+ return bool(value)
+ except:
+ self.error(obj, value)
+
+
+class Enum(TraitType):
+ """An enum whose value must be in a given sequence."""
+
def __init__(self, values, default_value=Undefined, **kwargs):
- self.values = values
+ self.values = values
if kwargs.get('allow_none', False) and default_value is Undefined:
- default_value = None
+ default_value = None
super(Enum, self).__init__(default_value, **kwargs)
-
- def validate(self, obj, value):
- if value in self.values:
- return value
- self.error(obj, value)
-
- def info(self):
- """ Returns a description of the trait."""
- result = 'any of ' + repr(self.values)
- if self.allow_none:
- return result + ' or None'
- return result
-
-class CaselessStrEnum(Enum):
- """An enum of strings where the case should be ignored."""
+
+ def validate(self, obj, value):
+ if value in self.values:
+ return value
+ self.error(obj, value)
+
+ def info(self):
+ """ Returns a description of the trait."""
+ result = 'any of ' + repr(self.values)
+ if self.allow_none:
+ return result + ' or None'
+ return result
+
+class CaselessStrEnum(Enum):
+ """An enum of strings where the case should be ignored."""
def __init__(self, values, default_value=Undefined, **kwargs):
values = [cast_unicode_py2(value) for value in values]
super(CaselessStrEnum, self).__init__(values, default_value=default_value, **kwargs)
- def validate(self, obj, value):
- if isinstance(value, str):
+ def validate(self, obj, value):
+ if isinstance(value, str):
value = cast_unicode_py2(value)
if not isinstance(value, six.string_types):
- self.error(obj, value)
-
- for v in self.values:
- if v.lower() == value.lower():
- return v
- self.error(obj, value)
-
-class Container(Instance):
- """An instance of a container (list, set, etc.)
-
- To be subclassed by overriding klass.
- """
- klass = None
- _cast_types = ()
- _valid_defaults = SequenceTypes
- _trait = None
-
+ self.error(obj, value)
+
+ for v in self.values:
+ if v.lower() == value.lower():
+ return v
+ self.error(obj, value)
+
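A minimal sketch of ``Enum`` and ``CaselessStrEnum``; the latter matches case-insensitively and stores the canonical value:

    from traitlets import CaselessStrEnum, Enum, HasTraits

    class Style(HasTraits):
        align = Enum(['left', 'right'], default_value='left')
        theme = CaselessStrEnum(['Dark', 'Light'], default_value='Dark')

    st = Style()
    st.theme = 'dark'     # accepted, stored as 'Dark'
    st.align = 'center'   # raises TraitError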
+class Container(Instance):
+ """An instance of a container (list, set, etc.)
+
+ To be subclassed by overriding klass.
+ """
+ klass = None
+ _cast_types = ()
+ _valid_defaults = SequenceTypes
+ _trait = None
+
def __init__(self, trait=None, default_value=None, **kwargs):
- """Create a container trait type from a list, set, or tuple.
-
- The default value is created by doing ``List(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = List([1, 2, 3])``
-
- Parameters
- ----------
-
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container. If unspecified,
- types are not checked.
-
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
-
- allow_none : bool [ default False ]
- Whether to allow the value to be None
-
+ """Create a container trait type from a list, set, or tuple.
+
+ The default value is created by doing ``List(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = List([1, 2, 3])``
+
+ Parameters
+ ----------
+
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container. If unspecified,
+ types are not checked.
+
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+
+ allow_none : bool [ default False ]
+ Whether to allow the value to be None
+
**kwargs : any
- further keys for extensions to the Trait (e.g. config)
-
- """
- # allow List([values]):
- if default_value is None and not is_trait(trait):
- default_value = trait
- trait = None
-
- if default_value is None:
- args = ()
- elif isinstance(default_value, self._valid_defaults):
- args = (default_value,)
- else:
- raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
-
- if is_trait(trait):
- if isinstance(trait, type):
+ further keys for extensions to the Trait (e.g. config)
+
+ """
+ # allow List([values]):
+ if default_value is None and not is_trait(trait):
+ default_value = trait
+ trait = None
+
+ if default_value is None:
+ args = ()
+ elif isinstance(default_value, self._valid_defaults):
+ args = (default_value,)
+ else:
+ raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
+
+ if is_trait(trait):
+ if isinstance(trait, type):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)."
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=3)
- self._trait = trait() if isinstance(trait, type) else trait
- elif trait is not None:
- raise TypeError("`trait` must be a Trait or None, got %s" % repr_type(trait))
-
+ DeprecationWarning, stacklevel=3)
+ self._trait = trait() if isinstance(trait, type) else trait
+ elif trait is not None:
+ raise TypeError("`trait` must be a Trait or None, got %s" % repr_type(trait))
+
super(Container,self).__init__(klass=self.klass, args=args, **kwargs)
-
- def element_error(self, obj, element, validator):
- e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
- % (self.name, class_of(obj), validator.info(), repr_type(element))
- raise TraitError(e)
-
- def validate(self, obj, value):
- if isinstance(value, self._cast_types):
- value = self.klass(value)
- value = super(Container, self).validate(obj, value)
- if value is None:
- return value
-
- value = self.validate_elements(obj, value)
-
- return value
-
- def validate_elements(self, obj, value):
- validated = []
- if self._trait is None or isinstance(self._trait, Any):
- return value
- for v in value:
- try:
- v = self._trait._validate(obj, v)
- except TraitError:
- self.element_error(obj, v, self._trait)
- else:
- validated.append(v)
- return self.klass(validated)
-
- def class_init(self, cls, name):
- if isinstance(self._trait, TraitType):
- self._trait.class_init(cls, None)
- super(Container, self).class_init(cls, name)
-
- def instance_init(self, obj):
- if isinstance(self._trait, TraitType):
- self._trait.instance_init(obj)
- super(Container, self).instance_init(obj)
-
-
-class List(Container):
- """An instance of a Python list."""
- klass = list
- _cast_types = (tuple,)
-
+
+ def element_error(self, obj, element, validator):
+ e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
+ % (self.name, class_of(obj), validator.info(), repr_type(element))
+ raise TraitError(e)
+
+ def validate(self, obj, value):
+ if isinstance(value, self._cast_types):
+ value = self.klass(value)
+ value = super(Container, self).validate(obj, value)
+ if value is None:
+ return value
+
+ value = self.validate_elements(obj, value)
+
+ return value
+
+ def validate_elements(self, obj, value):
+ validated = []
+ if self._trait is None or isinstance(self._trait, Any):
+ return value
+ for v in value:
+ try:
+ v = self._trait._validate(obj, v)
+ except TraitError:
+ self.element_error(obj, v, self._trait)
+ else:
+ validated.append(v)
+ return self.klass(validated)
+
+ def class_init(self, cls, name):
+ if isinstance(self._trait, TraitType):
+ self._trait.class_init(cls, None)
+ super(Container, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ if isinstance(self._trait, TraitType):
+ self._trait.instance_init(obj)
+ super(Container, self).instance_init(obj)
+
+
+class List(Container):
+ """An instance of a Python list."""
+ klass = list
+ _cast_types = (tuple,)
+
def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxsize, **kwargs):
- """Create a List trait type from a list, set, or tuple.
-
- The default value is created by doing ``list(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = List([1, 2, 3])``
-
- Parameters
- ----------
-
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container.
- If unspecified, types are not checked.
-
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
-
- minlen : Int [ default 0 ]
- The minimum length of the input list
-
- maxlen : Int [ default sys.maxsize ]
- The maximum length of the input list
- """
- self._minlen = minlen
- self._maxlen = maxlen
- super(List, self).__init__(trait=trait, default_value=default_value,
+ """Create a List trait type from a list, set, or tuple.
+
+ The default value is created by doing ``list(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = List([1, 2, 3])``
+
+ Parameters
+ ----------
+
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container.
+ If unspecified, types are not checked.
+
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+
+ minlen : Int [ default 0 ]
+ The minimum length of the input list
+
+ maxlen : Int [ default sys.maxsize ]
+ The maximum length of the input list
+ """
+ self._minlen = minlen
+ self._maxlen = maxlen
+ super(List, self).__init__(trait=trait, default_value=default_value,
**kwargs)
-
- def length_error(self, obj, value):
- e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
- % (self.name, class_of(obj), self._minlen, self._maxlen, value)
- raise TraitError(e)
-
- def validate_elements(self, obj, value):
- length = len(value)
- if length < self._minlen or length > self._maxlen:
- self.length_error(obj, value)
-
- return super(List, self).validate_elements(obj, value)
-
- def validate(self, obj, value):
- value = super(List, self).validate(obj, value)
- value = self.validate_elements(obj, value)
- return value
-
-
-class Set(List):
- """An instance of a Python set."""
- klass = set
- _cast_types = (tuple, list)
-
- # Redefine __init__ just to make the docstring more accurate.
- def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxsize,
+
+ def length_error(self, obj, value):
+ e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
+ % (self.name, class_of(obj), self._minlen, self._maxlen, value)
+ raise TraitError(e)
+
+ def validate_elements(self, obj, value):
+ length = len(value)
+ if length < self._minlen or length > self._maxlen:
+ self.length_error(obj, value)
+
+ return super(List, self).validate_elements(obj, value)
+
+ def validate(self, obj, value):
+ value = super(List, self).validate(obj, value)
+ value = self.validate_elements(obj, value)
+ return value
+
+
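A minimal sketch of ``List`` with an element trait and length bounds:

    from traitlets import HasTraits, Int, List

    class Poll(HasTraits):
        votes = List(Int(), [1, 2], minlen=1, maxlen=5)

    p = Poll()
    p.votes = [3, 4, 5]   # each element validated by Int
    p.votes = []          # raises TraitError: shorter than minlen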
+class Set(List):
+ """An instance of a Python set."""
+ klass = set
+ _cast_types = (tuple, list)
+
+ # Redefine __init__ just to make the docstring more accurate.
+ def __init__(self, trait=None, default_value=None, minlen=0, maxlen=sys.maxsize,
**kwargs):
- """Create a Set trait type from a list, set, or tuple.
-
- The default value is created by doing ``set(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = Set({1, 2, 3})``
-
- Parameters
- ----------
-
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container.
- If unspecified, types are not checked.
-
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
-
- minlen : Int [ default 0 ]
- The minimum length of the input list
-
- maxlen : Int [ default sys.maxsize ]
- The maximum length of the input list
- """
+ """Create a Set trait type from a list, set, or tuple.
+
+ The default value is created by doing ``set(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = Set({1, 2, 3})``
+
+ Parameters
+ ----------
+
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container.
+ If unspecified, types are not checked.
+
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+
+ minlen : Int [ default 0 ]
+ The minimum length of the input list
+
+ maxlen : Int [ default sys.maxsize ]
+ The maximum length of the input list
+ """
super(Set, self).__init__(trait, default_value, minlen, maxlen, **kwargs)
-
-
-class Tuple(Container):
- """An instance of a Python tuple."""
- klass = tuple
- _cast_types = (list,)
-
+
+
+class Tuple(Container):
+ """An instance of a Python tuple."""
+ klass = tuple
+ _cast_types = (list,)
+
def __init__(self, *traits, **kwargs):
- """Create a tuple from a list, set, or tuple.
-
- Create a fixed-type tuple with Traits:
-
- ``t = Tuple(Int(), Str(), CStr())``
-
- would be length 3, with Int,Str,CStr for each element.
-
- If only one arg is given and it is not a Trait, it is taken as
- default_value:
-
- ``t = Tuple((1, 2, 3))``
-
- Otherwise, ``default_value`` *must* be specified by keyword.
-
- Parameters
- ----------
-
- `*traits` : TraitTypes [ optional ]
- the types for restricting the contents of the Tuple. If unspecified,
- types are not checked. If specified, then each positional argument
- corresponds to an element of the tuple. Tuples defined with traits
- are of fixed length.
-
- default_value : SequenceType [ optional ]
- The default value for the Tuple. Must be list/tuple/set, and
- will be cast to a tuple. If ``traits`` are specified,
- ``default_value`` must conform to the shape and type they specify.
- """
+ """Create a tuple from a list, set, or tuple.
+
+ Create a fixed-type tuple with Traits:
+
+ ``t = Tuple(Int(), Str(), CStr())``
+
+ would be length 3, with Int,Str,CStr for each element.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ default_value:
+
+ ``t = Tuple((1, 2, 3))``
+
+ Otherwise, ``default_value`` *must* be specified by keyword.
+
+ Parameters
+ ----------
+
+ `*traits` : TraitTypes [ optional ]
+ the types for restricting the contents of the Tuple. If unspecified,
+ types are not checked. If specified, then each positional argument
+ corresponds to an element of the tuple. Tuples defined with traits
+ are of fixed length.
+
+ default_value : SequenceType [ optional ]
+ The default value for the Tuple. Must be list/tuple/set, and
+ will be cast to a tuple. If ``traits`` are specified,
+ ``default_value`` must conform to the shape and type they specify.
+ """
default_value = kwargs.pop('default_value', Undefined)
- # allow Tuple((values,)):
- if len(traits) == 1 and default_value is Undefined and not is_trait(traits[0]):
- default_value = traits[0]
- traits = ()
-
- if default_value is Undefined:
- args = ()
- elif isinstance(default_value, self._valid_defaults):
- args = (default_value,)
- else:
- raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
-
- self._traits = []
- for trait in traits:
- if isinstance(trait, type):
+ # allow Tuple((values,)):
+ if len(traits) == 1 and default_value is Undefined and not is_trait(traits[0]):
+ default_value = traits[0]
+ traits = ()
+
+ if default_value is Undefined:
+ args = ()
+ elif isinstance(default_value, self._valid_defaults):
+ args = (default_value,)
+ else:
+ raise TypeError('default value of %s was %s' %(self.__class__.__name__, default_value))
+
+ self._traits = []
+ for trait in traits:
+ if isinstance(trait, type):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)"
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=2)
- t = trait() if isinstance(trait, type) else trait
- self._traits.append(t)
-
- if self._traits and default_value is None:
- # don't allow default to be an empty container if length is specified
- args = None
+ DeprecationWarning, stacklevel=2)
+ t = trait() if isinstance(trait, type) else trait
+ self._traits.append(t)
+
+ if self._traits and default_value is None:
+ # don't allow default to be an empty container if length is specified
+ args = None
super(Container,self).__init__(klass=self.klass, args=args, **kwargs)
-
- def validate_elements(self, obj, value):
- if not self._traits:
- # nothing to validate
- return value
- if len(value) != len(self._traits):
- e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
- % (self.name, class_of(obj), len(self._traits), repr_type(value))
- raise TraitError(e)
-
- validated = []
- for t, v in zip(self._traits, value):
- try:
- v = t._validate(obj, v)
- except TraitError:
- self.element_error(obj, v, t)
- else:
- validated.append(v)
- return tuple(validated)
-
- def class_init(self, cls, name):
- for trait in self._traits:
- if isinstance(trait, TraitType):
- trait.class_init(cls, None)
- super(Container, self).class_init(cls, name)
-
- def instance_init(self, obj):
- for trait in self._traits:
- if isinstance(trait, TraitType):
- trait.instance_init(obj)
- super(Container, self).instance_init(obj)
-
-
-class Dict(Instance):
- """An instance of a Python dict."""
- _trait = None
-
- def __init__(self, trait=None, traits=None, default_value=Undefined,
+
+ def validate_elements(self, obj, value):
+ if not self._traits:
+ # nothing to validate
+ return value
+ if len(value) != len(self._traits):
+ e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
+ % (self.name, class_of(obj), len(self._traits), repr_type(value))
+ raise TraitError(e)
+
+ validated = []
+ for t, v in zip(self._traits, value):
+ try:
+ v = t._validate(obj, v)
+ except TraitError:
+ self.element_error(obj, v, t)
+ else:
+ validated.append(v)
+ return tuple(validated)
+
+ def class_init(self, cls, name):
+ for trait in self._traits:
+ if isinstance(trait, TraitType):
+ trait.class_init(cls, None)
+ super(Container, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ for trait in self._traits:
+ if isinstance(trait, TraitType):
+ trait.instance_init(obj)
+ super(Container, self).instance_init(obj)
+
+
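A minimal sketch of a fixed-shape ``Tuple`` with one trait per element:

    from traitlets import HasTraits, Int, Tuple, Unicode

    class Point(HasTraits):
        labeled = Tuple(Int(), Unicode(), default_value=(0, u'origin'))

    pt = Point()
    pt.labeled = (3, u'corner')   # matches (Int, Unicode)
    pt.labeled = (3, u'x', 9)     # raises TraitError: wrong length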
+class Dict(Instance):
+ """An instance of a Python dict."""
+ _trait = None
+
+ def __init__(self, trait=None, traits=None, default_value=Undefined,
**kwargs):
"""Create a dict trait type from a Python dict.
-
- The default value is created by doing ``dict(default_value)``,
- which creates a copy of the ``default_value``.
-
+
+ The default value is created by doing ``dict(default_value)``,
+ which creates a copy of the ``default_value``.
+
Parameters
----------
- trait : TraitType [ optional ]
+ trait : TraitType [ optional ]
The specified trait type to check and use to restrict contents of
the Container. If unspecified, trait types are not checked.
-
+
traits : Dictionary of trait types [ optional ]
A Python dictionary containing the types that are valid for
restricting the content of the Dict Container for certain keys.
-
- default_value : SequenceType [ optional ]
- The default value for the Dict. Must be dict, tuple, or None, and
- will be cast to a dict if not None. If `trait` is specified, the
- `default_value` must conform to the constraints it specifies.
- """
- # Handling positional arguments
- if default_value is Undefined and trait is not None:
- if not is_trait(trait):
- default_value = trait
- trait = None
-
- # Handling default value
- if default_value is Undefined:
- default_value = {}
- if default_value is None:
- args = None
- elif isinstance(default_value, dict):
- args = (default_value,)
- elif isinstance(default_value, SequenceTypes):
- args = (default_value,)
- else:
- raise TypeError('default value of Dict was %s' % default_value)
-
- # Case where a type of TraitType is provided rather than an instance
- if is_trait(trait):
- if isinstance(trait, type):
+
+ default_value : SequenceType [ optional ]
+ The default value for the Dict. Must be dict, tuple, or None, and
+ will be cast to a dict if not None. If `trait` is specified, the
+ `default_value` must conform to the constraints it specifies.
+ """
+ # Handling positional arguments
+ if default_value is Undefined and trait is not None:
+ if not is_trait(trait):
+ default_value = trait
+ trait = None
+
+ # Handling default value
+ if default_value is Undefined:
+ default_value = {}
+ if default_value is None:
+ args = None
+ elif isinstance(default_value, dict):
+ args = (default_value,)
+ elif isinstance(default_value, SequenceTypes):
+ args = (default_value,)
+ else:
+ raise TypeError('default value of Dict was %s' % default_value)
+
+ # Case where a type of TraitType is provided rather than an instance
+ if is_trait(trait):
+ if isinstance(trait, type):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)"
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=2)
- self._trait = trait() if isinstance(trait, type) else trait
- elif trait is not None:
- raise TypeError("`trait` must be a Trait or None, got %s" % repr_type(trait))
-
- self._traits = traits
-
+ DeprecationWarning, stacklevel=2)
+ self._trait = trait() if isinstance(trait, type) else trait
+ elif trait is not None:
+ raise TypeError("`trait` must be a Trait or None, got %s" % repr_type(trait))
+
+ self._traits = traits
+
super(Dict, self).__init__(klass=dict, args=args, **kwargs)
-
- def element_error(self, obj, element, validator):
- e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
- % (self.name, class_of(obj), validator.info(), repr_type(element))
- raise TraitError(e)
-
- def validate(self, obj, value):
- value = super(Dict, self).validate(obj, value)
- if value is None:
- return value
- value = self.validate_elements(obj, value)
- return value
-
- def validate_elements(self, obj, value):
+
+ def element_error(self, obj, element, validator):
+ e = "Element of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
+ % (self.name, class_of(obj), validator.info(), repr_type(element))
+ raise TraitError(e)
+
+ def validate(self, obj, value):
+ value = super(Dict, self).validate(obj, value)
+ if value is None:
+ return value
+ value = self.validate_elements(obj, value)
+ return value
+
+ def validate_elements(self, obj, value):
use_dict = bool(self._traits)
default_to = (self._trait or Any())
if not use_dict and isinstance(default_to, Any):
- return value
+ return value
- validated = {}
- for key in value:
+ validated = {}
+ for key in value:
if use_dict and key in self._traits:
validate_with = self._traits[key]
else:
validate_with = default_to
- try:
+ try:
v = value[key]
if not isinstance(validate_with, Any):
v = validate_with._validate(obj, v)
- except TraitError:
+ except TraitError:
self.element_error(obj, v, validate_with)
- else:
- validated[key] = v
-
- return self.klass(validated)
-
- def class_init(self, cls, name):
- if isinstance(self._trait, TraitType):
- self._trait.class_init(cls, None)
- if self._traits is not None:
- for trait in self._traits.values():
- trait.class_init(cls, None)
- super(Dict, self).class_init(cls, name)
-
- def instance_init(self, obj):
- if isinstance(self._trait, TraitType):
- self._trait.instance_init(obj)
- if self._traits is not None:
- for trait in self._traits.values():
- trait.instance_init(obj)
- super(Dict, self).instance_init(obj)
-
-
-class TCPAddress(TraitType):
- """A trait for an (ip, port) tuple.
-
-    This allows for both IPv4 addresses and hostnames.
- """
-
- default_value = ('127.0.0.1', 0)
- info_text = 'an (ip, port) tuple'
-
- def validate(self, obj, value):
- if isinstance(value, tuple):
- if len(value) == 2:
+ else:
+ validated[key] = v
+
+ return self.klass(validated)
+
+ def class_init(self, cls, name):
+ if isinstance(self._trait, TraitType):
+ self._trait.class_init(cls, None)
+ if self._traits is not None:
+ for trait in self._traits.values():
+ trait.class_init(cls, None)
+ super(Dict, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ if isinstance(self._trait, TraitType):
+ self._trait.instance_init(obj)
+ if self._traits is not None:
+ for trait in self._traits.values():
+ trait.instance_init(obj)
+ super(Dict, self).instance_init(obj)
+
+
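A minimal sketch of ``Dict`` combining a per-key ``traits`` mapping with a fallback ``trait`` for the remaining values:

    from traitlets import Dict, HasTraits, Int, Unicode

    class Config(HasTraits):
        opts = Dict(trait=Unicode(), traits={'port': Int()})

    c = Config()
    c.opts = {'port': 8080, 'host': u'localhost'}   # 'port' -> Int, others -> Unicode
    c.opts = {'port': u'web'}                       # raises TraitError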
+class TCPAddress(TraitType):
+ """A trait for an (ip, port) tuple.
+
+    This allows for both IPv4 addresses and hostnames.
+ """
+
+ default_value = ('127.0.0.1', 0)
+ info_text = 'an (ip, port) tuple'
+
+ def validate(self, obj, value):
+ if isinstance(value, tuple):
+ if len(value) == 2:
if isinstance(value[0], six.string_types) and isinstance(value[1], int):
- port = value[1]
- if port >= 0 and port <= 65535:
- return value
- self.error(obj, value)
-
-class CRegExp(TraitType):
- """A casting compiled regular expression trait.
-
- Accepts both strings and compiled regular expressions. The resulting
- attribute will be a compiled regular expression."""
-
- info_text = 'a regular expression'
-
- def validate(self, obj, value):
- try:
- return re.compile(value)
- except:
- self.error(obj, value)
+ port = value[1]
+ if port >= 0 and port <= 65535:
+ return value
+ self.error(obj, value)
+
+class CRegExp(TraitType):
+ """A casting compiled regular expression trait.
+
+ Accepts both strings and compiled regular expressions. The resulting
+ attribute will be a compiled regular expression."""
+
+ info_text = 'a regular expression'
+
+ def validate(self, obj, value):
+ try:
+ return re.compile(value)
+ except:
+ self.error(obj, value)
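A minimal sketch of ``TCPAddress`` and ``CRegExp``; ``re.compile`` accepts pattern strings and passes already-compiled patterns through unchanged:

    import re
    from traitlets import CRegExp, HasTraits, TCPAddress

    class Listener(HasTraits):
        addr = TCPAddress()          # defaults to ('127.0.0.1', 0)
        route = CRegExp(r'^/api/')

    srv = Listener()
    srv.addr = ('0.0.0.0', 8080)       # (ip-or-hostname, port in 0..65535)
    srv.route = re.compile(r'^/v2/')   # compiled patterns validate too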
class UseEnum(TraitType):
diff --git a/contrib/python/traitlets/py2/traitlets/utils/getargspec.py b/contrib/python/traitlets/py2/traitlets/utils/getargspec.py
index 5c79b29549..0a047379fe 100644
--- a/contrib/python/traitlets/py2/traitlets/utils/getargspec.py
+++ b/contrib/python/traitlets/py2/traitlets/utils/getargspec.py
@@ -1,86 +1,86 @@
-# -*- coding: utf-8 -*-
-"""
- getargspec excerpted from:
-
- sphinx.util.inspect
- ~~~~~~~~~~~~~~~~~~~
- Helpers for inspecting Python modules.
- :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import inspect
+# -*- coding: utf-8 -*-
+"""
+ getargspec excerpted from:
+
+ sphinx.util.inspect
+ ~~~~~~~~~~~~~~~~~~~
+ Helpers for inspecting Python modules.
+ :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import inspect
from six import PY3
-
-# Unmodified from sphinx below this line
-
-if PY3:
- from functools import partial
-
- def getargspec(func):
- """Like inspect.getargspec but supports functools.partial as well."""
- if inspect.ismethod(func):
- func = func.__func__
- if type(func) is partial:
- orig_func = func.func
- argspec = getargspec(orig_func)
- args = list(argspec[0])
- defaults = list(argspec[3] or ())
- kwoargs = list(argspec[4])
- kwodefs = dict(argspec[5] or {})
- if func.args:
- args = args[len(func.args):]
- for arg in func.keywords or ():
- try:
- i = args.index(arg) - len(args)
- del args[i]
- try:
- del defaults[i]
- except IndexError:
- pass
- except ValueError: # must be a kwonly arg
- i = kwoargs.index(arg)
- del kwoargs[i]
- del kwodefs[arg]
- return inspect.FullArgSpec(args, argspec[1], argspec[2],
- tuple(defaults), kwoargs,
- kwodefs, argspec[6])
- while hasattr(func, '__wrapped__'):
- func = func.__wrapped__
- if not inspect.isfunction(func):
- raise TypeError('%r is not a Python function' % func)
- return inspect.getfullargspec(func)
-
-else: # 2.6, 2.7
- from functools import partial
-
- def getargspec(func):
- """Like inspect.getargspec but supports functools.partial as well."""
- if inspect.ismethod(func):
- func = func.__func__
- parts = 0, ()
- if type(func) is partial:
- keywords = func.keywords
- if keywords is None:
- keywords = {}
- parts = len(func.args), keywords.keys()
- func = func.func
- if not inspect.isfunction(func):
- raise TypeError('%r is not a Python function' % func)
- args, varargs, varkw = inspect.getargs(func.__code__)
- func_defaults = func.__defaults__
- if func_defaults is None:
- func_defaults = []
- else:
- func_defaults = list(func_defaults)
- if parts[0]:
- args = args[parts[0]:]
- if parts[1]:
- for arg in parts[1]:
- i = args.index(arg) - len(args)
- del args[i]
- try:
- del func_defaults[i]
- except IndexError:
- pass
- return inspect.ArgSpec(args, varargs, varkw, func_defaults)
+
+# Unmodified from sphinx below this line
+
+if PY3:
+ from functools import partial
+
+ def getargspec(func):
+ """Like inspect.getargspec but supports functools.partial as well."""
+ if inspect.ismethod(func):
+ func = func.__func__
+ if type(func) is partial:
+ orig_func = func.func
+ argspec = getargspec(orig_func)
+ args = list(argspec[0])
+ defaults = list(argspec[3] or ())
+ kwoargs = list(argspec[4])
+ kwodefs = dict(argspec[5] or {})
+ if func.args:
+ args = args[len(func.args):]
+ for arg in func.keywords or ():
+ try:
+ i = args.index(arg) - len(args)
+ del args[i]
+ try:
+ del defaults[i]
+ except IndexError:
+ pass
+ except ValueError: # must be a kwonly arg
+ i = kwoargs.index(arg)
+ del kwoargs[i]
+ del kwodefs[arg]
+ return inspect.FullArgSpec(args, argspec[1], argspec[2],
+ tuple(defaults), kwoargs,
+ kwodefs, argspec[6])
+ while hasattr(func, '__wrapped__'):
+ func = func.__wrapped__
+ if not inspect.isfunction(func):
+ raise TypeError('%r is not a Python function' % func)
+ return inspect.getfullargspec(func)
+
+else: # 2.6, 2.7
+ from functools import partial
+
+ def getargspec(func):
+ """Like inspect.getargspec but supports functools.partial as well."""
+ if inspect.ismethod(func):
+ func = func.__func__
+ parts = 0, ()
+ if type(func) is partial:
+ keywords = func.keywords
+ if keywords is None:
+ keywords = {}
+ parts = len(func.args), keywords.keys()
+ func = func.func
+ if not inspect.isfunction(func):
+ raise TypeError('%r is not a Python function' % func)
+ args, varargs, varkw = inspect.getargs(func.__code__)
+ func_defaults = func.__defaults__
+ if func_defaults is None:
+ func_defaults = []
+ else:
+ func_defaults = list(func_defaults)
+ if parts[0]:
+ args = args[parts[0]:]
+ if parts[1]:
+ for arg in parts[1]:
+ i = args.index(arg) - len(args)
+ del args[i]
+ try:
+ del func_defaults[i]
+ except IndexError:
+ pass
+ return inspect.ArgSpec(args, varargs, varkw, func_defaults)
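
A hedged sketch of what the helper above returns for a functools.partial object on Python 3, where the PY3 branch strips the bound positional arguments before delegating to inspect.getfullargspec. The connect function is invented, and the import path assumes the vendored module layout shown in this diff.

```python
import functools
from traitlets.utils.getargspec import getargspec

def connect(host, port=80, *, timeout=1.0):
    pass

bound = functools.partial(connect, 'localhost')
spec = getargspec(bound)
print(spec.args)         # ['port'] -- 'host' was consumed by the partial
print(spec.defaults)     # (80,)
print(spec.kwonlyargs)   # ['timeout']
```
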
diff --git a/contrib/python/traitlets/py2/traitlets/utils/importstring.py b/contrib/python/traitlets/py2/traitlets/utils/importstring.py
index 873ab9635c..5b4f643f41 100644
--- a/contrib/python/traitlets/py2/traitlets/utils/importstring.py
+++ b/contrib/python/traitlets/py2/traitlets/utils/importstring.py
@@ -1,42 +1,42 @@
-# encoding: utf-8
-"""
-A simple utility to import something by its string name.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
+# encoding: utf-8
+"""
+A simple utility to import something by its string name.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
from ipython_genutils.py3compat import cast_bytes_py2
from six import string_types
-
-def import_item(name):
- """Import and return ``bar`` given the string ``foo.bar``.
-
- Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
- executing the code ``from foo import bar``.
-
- Parameters
- ----------
- name : string
- The fully qualified name of the module/package being imported.
-
- Returns
- -------
- mod : module object
- The module that was imported.
- """
- if not isinstance(name, string_types):
- raise TypeError("import_item accepts strings, not '%s'." % type(name))
- name = cast_bytes_py2(name)
- parts = name.rsplit('.', 1)
- if len(parts) == 2:
- # called with 'foo.bar....'
- package, obj = parts
- module = __import__(package, fromlist=[obj])
- try:
- pak = getattr(module, obj)
- except AttributeError:
- raise ImportError('No module named %s' % obj)
- return pak
- else:
- # called with un-dotted string
- return __import__(parts[0])
+
+def import_item(name):
+ """Import and return ``bar`` given the string ``foo.bar``.
+
+ Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
+ executing the code ``from foo import bar``.
+
+ Parameters
+ ----------
+ name : string
+ The fully qualified name of the module/package being imported.
+
+ Returns
+ -------
+ mod : module object
+ The module that was imported.
+ """
+ if not isinstance(name, string_types):
+ raise TypeError("import_item accepts strings, not '%s'." % type(name))
+ name = cast_bytes_py2(name)
+ parts = name.rsplit('.', 1)
+ if len(parts) == 2:
+ # called with 'foo.bar....'
+ package, obj = parts
+ module = __import__(package, fromlist=[obj])
+ try:
+ pak = getattr(module, obj)
+ except AttributeError:
+ raise ImportError('No module named %s' % obj)
+ return pak
+ else:
+ # called with un-dotted string
+ return __import__(parts[0])
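
A short usage sketch for import_item as defined above: the dotted form resolves an attribute off the imported module, while the un-dotted form imports the module itself.

```python
from traitlets.utils.importstring import import_item

dirname = import_item('os.path.dirname')   # like `from os.path import dirname`
print(dirname('/tmp/x.txt'))               # /tmp

os_mod = import_item('os')                 # un-dotted: plain __import__('os')
print(os_mod.sep)
```
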
diff --git a/contrib/python/traitlets/py2/traitlets/utils/sentinel.py b/contrib/python/traitlets/py2/traitlets/utils/sentinel.py
index 7af2558c1a..dc57a2591c 100644
--- a/contrib/python/traitlets/py2/traitlets/utils/sentinel.py
+++ b/contrib/python/traitlets/py2/traitlets/utils/sentinel.py
@@ -1,17 +1,17 @@
-"""Sentinel class for constants with useful reprs"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-class Sentinel(object):
-
- def __init__(self, name, module, docstring=None):
- self.name = name
- self.module = module
- if docstring:
- self.__doc__ = docstring
-
-
- def __repr__(self):
- return str(self.module)+'.'+self.name
-
+"""Sentinel class for constants with useful reprs"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+class Sentinel(object):
+
+ def __init__(self, name, module, docstring=None):
+ self.name = name
+ self.module = module
+ if docstring:
+ self.__doc__ = docstring
+
+
+ def __repr__(self):
+ return str(self.module)+'.'+self.name
+
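
A sketch of the sentinel pattern above. traitlets builds its own module-level constants (for example traitlets.Undefined) this way; here 'mypkg' and the lookup helper are invented for the demo.

```python
from traitlets.utils.sentinel import Sentinel

Undefined = Sentinel('Undefined', 'mypkg',
                     docstring='Placeholder for values that were never set.')
print(repr(Undefined))   # mypkg.Undefined

def lookup(mapping, key, default=Undefined):
    # identity comparison distinguishes "no default supplied" from None
    if key not in mapping and default is Undefined:
        raise KeyError(key)
    return mapping.get(key, default)
```
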
diff --git a/contrib/python/traitlets/py2/ya.make b/contrib/python/traitlets/py2/ya.make
index ae62d588b4..4a60107101 100644
--- a/contrib/python/traitlets/py2/ya.make
+++ b/contrib/python/traitlets/py2/ya.make
@@ -1,49 +1,49 @@
# Generated by devtools/yamaker (pypi).
PY2_LIBRARY()
-
+
PROVIDES(python_traitlets)
OWNER(borman nslus g:python-contrib)
VERSION(4.3.3)
-
+
LICENSE(BSD-3-Clause)
-PEERDIR(
+PEERDIR(
contrib/python/decorator
contrib/python/enum34
contrib/python/ipython-genutils
contrib/python/six
-)
-
+)
+
NO_LINT()
-PY_SRCS(
- TOP_LEVEL
- traitlets/__init__.py
- traitlets/_version.py
- traitlets/config/__init__.py
- traitlets/config/application.py
- traitlets/config/configurable.py
- traitlets/config/loader.py
- traitlets/config/manager.py
- traitlets/log.py
- traitlets/traitlets.py
- traitlets/utils/__init__.py
+PY_SRCS(
+ TOP_LEVEL
+ traitlets/__init__.py
+ traitlets/_version.py
+ traitlets/config/__init__.py
+ traitlets/config/application.py
+ traitlets/config/configurable.py
+ traitlets/config/loader.py
+ traitlets/config/manager.py
+ traitlets/log.py
+ traitlets/traitlets.py
+ traitlets/utils/__init__.py
traitlets/utils/bunch.py
- traitlets/utils/getargspec.py
- traitlets/utils/importstring.py
- traitlets/utils/sentinel.py
-)
-
+ traitlets/utils/getargspec.py
+ traitlets/utils/importstring.py
+ traitlets/utils/sentinel.py
+)
+
RESOURCE_FILES(
PREFIX contrib/python/traitlets/py2/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
RECURSE_FOR_TESTS(
tests
diff --git a/contrib/python/traitlets/py3/COPYING.md b/contrib/python/traitlets/py3/COPYING.md
index e314a9d376..39ca730a63 100644
--- a/contrib/python/traitlets/py3/COPYING.md
+++ b/contrib/python/traitlets/py3/COPYING.md
@@ -1,62 +1,62 @@
-# Licensing terms
-
-Traitlets is adapted from enthought.traits, Copyright (c) Enthought, Inc.,
-under the terms of the Modified BSD License.
-
-This project is licensed under the terms of the Modified BSD License
-(also known as New or Revised or 3-Clause BSD), as follows:
-
-- Copyright (c) 2001-, IPython Development Team
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-Neither the name of the IPython Development Team nor the names of its
-contributors may be used to endorse or promote products derived from this
-software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-## About the IPython Development Team
-
-The IPython Development Team is the set of all contributors to the IPython project.
-This includes all of the IPython subprojects.
-
-The core team that coordinates development on GitHub can be found here:
-https://github.com/jupyter/.
-
-## Our Copyright Policy
-
-IPython uses a shared copyright model. Each contributor maintains copyright
-over their contributions to IPython. But, it is important to note that these
-contributions are typically only changes to the repositories. Thus, the IPython
-source code, in its entirety is not the copyright of any single person or
-institution. Instead, it is the collective copyright of the entire IPython
-Development Team. If individual contributors want to maintain a record of what
-changes/contributions they have specific copyright on, they should indicate
-their copyright in the commit message of the change, when they commit the
-change to one of the IPython repositories.
-
-With this in mind, the following banner should be used in any source code file
-to indicate the copyright and license terms:
-
- # Copyright (c) IPython Development Team.
- # Distributed under the terms of the Modified BSD License.
+# Licensing terms
+
+Traitlets is adapted from enthought.traits, Copyright (c) Enthought, Inc.,
+under the terms of the Modified BSD License.
+
+This project is licensed under the terms of the Modified BSD License
+(also known as New or Revised or 3-Clause BSD), as follows:
+
+- Copyright (c) 2001-, IPython Development Team
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the IPython Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+## About the IPython Development Team
+
+The IPython Development Team is the set of all contributors to the IPython project.
+This includes all of the IPython subprojects.
+
+The core team that coordinates development on GitHub can be found here:
+https://github.com/jupyter/.
+
+## Our Copyright Policy
+
+IPython uses a shared copyright model. Each contributor maintains copyright
+over their contributions to IPython. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety is not the copyright of any single person or
+institution. Instead, it is the collective copyright of the entire IPython
+Development Team. If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the IPython repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+ # Copyright (c) IPython Development Team.
+ # Distributed under the terms of the Modified BSD License.
diff --git a/contrib/python/traitlets/py3/traitlets/__init__.py b/contrib/python/traitlets/py3/traitlets/__init__.py
index 1b8675a879..ad5ba73c86 100644
--- a/contrib/python/traitlets/py3/traitlets/__init__.py
+++ b/contrib/python/traitlets/py3/traitlets/__init__.py
@@ -1,11 +1,11 @@
from warnings import warn
from . import traitlets
-from .traitlets import *
-from .utils.importstring import import_item
+from .traitlets import *
+from .utils.importstring import import_item
from .utils.decorators import signature_has_traits
from .utils.bunch import Bunch
-from ._version import version_info, __version__
+from ._version import version_info, __version__
class Sentinel(traitlets.Sentinel):
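
The star-import above is what makes the trait types usable from the package top level. A minimal sketch of that public surface (standard traitlets API, not specific to this patch):

```python
from traitlets import HasTraits, Int, Unicode, observe

class Counter(HasTraits):
    count = Int(0)
    label = Unicode('counter')

    @observe('count')
    def _count_changed(self, change):
        print('%s: %s -> %s' % (self.label, change.old, change.new))

c = Counter()
c.count = 3   # prints: counter: 0 -> 3
```
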
diff --git a/contrib/python/traitlets/py3/traitlets/config/__init__.py b/contrib/python/traitlets/py3/traitlets/config/__init__.py
index 1531ee5930..0ae7d63171 100644
--- a/contrib/python/traitlets/py3/traitlets/config/__init__.py
+++ b/contrib/python/traitlets/py3/traitlets/config/__init__.py
@@ -1,8 +1,8 @@
-# encoding: utf-8
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from .application import *
-from .configurable import *
-from .loader import Config
+# encoding: utf-8
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+from .application import *
+from .configurable import *
+from .loader import Config
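
A short sketch of the Config object re-exported here: sections are plain attribute accesses that spring into existence on first use, so deep assignment works without setup. 'MyApp' is a hypothetical class name.

```python
from traitlets.config import Config

c = Config()
c.Application.log_level = 'DEBUG'   # auto-creates the 'Application' section
c.MyApp.name = 'demo'

print(c.Application.log_level)      # DEBUG
print('MyApp' in c)                 # True
```
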
diff --git a/contrib/python/traitlets/py3/traitlets/config/application.py b/contrib/python/traitlets/py3/traitlets/config/application.py
index 6cdb801008..99a6ef7ee0 100644
--- a/contrib/python/traitlets/py3/traitlets/config/application.py
+++ b/contrib/python/traitlets/py3/traitlets/config/application.py
@@ -1,68 +1,68 @@
-"""A base class for a configurable application."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-
+"""A base class for a configurable application."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
from collections import defaultdict, OrderedDict
from copy import deepcopy
import functools
-import json
-import logging
-import os
+import json
+import logging
+import os
import pprint
-import re
-import sys
+import re
+import sys
import warnings
-
-from traitlets.config.configurable import Configurable, SingletonConfigurable
-from traitlets.config.loader import (
- KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
-)
-from traitlets.traitlets import (
+
+from traitlets.config.configurable import Configurable, SingletonConfigurable
+from traitlets.config.loader import (
+ KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
+)
+from traitlets.traitlets import (
Bool, Unicode, List, Enum, Dict, Instance, TraitError, observe, observe_compat, default,
-)
+)
from ..utils.importstring import import_item
from ..utils import cast_unicode
from traitlets.utils.text import indent, wrap_paragraphs
from textwrap import dedent
-
-#-----------------------------------------------------------------------------
-# Descriptions for the various sections
-#-----------------------------------------------------------------------------
-# merge flags&aliases into options
-option_description = """
+
+#-----------------------------------------------------------------------------
+# Descriptions for the various sections
+#-----------------------------------------------------------------------------
+# merge flags&aliases into options
+option_description = """
The options below are convenience aliases to configurable class-options,
as listed in the "Equivalent to" description-line of the aliases.
To see all configurable class-options for some <cmd>, use:
<cmd> --help-all
""".strip() # trim newlines of front and back
-
-keyvalue_description = """
+
+keyvalue_description = """
The command-line option below sets the respective configurable class-parameter:
--Class.parameter=value
This line is evaluated in Python, so simple expressions are allowed.
For instance, to set `C.a=[0,1,2]`, you may type this:
--C.a='range(3)'
-""".strip() # trim newlines of front and back
-
-# sys.argv can be missing, for example when python is embedded. See the docs
-# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
-if not hasattr(sys, "argv"):
- sys.argv = [""]
-
-subcommand_description = """
-Subcommands are launched as `{app} cmd [args]`. For information on using
-subcommand 'cmd', do: `{app} cmd -h`.
-"""
-# get running program name
-
-#-----------------------------------------------------------------------------
-# Application class
-#-----------------------------------------------------------------------------
-
+""".strip() # trim newlines of front and back
+
+# sys.argv can be missing, for example when python is embedded. See the docs
+# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
+if not hasattr(sys, "argv"):
+ sys.argv = [""]
+
+subcommand_description = """
+Subcommands are launched as `{app} cmd [args]`. For information on using
+subcommand 'cmd', do: `{app} cmd -h`.
+"""
+# get running program name
+
+#-----------------------------------------------------------------------------
+# Application class
+#-----------------------------------------------------------------------------
+
_envvar = os.environ.get('TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR','')
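
The --Class.parameter=value syntax described above can be exercised with a throwaway Application subclass; DemoApp and its retries trait are invented for the demo.

```python
from traitlets import Int, Unicode
from traitlets.config import Application

class DemoApp(Application):
    name = Unicode('demo')
    retries = Int(3, help="How many times to retry.").tag(config=True)

    def start(self):
        print('retries =', self.retries)

app = DemoApp()
app.initialize(['--DemoApp.retries=5'])   # same shape as --Class.parameter=value
app.start()                               # retries = 5
```
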
@@ -75,13 +75,13 @@ else:
def catch_config_error(method):
- """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
-
- On a TraitError (generally caused by bad config), this will print the trait's
- message, and exit the app.
+ """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
+
+ On a TraitError (generally caused by bad config), this will print the trait's
+ message, and exit the app.
- For use on init methods, to prevent invoking excepthook on invalid input.
- """
+ For use on init methods, to prevent invoking excepthook on invalid input.
+ """
@functools.wraps(method)
def inner(app, *args, **kwargs):
try:
@@ -90,116 +90,116 @@ def catch_config_error(method):
app.log.fatal("Bad config encountered during initialization: %s", e)
app.log.debug("Config at the time: %s", app.config)
app.exit(1)
-
+
return inner
-
-class ApplicationError(Exception):
- pass
-
-
-class LevelFormatter(logging.Formatter):
- """Formatter with additional `highlevel` record
-
- This field is empty if log level is less than highlevel_limit,
- otherwise it is formatted with self.highlevel_format.
-
- Useful for adding 'WARNING' to warning messages,
- without adding 'INFO' to info, etc.
- """
- highlevel_limit = logging.WARN
- highlevel_format = " %(levelname)s |"
-
- def format(self, record):
- if record.levelno >= self.highlevel_limit:
- record.highlevel = self.highlevel_format % record.__dict__
- else:
- record.highlevel = ""
- return super(LevelFormatter, self).format(record)
-
-
-class Application(SingletonConfigurable):
- """A singleton application with full configuration support."""
-
- # The name of the application, will usually match the name of the command
- # line application
+
+class ApplicationError(Exception):
+ pass
+
+
+class LevelFormatter(logging.Formatter):
+ """Formatter with additional `highlevel` record
+
+ This field is empty if log level is less than highlevel_limit,
+ otherwise it is formatted with self.highlevel_format.
+
+ Useful for adding 'WARNING' to warning messages,
+ without adding 'INFO' to info, etc.
+ """
+ highlevel_limit = logging.WARN
+ highlevel_format = " %(levelname)s |"
+
+ def format(self, record):
+ if record.levelno >= self.highlevel_limit:
+ record.highlevel = self.highlevel_format % record.__dict__
+ else:
+ record.highlevel = ""
+ return super(LevelFormatter, self).format(record)
+
+
+class Application(SingletonConfigurable):
+ """A singleton application with full configuration support."""
+
+ # The name of the application, will usually match the name of the command
+ # line application
name = Unicode('application')
-
- # The description of the application that is printed at the beginning
- # of the help.
+
+ # The description of the application that is printed at the beginning
+ # of the help.
description = Unicode('This is an application.')
- # default section descriptions
- option_description = Unicode(option_description)
- keyvalue_description = Unicode(keyvalue_description)
- subcommand_description = Unicode(subcommand_description)
-
- python_config_loader_class = PyFileConfigLoader
- json_config_loader_class = JSONFileConfigLoader
-
- # The usage and example string that goes at the end of the help string.
- examples = Unicode()
-
- # A sequence of Configurable subclasses whose config=True attributes will
- # be exposed at the command line.
- classes = []
-
+ # default section descriptions
+ option_description = Unicode(option_description)
+ keyvalue_description = Unicode(keyvalue_description)
+ subcommand_description = Unicode(subcommand_description)
+
+ python_config_loader_class = PyFileConfigLoader
+ json_config_loader_class = JSONFileConfigLoader
+
+ # The usage and example string that goes at the end of the help string.
+ examples = Unicode()
+
+ # A sequence of Configurable subclasses whose config=True attributes will
+ # be exposed at the command line.
+ classes = []
+
def _classes_inc_parents(self, classes=None):
- """Iterate through configurable classes, including configurable parents
-
+ """Iterate through configurable classes, including configurable parents
+
:param classes:
The list of classes to iterate; if not set, uses :attr:`classes`.
- Children should always be after parents, and each class should only be
- yielded once.
- """
+ Children should always be after parents, and each class should only be
+ yielded once.
+ """
if classes is None:
classes = self.classes
- seen = set()
+ seen = set()
for c in classes:
- # We want to sort parents before children, so we reverse the MRO
- for parent in reversed(c.mro()):
- if issubclass(parent, Configurable) and (parent not in seen):
- seen.add(parent)
- yield parent
-
- # The version string of this application.
+ # We want to sort parents before children, so we reverse the MRO
+ for parent in reversed(c.mro()):
+ if issubclass(parent, Configurable) and (parent not in seen):
+ seen.add(parent)
+ yield parent
+
+ # The version string of this application.
version = Unicode('0.0')
- # the argv used to initialize the application
- argv = List()
-
+ # the argv used to initialize the application
+ argv = List()
+
# Whether failing to load config files should prevent startup
raise_config_file_errors = Bool(TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR)
- # The log level for the application
- log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
- default_value=logging.WARN,
- help="Set the log level by value or name.").tag(config=True)
-
- @observe('log_level')
- @observe_compat
- def _log_level_changed(self, change):
- """Adjust the log level when log_level is set."""
+ # The log level for the application
+ log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
+ default_value=logging.WARN,
+ help="Set the log level by value or name.").tag(config=True)
+
+ @observe('log_level')
+ @observe_compat
+ def _log_level_changed(self, change):
+ """Adjust the log level when log_level is set."""
new = change.new
if isinstance(new, str):
- new = getattr(logging, new)
- self.log_level = new
- self.log.setLevel(new)
+ new = getattr(logging, new)
+ self.log_level = new
+ self.log.setLevel(new)
- _log_formatter_cls = LevelFormatter
+ _log_formatter_cls = LevelFormatter
log_datefmt = Unicode("%Y-%m-%d %H:%M:%S",
- help="The date format used by logging formatters for %(asctime)s"
- ).tag(config=True)
-
- log_format = Unicode("[%(name)s]%(highlevel)s %(message)s",
- help="The Logging format template",
- ).tag(config=True)
-
- @observe('log_datefmt', 'log_format')
- @observe_compat
- def _log_format_changed(self, change):
- """Change the log formatter when log_format is set."""
+ help="The date format used by logging formatters for %(asctime)s"
+ ).tag(config=True)
+
+ log_format = Unicode("[%(name)s]%(highlevel)s %(message)s",
+ help="The Logging format template",
+ ).tag(config=True)
+
+ @observe('log_datefmt', 'log_format')
+ @observe_compat
+ def _log_format_changed(self, change):
+ """Change the log formatter when log_format is set."""
_log_handler = self._get_log_handler()
if not _log_handler:
warnings.warn(
@@ -207,48 +207,48 @@ class Application(SingletonConfigurable):
RuntimeWarning,
)
return
- _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
- _log_handler.setFormatter(_log_formatter)
-
- @default('log')
- def _log_default(self):
- """Start logging for this application.
-
- The default is to log to stderr using a StreamHandler, if no default
- handler already exists. The log level starts at logging.WARN, but this
- can be adjusted by setting the ``log_level`` attribute.
- """
- log = logging.getLogger(self.__class__.__name__)
- log.setLevel(self.log_level)
- log.propagate = False
- _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
- while _log:
- if _log.handlers:
- return log
- if not _log.propagate:
- break
- else:
- _log = _log.parent
+ _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
+ _log_handler.setFormatter(_log_formatter)
+
+ @default('log')
+ def _log_default(self):
+ """Start logging for this application.
+
+ The default is to log to stderr using a StreamHandler, if no default
+ handler already exists. The log level starts at logging.WARN, but this
+ can be adjusted by setting the ``log_level`` attribute.
+ """
+ log = logging.getLogger(self.__class__.__name__)
+ log.setLevel(self.log_level)
+ log.propagate = False
+ _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
+ while _log:
+ if _log.handlers:
+ return log
+ if not _log.propagate:
+ break
+ else:
+ _log = _log.parent
if sys.executable and sys.executable.endswith('pythonw.exe'):
- # this should really go to a file, but file-logging is only
- # hooked up in parallel applications
- _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
- else:
- _log_handler = logging.StreamHandler()
- _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
- _log_handler.setFormatter(_log_formatter)
- log.addHandler(_log_handler)
- return log
-
+ # this should really go to a file, but file-logging is only
+ # hooked up in parallel applications
+ _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
+ else:
+ _log_handler = logging.StreamHandler()
+ _log_formatter = self._log_formatter_cls(fmt=self.log_format, datefmt=self.log_datefmt)
+ _log_handler.setFormatter(_log_formatter)
+ log.addHandler(_log_handler)
+ return log
+
#: the alias map for configurables
    #: Keys might be strings or tuples for additional options; a single-letter alias is accessed like `-v`.
    #: Values might be "Class.trait" strings or two-tuples: (Class.trait, help-text).
aliases = {'log-level' : 'Application.log_level'}
-
- # flags for loading Configurables or store_const style flags
- # flags are loaded from this dict by '--key' flags
- # this must be a dict of two-tuples, the first element being the Config/dict
- # and the second being the help string for the flag
+
+ # flags for loading Configurables or store_const style flags
+ # flags are loaded from this dict by '--key' flags
+ # this must be a dict of two-tuples, the first element being the Config/dict
+ # and the second being the help string for the flag
flags = {
'debug': ({
'Application': {
@@ -266,22 +266,22 @@ class Application(SingletonConfigurable):
},
}, "Show the application's configuration (json format)"),
}
-
- # subcommands for launching other applications
- # if this is not empty, this will be a parent Application
- # this must be a dict of two-tuples,
- # the first element being the application class/import string
- # and the second being the help string for the subcommand
- subcommands = Dict()
- # parse_command_line will initialize a subapp, if requested
- subapp = Instance('traitlets.config.application.Application', allow_none=True)
-
- # extra command-line arguments that don't set config values
- extra_args = List(Unicode())
-
+
+ # subcommands for launching other applications
+ # if this is not empty, this will be a parent Application
+ # this must be a dict of two-tuples,
+ # the first element being the application class/import string
+ # and the second being the help string for the subcommand
+ subcommands = Dict()
+ # parse_command_line will initialize a subapp, if requested
+ subapp = Instance('traitlets.config.application.Application', allow_none=True)
+
+ # extra command-line arguments that don't set config values
+ extra_args = List(Unicode())
+
cli_config = Instance(Config, (), {},
help="""The subset of our configuration that came from the command-line
-
+
We re-load this configuration after loading config files,
to ensure that it maintains highest priority.
"""
@@ -307,10 +307,10 @@ class Application(SingletonConfigurable):
self._save_start = self.start
self.start = self.start_show_config
- def __init__(self, **kwargs):
- SingletonConfigurable.__init__(self, **kwargs)
- # Ensure my class is in self.classes, so my attributes appear in command line
- # options and config files.
+ def __init__(self, **kwargs):
+ SingletonConfigurable.__init__(self, **kwargs)
+ # Ensure my class is in self.classes, so my attributes appear in command line
+ # options and config files.
cls = self.__class__
if cls not in self.classes:
if self.classes is cls.classes:
@@ -319,29 +319,29 @@ class Application(SingletonConfigurable):
else:
self.classes.insert(0, self.__class__)
- @observe('config')
- @observe_compat
- def _config_changed(self, change):
- super(Application, self)._config_changed(change)
+ @observe('config')
+ @observe_compat
+ def _config_changed(self, change):
+ super(Application, self)._config_changed(change)
self.log.debug('Config changed: %r', change.new)
-
- @catch_config_error
- def initialize(self, argv=None):
- """Do the basic steps to configure me.
-
- Override in subclasses.
- """
- self.parse_command_line(argv)
-
-
- def start(self):
- """Start the app mainloop.
-
- Override in subclasses.
- """
- if self.subapp is not None:
- return self.subapp.start()
-
+
+ @catch_config_error
+ def initialize(self, argv=None):
+ """Do the basic steps to configure me.
+
+ Override in subclasses.
+ """
+ self.parse_command_line(argv)
+
+
+ def start(self):
+ """Start the app mainloop.
+
+ Override in subclasses.
+ """
+ if self.subapp is not None:
+ return self.subapp.start()
+
def start_show_config(self):
"""start function used when show_config is True"""
config = self.config.copy()
@@ -379,21 +379,21 @@ class Application(SingletonConfigurable):
pprint.pformat(value, **pformat_kwargs),
))
- def print_alias_help(self):
+ def print_alias_help(self):
"""Print the alias parts of the help."""
print('\n'.join(self.emit_alias_help()))
def emit_alias_help(self):
"""Yield the lines for alias part of the help."""
- if not self.aliases:
- return
-
- classdict = {}
- for cls in self.classes:
- # include all parents (up to, but excluding Configurable) in available names
- for c in cls.mro()[:-3]:
- classdict[c.__name__] = c
-
+ if not self.aliases:
+ return
+
+ classdict = {}
+ for cls in self.classes:
+ # include all parents (up to, but excluding Configurable) in available names
+ for c in cls.mro()[:-3]:
+ classdict[c.__name__] = c
+
for alias, longname in self.aliases.items():
try:
if isinstance(longname, tuple):
@@ -403,10 +403,10 @@ class Application(SingletonConfigurable):
classname, traitname = longname.split('.')[-2:]
longname = classname + '.' + traitname
cls = classdict[classname]
-
+
trait = cls.class_traits(config=True)[traitname]
fhelp = cls.class_get_trait_help(trait, helptext=fhelp).splitlines()
-
+
if not isinstance(alias, tuple):
alias = (alias, )
alias = sorted(alias, key=len)
@@ -423,15 +423,15 @@ class Application(SingletonConfigurable):
alias, ex)
raise
- def print_flag_help(self):
- """Print the flag part of the help."""
+ def print_flag_help(self):
+ """Print the flag part of the help."""
print('\n'.join(self.emit_flag_help()))
def emit_flag_help(self):
"""Yield the lines for the flag part of the help."""
- if not self.flags:
- return
-
+ if not self.flags:
+ return
+
for flags, (cfg, fhelp) in self.flags.items():
try:
if not isinstance(flags, tuple):
@@ -451,50 +451,50 @@ class Application(SingletonConfigurable):
self.log.error('Failed collecting help-message for flag %r, due to: %s',
flags, ex)
raise
-
- def print_options(self):
+
+ def print_options(self):
"""Print the options part of the help."""
print('\n'.join(self.emit_options_help()))
def emit_options_help(self):
"""Yield the lines for the options part of the help."""
- if not self.flags and not self.aliases:
- return
+ if not self.flags and not self.aliases:
+ return
header = 'Options'
yield header
yield '=' * len(header)
- for p in wrap_paragraphs(self.option_description):
+ for p in wrap_paragraphs(self.option_description):
yield p
yield ''
-
+
for l in self.emit_flag_help():
yield l
for l in self.emit_alias_help():
yield l
yield ''
- def print_subcommands(self):
- """Print the subcommand part of the help."""
+ def print_subcommands(self):
+ """Print the subcommand part of the help."""
print('\n'.join(self.emit_subcommands_help()))
def emit_subcommands_help(self):
"""Yield the lines for the subcommand part of the help."""
- if not self.subcommands:
- return
-
+ if not self.subcommands:
+ return
+
header = "Subcommands"
yield header
yield '=' * len(header)
- for p in wrap_paragraphs(self.subcommand_description.format(
- app=self.name)):
+ for p in wrap_paragraphs(self.subcommand_description.format(
+ app=self.name)):
yield p
yield ''
for subc, (cls, help) in self.subcommands.items():
yield subc
- if help:
+ if help:
yield indent(dedent(help.strip()))
yield ''
-
+
def emit_help_epilogue(self, classes):
"""Yield the very bottom lines of the help message.
@@ -504,13 +504,13 @@ class Application(SingletonConfigurable):
yield "To see all available configurables, use `--help-all`."
yield ''
- def print_help(self, classes=False):
- """Print the help for each Configurable class in self.classes.
-
- If classes=False (the default), only flags and aliases are printed.
- """
+ def print_help(self, classes=False):
+ """Print the help for each Configurable class in self.classes.
+
+ If classes=False (the default), only flags and aliases are printed.
+ """
print('\n'.join(self.emit_help(classes=classes)))
-
+
def emit_help(self, classes=False):
"""Yield the help-lines for each Configurable class in self.classes.
@@ -523,71 +523,71 @@ class Application(SingletonConfigurable):
for l in self.emit_options_help():
yield l
- if classes:
+ if classes:
help_classes = self._classes_with_config_traits()
- if help_classes:
+ if help_classes:
yield "Class options"
yield "============="
- for p in wrap_paragraphs(self.keyvalue_description):
+ for p in wrap_paragraphs(self.keyvalue_description):
yield p
yield ''
-
- for cls in help_classes:
+
+ for cls in help_classes:
yield cls.class_get_help()
yield ''
for l in self.emit_examples():
yield l
-
+
for l in self.emit_help_epilogue(classes):
yield l
-
- def document_config_options(self):
- """Generate rST format documentation for the config options this application
-
- Returns a multiline string.
- """
- return '\n'.join(c.class_config_rst_doc()
- for c in self._classes_inc_parents())
-
- def print_description(self):
- """Print the application description."""
+
+ def document_config_options(self):
+        """Generate rST format documentation for the config options of this application.
+
+ Returns a multiline string.
+ """
+ return '\n'.join(c.class_config_rst_doc()
+ for c in self._classes_inc_parents())
+
+ def print_description(self):
+ """Print the application description."""
print('\n'.join(self.emit_description()))
-
+
def emit_description(self):
"""Yield lines with the application description."""
for p in wrap_paragraphs(self.description or self.__doc__):
yield p
yield ''
- def print_examples(self):
+ def print_examples(self):
"""Print usage and examples (see `emit_examples()`). """
print('\n'.join(self.emit_examples()))
-
+
def emit_examples(self):
"""Yield lines with the usage and examples.
- This usage string goes at the end of the command line help string
- and should contain examples of the application's usage.
- """
- if self.examples:
+ This usage string goes at the end of the command line help string
+ and should contain examples of the application's usage.
+ """
+ if self.examples:
yield "Examples"
yield "--------"
yield ''
yield indent(dedent(self.examples.strip()))
yield ''
-
- def print_version(self):
- """Print the version string."""
- print(self.version)
-
- @catch_config_error
- def initialize_subcommand(self, subc, argv=None):
- """Initialize a subcommand with argv."""
+
+ def print_version(self):
+ """Print the version string."""
+ print(self.version)
+
+ @catch_config_error
+ def initialize_subcommand(self, subc, argv=None):
+ """Initialize a subcommand with argv."""
subapp, _ = self.subcommands.get(subc)
-
+
if isinstance(subapp, str):
- subapp = import_item(subapp)
-
+ subapp = import_item(subapp)
+
        ## Cannot issubclass() on a non-type (SO: http://stackoverflow.com/questions/8692430)
if isinstance(subapp, type) and issubclass(subapp, Application):
# Clear existing instances before...
@@ -601,55 +601,55 @@ class Application(SingletonConfigurable):
raise AssertionError("Invalid mappings for subcommand '%s'!" % subc)
# ... and finally initialize subapp.
- self.subapp.initialize(argv)
+ self.subapp.initialize(argv)
- def flatten_flags(self):
+ def flatten_flags(self):
"""Flatten flags and aliases for loaders, so cl-args override as expected.
- This prevents issues such as an alias pointing to InteractiveShell,
- but a config file setting the same trait in TerminalInteraciveShell
- getting inappropriate priority over the command-line arg.
+ This prevents issues such as an alias pointing to InteractiveShell,
+        but a config file setting the same trait in TerminalInteractiveShell
+ getting inappropriate priority over the command-line arg.
        Also, loaders expect ``(key: longname)`` and not ``key: (longname, help)`` items.
-
- Only aliases with exactly one descendent in the class list
- will be promoted.
-
- """
- # build a tree of classes in our list that inherit from a particular
- # it will be a dict by parent classname of classes in our list
- # that are descendents
- mro_tree = defaultdict(list)
- for cls in self.classes:
- clsname = cls.__name__
- for parent in cls.mro()[1:-3]:
- # exclude cls itself and Configurable,HasTraits,object
- mro_tree[parent.__name__].append(clsname)
- # flatten aliases, which have the form:
- # { 'alias' : 'Class.trait' }
- aliases = {}
+
+ Only aliases with exactly one descendent in the class list
+ will be promoted.
+
+ """
+ # build a tree of classes in our list that inherit from a particular
+ # it will be a dict by parent classname of classes in our list
+ # that are descendents
+ mro_tree = defaultdict(list)
+ for cls in self.classes:
+ clsname = cls.__name__
+ for parent in cls.mro()[1:-3]:
+ # exclude cls itself and Configurable,HasTraits,object
+ mro_tree[parent.__name__].append(clsname)
+ # flatten aliases, which have the form:
+ # { 'alias' : 'Class.trait' }
+ aliases = {}
for alias, longname in self.aliases.items():
if isinstance(longname, tuple):
longname, _ = longname
cls, trait = longname.split('.', 1)
- children = mro_tree[cls]
- if len(children) == 1:
- # exactly one descendent, promote alias
- cls = children[0]
+ children = mro_tree[cls]
+ if len(children) == 1:
+ # exactly one descendent, promote alias
+ cls = children[0]
if not isinstance(aliases, tuple):
alias = (alias, )
for al in alias:
aliases[al] = '.'.join([cls,trait])
- # flatten flags, which are of the form:
- # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
- flags = {}
+ # flatten flags, which are of the form:
+ # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
+ flags = {}
for key, (flagdict, help) in self.flags.items():
- newflag = {}
+ newflag = {}
for cls, subdict in flagdict.items():
- children = mro_tree[cls]
- # exactly one descendent, promote flag section
- if len(children) == 1:
- cls = children[0]
+ children = mro_tree[cls]
+ # exactly one descendent, promote flag section
+ if len(children) == 1:
+ cls = children[0]
if cls in newflag:
newflag[cls].update(subdict)
@@ -660,48 +660,48 @@ class Application(SingletonConfigurable):
key = (key, )
for k in key:
flags[k] = (newflag, help)
- return flags, aliases
-
+ return flags, aliases
+
def _create_loader(self, argv, aliases, flags, classes):
return KVArgParseConfigLoader(argv, aliases, flags, classes=classes,
log=self.log)
- @catch_config_error
- def parse_command_line(self, argv=None):
- """Parse the command line arguments."""
+ @catch_config_error
+ def parse_command_line(self, argv=None):
+ """Parse the command line arguments."""
assert not isinstance(argv, str)
- argv = sys.argv[1:] if argv is None else argv
+ argv = sys.argv[1:] if argv is None else argv
self.argv = [cast_unicode(arg) for arg in argv ]
- if argv and argv[0] == 'help':
- # turn `ipython help notebook` into `ipython notebook -h`
- argv = argv[1:] + ['-h']
-
- if self.subcommands and len(argv) > 0:
- # we have subcommands, and one may have been specified
- subc, subargv = argv[0], argv[1:]
- if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
- # it's a subcommand, and *not* a flag or class parameter
- return self.initialize_subcommand(subc, subargv)
-
- # Arguments after a '--' argument are for the script IPython may be
- # about to run, not IPython iteslf. For arguments parsed here (help and
- # version), we want to only search the arguments up to the first
- # occurrence of '--', which we're calling interpreted_argv.
- try:
- interpreted_argv = argv[:argv.index('--')]
- except ValueError:
- interpreted_argv = argv
-
- if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
- self.print_help('--help-all' in interpreted_argv)
- self.exit(0)
-
- if '--version' in interpreted_argv or '-V' in interpreted_argv:
- self.print_version()
- self.exit(0)
-
- # flatten flags&aliases, so cl-args get appropriate priority:
+ if argv and argv[0] == 'help':
+ # turn `ipython help notebook` into `ipython notebook -h`
+ argv = argv[1:] + ['-h']
+
+ if self.subcommands and len(argv) > 0:
+ # we have subcommands, and one may have been specified
+ subc, subargv = argv[0], argv[1:]
+ if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
+ # it's a subcommand, and *not* a flag or class parameter
+ return self.initialize_subcommand(subc, subargv)
+
+ # Arguments after a '--' argument are for the script IPython may be
+        # about to run, not IPython itself. For arguments parsed here (help and
+ # version), we want to only search the arguments up to the first
+ # occurrence of '--', which we're calling interpreted_argv.
+ try:
+ interpreted_argv = argv[:argv.index('--')]
+ except ValueError:
+ interpreted_argv = argv
+
+ if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
+ self.print_help('--help-all' in interpreted_argv)
+ self.exit(0)
+
+ if '--version' in interpreted_argv or '-V' in interpreted_argv:
+ self.print_version()
+ self.exit(0)
+
+ # flatten flags&aliases, so cl-args get appropriate priority:
flags, aliases = self.flatten_flags()
classes = tuple(self._classes_with_config_traits())
loader = self._create_loader(argv, aliases, flags, classes=classes)
@@ -712,46 +712,46 @@ class Application(SingletonConfigurable):
# help output is huge, and comes after the error
raise
self.update_config(self.cli_config)
- # store unparsed args in extra_args
- self.extra_args = loader.extra_args
-
- @classmethod
+ # store unparsed args in extra_args
+ self.extra_args = loader.extra_args
+
+ @classmethod
def _load_config_files(cls, basefilename, path=None, log=None, raise_config_file_errors=False):
- """Load config files (py,json) by filename and path.
-
- yield each config object in turn.
- """
-
- if not isinstance(path, list):
- path = [path]
- for path in path[::-1]:
- # path list is in descending priority order, so load files backwards:
- pyloader = cls.python_config_loader_class(basefilename+'.py', path=path, log=log)
- if log:
+ """Load config files (py,json) by filename and path.
+
+ yield each config object in turn.
+ """
+
+ if not isinstance(path, list):
+ path = [path]
+ for path in path[::-1]:
+ # path list is in descending priority order, so load files backwards:
+ pyloader = cls.python_config_loader_class(basefilename+'.py', path=path, log=log)
+ if log:
log.debug("Looking for %s in %s", basefilename, path or os.getcwd())
- jsonloader = cls.json_config_loader_class(basefilename+'.json', path=path, log=log)
+ jsonloader = cls.json_config_loader_class(basefilename+'.json', path=path, log=log)
loaded = []
filenames = []
- for loader in [pyloader, jsonloader]:
+ for loader in [pyloader, jsonloader]:
config = None
- try:
- config = loader.load_config()
- except ConfigFileNotFound:
- pass
- except Exception:
- # try to get the full filename, but it will be empty in the
- # unlikely event that the error raised before filefind finished
- filename = loader.full_filename or basefilename
- # problem while running the file
+ try:
+ config = loader.load_config()
+ except ConfigFileNotFound:
+ pass
+ except Exception:
+ # try to get the full filename, but it will be empty in the
+ # unlikely event that the error raised before filefind finished
+ filename = loader.full_filename or basefilename
+ # problem while running the file
if raise_config_file_errors:
raise
- if log:
- log.error("Exception while loading config file %s",
- filename, exc_info=True)
- else:
- if log:
- log.debug("Loaded config file: %s", loader.full_filename)
- if config:
+ if log:
+ log.error("Exception while loading config file %s",
+ filename, exc_info=True)
+ else:
+ if log:
+ log.debug("Loaded config file: %s", loader.full_filename)
+ if config:
for filename, earlier_config in zip(filenames, loaded):
collisions = earlier_config.collisions(config)
if collisions and log:
@@ -762,16 +762,16 @@ class Application(SingletonConfigurable):
yield (config, loader.full_filename)
loaded.append(config)
filenames.append(loader.full_filename)
-
+
@property
def loaded_config_files(self):
"""Currently loaded configuration files"""
return self._loaded_config_files[:]
-
- @catch_config_error
- def load_config_file(self, filename, path=None):
- """Load config files by filename and path."""
- filename, ext = os.path.splitext(filename)
+
+ @catch_config_error
+ def load_config_file(self, filename, path=None):
+ """Load config files by filename and path."""
+ filename, ext = os.path.splitext(filename)
new_config = Config()
for (config, filename) in self._load_config_files(filename, path=path, log=self.log,
raise_config_file_errors=self.raise_config_file_errors,
@@ -782,7 +782,7 @@ class Application(SingletonConfigurable):
# add self.cli_config to preserve CLI config priority
new_config.merge(self.cli_config)
self.update_config(new_config)
-
+
def _classes_with_config_traits(self, classes=None):
"""
Yields only classes with configurable traits, and their subclasses.
@@ -822,76 +822,76 @@ class Application(SingletonConfigurable):
yield cl
def generate_config_file(self, classes=None):
- """generate default config file from Configurables"""
- lines = ["# Configuration file for %s." % self.name]
- lines.append('')
+ """generate default config file from Configurables"""
+ lines = ["# Configuration file for %s." % self.name]
+ lines.append('')
classes = self.classes if classes is None else classes
config_classes = list(self._classes_with_config_traits(classes))
for cls in config_classes:
lines.append(cls.class_config_section(config_classes))
- return '\n'.join(lines)
-
- def exit(self, exit_status=0):
- self.log.debug("Exiting application: %s" % self.name)
- sys.exit(exit_status)
-
- @classmethod
- def launch_instance(cls, argv=None, **kwargs):
- """Launch a global instance of this Application
-
- If a global instance already exists, this reinitializes and starts it
- """
- app = cls.instance(**kwargs)
- app.initialize(argv)
- app.start()
-
-#-----------------------------------------------------------------------------
-# utility functions, for convenience
-#-----------------------------------------------------------------------------
-
+ return '\n'.join(lines)
+
+ def exit(self, exit_status=0):
+ self.log.debug("Exiting application: %s" % self.name)
+ sys.exit(exit_status)
+
+ @classmethod
+ def launch_instance(cls, argv=None, **kwargs):
+ """Launch a global instance of this Application
+
+ If a global instance already exists, this reinitializes and starts it
+ """
+ app = cls.instance(**kwargs)
+ app.initialize(argv)
+ app.start()
+
+#-----------------------------------------------------------------------------
+# utility functions, for convenience
+#-----------------------------------------------------------------------------
+
default_aliases = Application.aliases
default_flags = Application.flags
-def boolean_flag(name, configurable, set_help='', unset_help=''):
- """Helper for building basic --trait, --no-trait flags.
-
- Parameters
- ----------
- name : str
- The name of the flag.
- configurable : str
- The 'Class.trait' string of the trait to be set/unset with the flag
- set_help : unicode
- help string for --name flag
- unset_help : unicode
- help string for --no-name flag
-
- Returns
- -------
- cfg : dict
- A dict with two keys: 'name', and 'no-name', for setting and unsetting
- the trait, respectively.
- """
- # default helpstrings
- set_help = set_help or "set %s=True"%configurable
- unset_help = unset_help or "set %s=False"%configurable
-
- cls,trait = configurable.split('.')
-
- setter = {cls : {trait : True}}
- unsetter = {cls : {trait : False}}
- return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
-
-
-def get_config():
- """Get the config object for the global Application instance, if there is one
-
- otherwise return an empty config object
- """
- if Application.initialized():
- return Application.instance().config
- else:
- return Config()
+def boolean_flag(name, configurable, set_help='', unset_help=''):
+ """Helper for building basic --trait, --no-trait flags.
+
+ Parameters
+ ----------
+ name : str
+ The name of the flag.
+ configurable : str
+ The 'Class.trait' string of the trait to be set/unset with the flag
+ set_help : unicode
+ help string for --name flag
+ unset_help : unicode
+ help string for --no-name flag
+
+ Returns
+ -------
+ cfg : dict
+ A dict with two keys: 'name', and 'no-name', for setting and unsetting
+ the trait, respectively.
+ """
+ # default helpstrings
+ set_help = set_help or "set %s=True"%configurable
+ unset_help = unset_help or "set %s=False"%configurable
+
+ cls,trait = configurable.split('.')
+
+ setter = {cls : {trait : True}}
+ unsetter = {cls : {trait : False}}
+ return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
+
+
+def get_config():
+ """Get the config object for the global Application instance, if there is one
+
+ otherwise return an empty config object
+ """
+ if Application.initialized():
+ return Application.instance().config
+ else:
+ return Config()
if __name__ == '__main__':
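
A sketch pairing boolean_flag (defined above) with a configurable trait; QuietApp and its verbose trait are invented, and overriding `flags` wholesale is just for brevity.

```python
from traitlets import Bool
from traitlets.config.application import Application, boolean_flag

class QuietApp(Application):
    verbose = Bool(True).tag(config=True)
    flags = dict(boolean_flag('verbose', 'QuietApp.verbose',
                              'enable chatty output', 'disable chatty output'))

app = QuietApp()
app.initialize(['--no-verbose'])   # the generated --no-verbose flag unsets the trait
print(app.verbose)                 # False
```
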
diff --git a/contrib/python/traitlets/py3/traitlets/config/configurable.py b/contrib/python/traitlets/py3/traitlets/config/configurable.py
index d6dd93e114..3b2044a01b 100644
--- a/contrib/python/traitlets/py3/traitlets/config/configurable.py
+++ b/contrib/python/traitlets/py3/traitlets/config/configurable.py
@@ -1,13 +1,13 @@
-"""A base class for objects that are configurable."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-
-from copy import deepcopy
+"""A base class for objects that are configurable."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+from copy import deepcopy
import logging
import warnings
-
+
from .loader import Config, LazyConfigValue, DeferredConfig, _is_section_key
from traitlets.traitlets import (
Any,
@@ -22,67 +22,67 @@ from traitlets.traitlets import (
)
from traitlets.utils.text import indent, wrap_paragraphs
from textwrap import dedent
-
-
-
-
-#-----------------------------------------------------------------------------
-# Helper classes for Configurables
-#-----------------------------------------------------------------------------
-
-
-class ConfigurableError(Exception):
- pass
-
-
-class MultipleInstanceError(ConfigurableError):
- pass
-
-#-----------------------------------------------------------------------------
-# Configurable implementation
-#-----------------------------------------------------------------------------
-
-class Configurable(HasTraits):
-
- config = Instance(Config, (), {})
- parent = Instance('traitlets.config.configurable.Configurable', allow_none=True)
-
- def __init__(self, **kwargs):
- """Create a configurable given a config config.
-
- Parameters
- ----------
- config : Config
- If this is empty, default values are used. If config is a
- :class:`Config` instance, it will be used to configure the
- instance.
- parent : Configurable instance, optional
- The parent Configurable instance of this object.
-
- Notes
- -----
- Subclasses of Configurable must call the :meth:`__init__` method of
- :class:`Configurable` *before* doing anything else and using
- :func:`super`::
-
- class MyConfigurable(Configurable):
- def __init__(self, config=None):
- super(MyConfigurable, self).__init__(config=config)
- # Then any other code you need to finish initialization.
-
- This ensures that instances will be configured properly.
- """
- parent = kwargs.pop('parent', None)
- if parent is not None:
- # config is implied from parent
- if kwargs.get('config', None) is None:
- kwargs['config'] = parent.config
- self.parent = parent
-
- config = kwargs.pop('config', None)
-
- # load kwarg traits, other than config
- super(Configurable, self).__init__(**kwargs)
+
+
+
+
+#-----------------------------------------------------------------------------
+# Helper classes for Configurables
+#-----------------------------------------------------------------------------
+
+
+class ConfigurableError(Exception):
+ pass
+
+
+class MultipleInstanceError(ConfigurableError):
+ pass
+
+#-----------------------------------------------------------------------------
+# Configurable implementation
+#-----------------------------------------------------------------------------
+
+class Configurable(HasTraits):
+
+ config = Instance(Config, (), {})
+ parent = Instance('traitlets.config.configurable.Configurable', allow_none=True)
+
+ def __init__(self, **kwargs):
+ """Create a configurable given a config config.
+
+ Parameters
+ ----------
+ config : Config
+ If this is empty, default values are used. If config is a
+ :class:`Config` instance, it will be used to configure the
+ instance.
+ parent : Configurable instance, optional
+ The parent Configurable instance of this object.
+
+ Notes
+ -----
+ Subclasses of Configurable must call the :meth:`__init__` method of
+ :class:`Configurable` *before* doing anything else and using
+ :func:`super`::
+
+ class MyConfigurable(Configurable):
+ def __init__(self, config=None):
+ super(MyConfigurable, self).__init__(config=config)
+ # Then any other code you need to finish initialization.
+
+ This ensures that instances will be configured properly.
+ """
+ parent = kwargs.pop('parent', None)
+ if parent is not None:
+ # config is implied from parent
+ if kwargs.get('config', None) is None:
+ kwargs['config'] = parent.config
+ self.parent = parent
+
+ config = kwargs.pop('config', None)
+
+ # load kwarg traits, other than config
+ super(Configurable, self).__init__(**kwargs)
# record traits set by config
config_override_names = set()
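
As an editorial aside, the parent/config handling restored in this hunk can be exercised with a minimal sketch (class names are illustrative):

    from traitlets.config import Config
    from traitlets.config.configurable import Configurable

    class Part(Configurable):
        pass

    owner = Configurable(config=Config())
    part = Part(parent=owner)
    assert part.config is owner.config   # config is implied from parent
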
@@ -95,122 +95,122 @@ class Configurable(HasTraits):
config_override_names.add(change.name)
self.observe(notice_config_override)
- # load config
- if config is not None:
- # We used to deepcopy, but for now we are trying to just save
- # by reference. This *could* have side effects as all components
- # will share config. In fact, I did find such a side effect in
- # _config_changed below. If a config attribute value was a mutable type
- # all instances of a component were getting the same copy, effectively
- # making that a class attribute.
- # self.config = deepcopy(config)
- self.config = config
- else:
- # allow _config_default to return something
- self._load_config(self.config)
+ # load config
+ if config is not None:
+ # We used to deepcopy, but for now we are trying to just save
+ # by reference. This *could* have side effects as all components
+ # will share config. In fact, I did find such a side effect in
+ # _config_changed below. If a config attribute value was a mutable type
+ # all instances of a component were getting the same copy, effectively
+ # making that a class attribute.
+ # self.config = deepcopy(config)
+ self.config = config
+ else:
+ # allow _config_default to return something
+ self._load_config(self.config)
self.unobserve(notice_config_override)
for name in config_override_names:
setattr(self, name, kwargs[name])
-
- #-------------------------------------------------------------------------
- # Static trait notifiations
- #-------------------------------------------------------------------------
- @classmethod
- def section_names(cls):
- """return section names as a list"""
- return [c.__name__ for c in reversed(cls.__mro__) if
- issubclass(c, Configurable) and issubclass(cls, c)
- ]
+ #-------------------------------------------------------------------------
+ # Static trait notifications
+ #-------------------------------------------------------------------------
+
+ @classmethod
+ def section_names(cls):
+ """return section names as a list"""
+ return [c.__name__ for c in reversed(cls.__mro__) if
+ issubclass(c, Configurable) and issubclass(cls, c)
+ ]
- def _find_my_config(self, cfg):
- """extract my config from a global Config object
+ def _find_my_config(self, cfg):
+ """extract my config from a global Config object
- will construct a Config object of only the config values that apply to me
- based on my mro(), as well as those of my parent(s) if they exist.
+ will construct a Config object of only the config values that apply to me
+ based on my mro(), as well as those of my parent(s) if they exist.
- If I am Bar and my parent is Foo, and their parent is Tim,
- this will return merge following config sections, in this order::
+ If I am Bar and my parent is Foo, and their parent is Tim,
+ this will return the following config sections merged, in this order::
[Bar, Foo.Bar, Tim.Foo.Bar]
- With the last item being the highest priority.
- """
- cfgs = [cfg]
- if self.parent:
- cfgs.append(self.parent._find_my_config(cfg))
- my_config = Config()
- for c in cfgs:
- for sname in self.section_names():
- # Don't do a blind getattr as that would cause the config to
- # dynamically create the section with name Class.__name__.
- if c._has_section(sname):
- my_config.merge(c[sname])
- return my_config
-
- def _load_config(self, cfg, section_names=None, traits=None):
- """load traits from a Config object"""
-
- if traits is None:
- traits = self.traits(config=True)
- if section_names is None:
- section_names = self.section_names()
-
- my_config = self._find_my_config(cfg)
-
- # hold trait notifications until after all config has been loaded
- with self.hold_trait_notifications():
+ With the last item being the highest priority.
+ """
+ cfgs = [cfg]
+ if self.parent:
+ cfgs.append(self.parent._find_my_config(cfg))
+ my_config = Config()
+ for c in cfgs:
+ for sname in self.section_names():
+ # Don't do a blind getattr as that would cause the config to
+ # dynamically create the section with name Class.__name__.
+ if c._has_section(sname):
+ my_config.merge(c[sname])
+ return my_config
+
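
A minimal sketch of the section lookup described in the docstrings above (class names illustrative; the expected list follows from the MRO filter in section_names):

    from traitlets.config.configurable import Configurable

    class Base(Configurable):
        pass

    class Derived(Base):
        pass

    assert Derived.section_names() == ['Configurable', 'Base', 'Derived']
    # _find_my_config merges these sections in that order, so a value in
    # cfg.Derived wins over the same trait set in cfg.Base.
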
+ def _load_config(self, cfg, section_names=None, traits=None):
+ """load traits from a Config object"""
+
+ if traits is None:
+ traits = self.traits(config=True)
+ if section_names is None:
+ section_names = self.section_names()
+
+ my_config = self._find_my_config(cfg)
+
+ # hold trait notifications until after all config has been loaded
+ with self.hold_trait_notifications():
for name, config_value in my_config.items():
- if name in traits:
- if isinstance(config_value, LazyConfigValue):
- # ConfigValue is a wrapper for using append / update on containers
- # without having to copy the initial value
- initial = getattr(self, name)
- config_value = config_value.get_value(initial)
+ if name in traits:
+ if isinstance(config_value, LazyConfigValue):
+ # ConfigValue is a wrapper for using append / update on containers
+ # without having to copy the initial value
+ initial = getattr(self, name)
+ config_value = config_value.get_value(initial)
elif isinstance(config_value, DeferredConfig):
# DeferredConfig tends to come from CLI/environment variables
config_value = config_value.get_value(traits[name])
- # We have to do a deepcopy here if we don't deepcopy the entire
- # config object. If we don't, a mutable config_value will be
- # shared by all instances, effectively making it a class attribute.
- setattr(self, name, deepcopy(config_value))
+ # We have to do a deepcopy here if we don't deepcopy the entire
+ # config object. If we don't, a mutable config_value will be
+ # shared by all instances, effectively making it a class attribute.
+ setattr(self, name, deepcopy(config_value))
elif not _is_section_key(name) and not isinstance(config_value, Config):
- from difflib import get_close_matches
+ from difflib import get_close_matches
if isinstance(self, LoggingConfigurable):
warn = self.log.warning
else:
warn = lambda msg: warnings.warn(msg, stacklevel=9)
- matches = get_close_matches(name, traits)
+ matches = get_close_matches(name, traits)
msg = "Config option `{option}` not recognized by `{klass}`.".format(
option=name, klass=self.__class__.__name__)
- if len(matches) == 1:
+ if len(matches) == 1:
msg += " Did you mean `{matches}`?".format(matches=matches[0])
- elif len(matches) >= 1:
+ elif len(matches) >= 1:
msg +=" Did you mean one of: `{matches}`?".format(matches=', '.join(sorted(matches)))
warn(msg)
-
- @observe('config')
- @observe_compat
- def _config_changed(self, change):
- """Update all the class traits having ``config=True`` in metadata.
-
- For any class trait with a ``config`` metadata attribute that is
- ``True``, we update the trait with the value of the corresponding
- config entry.
- """
- # Get all traits with a config metadata entry that is True
- traits = self.traits(config=True)
-
- # We auto-load config section for this class as well as any parent
- # classes that are Configurable subclasses. This starts with Configurable
- # and works down the mro loading the config for each section.
- section_names = self.section_names()
+
+ @observe('config')
+ @observe_compat
+ def _config_changed(self, change):
+ """Update all the class traits having ``config=True`` in metadata.
+
+ For any class trait with a ``config`` metadata attribute that is
+ ``True``, we update the trait with the value of the corresponding
+ config entry.
+ """
+ # Get all traits with a config metadata entry that is True
+ traits = self.traits(config=True)
+
+ # We auto-load config section for this class as well as any parent
+ # classes that are Configurable subclasses. This starts with Configurable
+ # and works down the mro loading the config for each section.
+ section_names = self.section_names()
self._load_config(change.new, traits=traits, section_names=section_names)
-
- def update_config(self, config):
+
+ def update_config(self, config):
"""Update config and load the new values"""
# traitlets prior to 4.2 created a copy of self.config in order to trigger change events.
# Some projects (IPython < 5) relied upon one side effect of this,
@@ -222,28 +222,28 @@ class Configurable(HasTraits):
# load config
self._load_config(config)
# merge it into self.config
- self.config.merge(config)
+ self.config.merge(config)
# TODO: trigger change event if/when dict-update change events take place
# DO NOT trigger full trait-change
-
- @classmethod
- def class_get_help(cls, inst=None):
- """Get the help string for this class in ReST format.
-
- If `inst` is given, it's current trait values will be used in place of
- class defaults.
- """
- assert inst is None or isinstance(inst, cls)
- final_help = []
+
+ @classmethod
+ def class_get_help(cls, inst=None):
+ """Get the help string for this class in ReST format.
+
+ If `inst` is given, its current trait values will be used in place of
+ class defaults.
+ """
+ assert inst is None or isinstance(inst, cls)
+ final_help = []
base_classes = ', '.join(p.__name__ for p in cls.__bases__)
final_help.append('%s(%s) options' % (cls.__name__, base_classes))
final_help.append(len(final_help[0])*'-')
- for k, v in sorted(cls.class_traits(config=True).items()):
- help = cls.class_get_trait_help(v, inst)
- final_help.append(help)
- return '\n'.join(final_help)
-
- @classmethod
+ for k, v in sorted(cls.class_traits(config=True).items()):
+ help = cls.class_get_trait_help(v, inst)
+ final_help.append(help)
+ return '\n'.join(final_help)
+
+ @classmethod
def class_get_trait_help(cls, trait, inst=None, helptext=None):
"""Get the helptext string for a single trait.
@@ -252,9 +252,9 @@ class Configurable(HasTraits):
the class default.
:param helptext:
If not given, uses the `help` attribute of the current trait.
- """
- assert inst is None or isinstance(inst, cls)
- lines = []
+ """
+ assert inst is None or isinstance(inst, cls)
+ lines = []
header = "--%s.%s" % (cls.__name__, trait.name)
if isinstance(trait, (Container, Dict)):
multiplicity = trait.metadata.get('multiplicity', 'append')
@@ -269,7 +269,7 @@ class Configurable(HasTraits):
else:
header = '%s=<%s>' % (header, trait.__class__.__name__)
#header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
- lines.append(header)
+ lines.append(header)
if helptext is None:
helptext = trait.help
@@ -281,26 +281,26 @@ class Configurable(HasTraits):
# include Enum choices
lines.append(indent('Choices: %s' % trait.info()))
- if inst is not None:
+ if inst is not None:
lines.append(indent("Current: %r" % (getattr(inst, trait.name),)))
- else:
- try:
- dvr = trait.default_value_repr()
- except Exception:
- dvr = None # ignore defaults we can't construct
- if dvr is not None:
- if len(dvr) > 64:
+ else:
+ try:
+ dvr = trait.default_value_repr()
+ except Exception:
+ dvr = None # ignore defaults we can't construct
+ if dvr is not None:
+ if len(dvr) > 64:
dvr = dvr[:61] + "..."
lines.append(indent("Default: %s" % dvr))
-
- return '\n'.join(lines)
-
- @classmethod
- def class_print_help(cls, inst=None):
- """Get the help string for a single trait and print it."""
- print(cls.class_get_help(inst))
-
- @classmethod
+
+ return '\n'.join(lines)
+
+ @classmethod
+ def class_print_help(cls, inst=None):
+ """Get the help string for a single trait and print it."""
+ print(cls.class_get_help(inst))
+
+ @classmethod
def _defining_class(cls, trait, classes):
"""Get the class that defines a trait
@@ -336,13 +336,13 @@ class Configurable(HasTraits):
The list of other classes in the config file.
Used to reduce redundant information.
"""
- def c(s):
- """return a commented, wrapped block."""
- s = '\n\n'.join(wrap_paragraphs(s, 78))
-
+ def c(s):
+ """return a commented, wrapped block."""
+ s = '\n\n'.join(wrap_paragraphs(s, 78))
+
return '## ' + s.replace('\n', '\n# ')
-
- # section header
+
+ # section header
breaker = '#' + '-' * 78
parent_classes = ', '.join(
p.__name__ for p in cls.__bases__
@@ -351,17 +351,17 @@ class Configurable(HasTraits):
s = "# %s(%s) configuration" % (cls.__name__, parent_classes)
lines = [breaker, s, breaker]
- # get the description trait
- desc = cls.class_traits().get('description')
- if desc:
- desc = desc.default_value
+ # get the description trait
+ desc = cls.class_traits().get('description')
+ if desc:
+ desc = desc.default_value
if not desc:
# no description from trait, use __doc__
- desc = getattr(cls, '__doc__', '')
- if desc:
- lines.append(c(desc))
- lines.append('')
-
+ desc = getattr(cls, '__doc__', '')
+ if desc:
+ lines.append(c(desc))
+ lines.append('')
+
for name, trait in sorted(cls.class_traits(config=True).items()):
default_repr = trait.default_value_repr()
@@ -385,60 +385,60 @@ class Configurable(HasTraits):
lines.append('# See also: %s.%s' % (defining_class.__name__, name))
lines.append('# c.%s.%s = %s' % (cls.__name__, name, default_repr))
- lines.append('')
- return '\n'.join(lines)
-
- @classmethod
- def class_config_rst_doc(cls):
- """Generate rST documentation for this class' config options.
-
- Excludes traits defined on parent classes.
- """
- lines = []
- classname = cls.__name__
+ lines.append('')
+ return '\n'.join(lines)
+
+ @classmethod
+ def class_config_rst_doc(cls):
+ """Generate rST documentation for this class' config options.
+
+ Excludes traits defined on parent classes.
+ """
+ lines = []
+ classname = cls.__name__
for k, trait in sorted(cls.class_traits(config=True).items()):
- ttype = trait.__class__.__name__
-
- termline = classname + '.' + trait.name
-
- # Choices or type
- if 'Enum' in ttype:
- # include Enum choices
+ ttype = trait.__class__.__name__
+
+ termline = classname + '.' + trait.name
+
+ # Choices or type
+ if 'Enum' in ttype:
+ # include Enum choices
termline += ' : ' + trait.info_rst()
- else:
- termline += ' : ' + ttype
- lines.append(termline)
-
- # Default value
- try:
- dvr = trait.default_value_repr()
- except Exception:
- dvr = None # ignore defaults we can't construct
- if dvr is not None:
- if len(dvr) > 64:
- dvr = dvr[:61]+'...'
- # Double up backslashes, so they get to the rendered docs
+ else:
+ termline += ' : ' + ttype
+ lines.append(termline)
+
+ # Default value
+ try:
+ dvr = trait.default_value_repr()
+ except Exception:
+ dvr = None # ignore defaults we can't construct
+ if dvr is not None:
+ if len(dvr) > 64:
+ dvr = dvr[:61]+'...'
+ # Double up backslashes, so they get to the rendered docs
dvr = dvr.replace("\\n", "\\\\n")
lines.append(indent("Default: ``%s``" % dvr))
lines.append("")
-
- help = trait.help or 'No description'
+
+ help = trait.help or 'No description'
lines.append(indent(dedent(help)))
-
- # Blank line
- lines.append('')
-
- return '\n'.join(lines)
-
-
-
-class LoggingConfigurable(Configurable):
- """A parent class for Configurables that log.
-
- Subclasses have a log trait, and the default behavior
- is to get the logger from the currently running Application.
- """
-
+
+ # Blank line
+ lines.append('')
+
+ return '\n'.join(lines)
+
+
+
+class LoggingConfigurable(Configurable):
+ """A parent class for Configurables that log.
+
+ Subclasses have a log trait, and the default behavior
+ is to get the logger from the currently running Application.
+ """
+
log = Any(help="Logger or LoggerAdapter instance")
@validate("log")
@@ -452,15 +452,15 @@ class LoggingConfigurable(Configurable):
return proposal.value
@default("log")
- def _log_default(self):
+ def _log_default(self):
if isinstance(self.parent, LoggingConfigurable):
return self.parent.log
- from traitlets import log
- return log.get_logger()
-
+ from traitlets import log
+ return log.get_logger()
+
def _get_log_handler(self):
"""Return the default Handler
-
+
Returns None if none can be found
"""
logger = self.log
@@ -472,89 +472,89 @@ class LoggingConfigurable(Configurable):
return logger.handlers[0]
-class SingletonConfigurable(LoggingConfigurable):
- """A configurable that only allows one instance.
-
- This class is for classes that should only have one instance of itself
- or *any* subclass. To create and retrieve such a class use the
- :meth:`SingletonConfigurable.instance` method.
- """
-
- _instance = None
-
- @classmethod
- def _walk_mro(cls):
- """Walk the cls.mro() for parent classes that are also singletons
-
- For use in instance()
- """
-
- for subclass in cls.mro():
- if issubclass(cls, subclass) and \
- issubclass(subclass, SingletonConfigurable) and \
- subclass != SingletonConfigurable:
- yield subclass
-
- @classmethod
- def clear_instance(cls):
- """unset _instance for this class and singleton parents.
- """
- if not cls.initialized():
- return
- for subclass in cls._walk_mro():
- if isinstance(subclass._instance, cls):
- # only clear instances that are instances
- # of the calling class
- subclass._instance = None
-
- @classmethod
- def instance(cls, *args, **kwargs):
- """Returns a global instance of this class.
-
- This method create a new instance if none have previously been created
- and returns a previously created instance is one already exists.
-
- The arguments and keyword arguments passed to this method are passed
- on to the :meth:`__init__` method of the class upon instantiation.
-
- Examples
- --------
- Create a singleton class using instance, and retrieve it::
-
- >>> from traitlets.config.configurable import SingletonConfigurable
- >>> class Foo(SingletonConfigurable): pass
- >>> foo = Foo.instance()
- >>> foo == Foo.instance()
- True
-
- Create a subclass that is retrived using the base class instance::
-
- >>> class Bar(SingletonConfigurable): pass
- >>> class Bam(Bar): pass
- >>> bam = Bam.instance()
- >>> bam == Bar.instance()
- True
- """
- # Create and save the instance
- if cls._instance is None:
- inst = cls(*args, **kwargs)
- # Now make sure that the instance will also be returned by
- # parent classes' _instance attribute.
- for subclass in cls._walk_mro():
- subclass._instance = inst
-
- if isinstance(cls._instance, cls):
- return cls._instance
- else:
- raise MultipleInstanceError(
+class SingletonConfigurable(LoggingConfigurable):
+ """A configurable that only allows one instance.
+
+ This class is for classes that should only have one instance of itself
+ or *any* subclass. To create and retrieve such a class use the
+ :meth:`SingletonConfigurable.instance` method.
+ """
+
+ _instance = None
+
+ @classmethod
+ def _walk_mro(cls):
+ """Walk the cls.mro() for parent classes that are also singletons
+
+ For use in instance()
+ """
+
+ for subclass in cls.mro():
+ if issubclass(cls, subclass) and \
+ issubclass(subclass, SingletonConfigurable) and \
+ subclass != SingletonConfigurable:
+ yield subclass
+
+ @classmethod
+ def clear_instance(cls):
+ """unset _instance for this class and singleton parents.
+ """
+ if not cls.initialized():
+ return
+ for subclass in cls._walk_mro():
+ if isinstance(subclass._instance, cls):
+ # only clear instances that are instances
+ # of the calling class
+ subclass._instance = None
+
+ @classmethod
+ def instance(cls, *args, **kwargs):
+ """Returns a global instance of this class.
+
+ This method creates a new instance if none has previously been created
+ and returns the previously created instance if one already exists.
+
+ The arguments and keyword arguments passed to this method are passed
+ on to the :meth:`__init__` method of the class upon instantiation.
+
+ Examples
+ --------
+ Create a singleton class using instance, and retrieve it::
+
+ >>> from traitlets.config.configurable import SingletonConfigurable
+ >>> class Foo(SingletonConfigurable): pass
+ >>> foo = Foo.instance()
+ >>> foo == Foo.instance()
+ True
+
+ Create a subclass that is retrieved using the base class instance::
+
+ >>> class Bar(SingletonConfigurable): pass
+ >>> class Bam(Bar): pass
+ >>> bam = Bam.instance()
+ >>> bam == Bar.instance()
+ True
+ """
+ # Create and save the instance
+ if cls._instance is None:
+ inst = cls(*args, **kwargs)
+ # Now make sure that the instance will also be returned by
+ # parent classes' _instance attribute.
+ for subclass in cls._walk_mro():
+ subclass._instance = inst
+
+ if isinstance(cls._instance, cls):
+ return cls._instance
+ else:
+ raise MultipleInstanceError(
"An incompatible sibling of '%s' is already instanciated"
" as singleton: %s" % (cls.__name__, type(cls._instance).__name__)
- )
-
- @classmethod
- def initialized(cls):
- """Has an instance been created?"""
- return hasattr(cls, "_instance") and cls._instance is not None
-
-
-
+ )
+
+ @classmethod
+ def initialized(cls):
+ """Has an instance been created?"""
+ return hasattr(cls, "_instance") and cls._instance is not None
+
+
+
diff --git a/contrib/python/traitlets/py3/traitlets/config/loader.py b/contrib/python/traitlets/py3/traitlets/config/loader.py
index 3af27bc22f..5360f889ab 100644
--- a/contrib/python/traitlets/py3/traitlets/config/loader.py
+++ b/contrib/python/traitlets/py3/traitlets/config/loader.py
@@ -1,49 +1,49 @@
-"""A simple configuration system."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import argparse
-import copy
-import os
-import re
-import sys
-import json
+"""A simple configuration system."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import argparse
+import copy
+import os
+import re
+import sys
+import json
import warnings
-
+
from ..utils import cast_unicode, filefind
from traitlets.traitlets import (
HasTraits, Container, List, Dict, Any, Undefined,
)
-
-#-----------------------------------------------------------------------------
-# Exceptions
-#-----------------------------------------------------------------------------
-
-
-class ConfigError(Exception):
- pass
-
-class ConfigLoaderError(ConfigError):
- pass
-
-class ConfigFileNotFound(ConfigError):
- pass
-
-class ArgumentError(ConfigLoaderError):
- pass
-
-#-----------------------------------------------------------------------------
-# Argparse fix
-#-----------------------------------------------------------------------------
-
-# Unfortunately argparse by default prints help messages to stderr instead of
-# stdout. This makes it annoying to capture long help screens at the command
-# line, since one must know how to pipe stderr, which many users don't know how
-# to do. So we override the print_help method with one that defaults to
-# stdout and use our class instead.
-
+
+#-----------------------------------------------------------------------------
+# Exceptions
+#-----------------------------------------------------------------------------
+
+
+class ConfigError(Exception):
+ pass
+
+class ConfigLoaderError(ConfigError):
+ pass
+
+class ConfigFileNotFound(ConfigError):
+ pass
+
+class ArgumentError(ConfigLoaderError):
+ pass
+
+#-----------------------------------------------------------------------------
+# Argparse fix
+#-----------------------------------------------------------------------------
+
+# Unfortunately argparse by default prints help messages to stderr instead of
+# stdout. This makes it annoying to capture long help screens at the command
+# line, since one must know how to pipe stderr, which many users don't know how
+# to do. So we override the print_help method with one that defaults to
+# stdout and use our class instead.
+
class _Sentinel:
def __repr__(self):
@@ -56,55 +56,55 @@ class _Sentinel:
_deprecated = _Sentinel()
-class ArgumentParser(argparse.ArgumentParser):
- """Simple argparse subclass that prints help to stdout by default."""
-
- def print_help(self, file=None):
- if file is None:
- file = sys.stdout
- return super(ArgumentParser, self).print_help(file)
-
- print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
-
-#-----------------------------------------------------------------------------
-# Config class for holding config information
-#-----------------------------------------------------------------------------
-
+class ArgumentParser(argparse.ArgumentParser):
+ """Simple argparse subclass that prints help to stdout by default."""
+
+ def print_help(self, file=None):
+ if file is None:
+ file = sys.stdout
+ return super(ArgumentParser, self).print_help(file)
+
+ print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
+
+#-----------------------------------------------------------------------------
+# Config class for holding config information
+#-----------------------------------------------------------------------------
+
def execfile(fname, glob):
with open(fname, 'rb') as f:
exec(compile(f.read(), fname, 'exec'), glob, glob)
-class LazyConfigValue(HasTraits):
- """Proxy object for exposing methods on configurable containers
+class LazyConfigValue(HasTraits):
+ """Proxy object for exposing methods on configurable containers
These methods allow appending/extending/updating
to add to non-empty defaults instead of clobbering them.
- Exposes:
+ Exposes:
- - append, extend, insert on lists
- - update on dicts
- - update, add on sets
- """
+ - append, extend, insert on lists
+ - update on dicts
+ - update, add on sets
+ """
- _value = None
+ _value = None
- # list methods
- _extend = List()
- _prepend = List()
+ # list methods
+ _extend = List()
+ _prepend = List()
_inserts = List()
- def append(self, obj):
+ def append(self, obj):
"""Append an item to a List"""
- self._extend.append(obj)
+ self._extend.append(obj)
- def extend(self, other):
+ def extend(self, other):
"""Extend a list"""
- self._extend.extend(other)
+ self._extend.extend(other)
- def prepend(self, other):
- """like list.extend, but for the front"""
- self._prepend[:0] = other
+ def prepend(self, other):
+ """like list.extend, but for the front"""
+ self._prepend[:0] = other
def merge_into(self, other):
@@ -140,86 +140,86 @@ class LazyConfigValue(HasTraits):
# other is a container, reify now.
return self.get_value(other)
- def insert(self, index, other):
- if not isinstance(index, int):
- raise TypeError("An integer is required")
- self._inserts.append((index, other))
+ def insert(self, index, other):
+ if not isinstance(index, int):
+ raise TypeError("An integer is required")
+ self._inserts.append((index, other))
- # dict methods
- # update is used for both dict and set
- _update = Any()
+ # dict methods
+ # update is used for both dict and set
+ _update = Any()
- def update(self, other):
+ def update(self, other):
"""Update either a set or dict"""
- if self._update is None:
- if isinstance(other, dict):
- self._update = {}
- else:
- self._update = set()
- self._update.update(other)
-
- # set methods
- def add(self, obj):
+ if self._update is None:
+ if isinstance(other, dict):
+ self._update = {}
+ else:
+ self._update = set()
+ self._update.update(other)
+
+ # set methods
+ def add(self, obj):
"""Add an item to a set"""
- self.update({obj})
-
- def get_value(self, initial):
- """construct the value from the initial one
-
- after applying any insert / extend / update changes
- """
- if self._value is not None:
- return self._value
- value = copy.deepcopy(initial)
- if isinstance(value, list):
- for idx, obj in self._inserts:
- value.insert(idx, obj)
- value[:0] = self._prepend
- value.extend(self._extend)
-
- elif isinstance(value, dict):
- if self._update:
- value.update(self._update)
- elif isinstance(value, set):
- if self._update:
- value.update(self._update)
- self._value = value
- return value
-
- def to_dict(self):
- """return JSONable dict form of my data
-
- Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
- """
- d = {}
- if self._update:
- d['update'] = self._update
- if self._extend:
- d['extend'] = self._extend
- if self._prepend:
- d['prepend'] = self._prepend
- elif self._inserts:
- d['inserts'] = self._inserts
- return d
-
+ self.update({obj})
+
+ def get_value(self, initial):
+ """construct the value from the initial one
+
+ after applying any insert / extend / update changes
+ """
+ if self._value is not None:
+ return self._value
+ value = copy.deepcopy(initial)
+ if isinstance(value, list):
+ for idx, obj in self._inserts:
+ value.insert(idx, obj)
+ value[:0] = self._prepend
+ value.extend(self._extend)
+
+ elif isinstance(value, dict):
+ if self._update:
+ value.update(self._update)
+ elif isinstance(value, set):
+ if self._update:
+ value.update(self._update)
+ self._value = value
+ return value
+
+ def to_dict(self):
+ """return JSONable dict form of my data
+
+ Currently: update as a dict or set; extend and prepend as lists; inserts as a list of tuples.
+ """
+ d = {}
+ if self._update:
+ d['update'] = self._update
+ if self._extend:
+ d['extend'] = self._extend
+ if self._prepend:
+ d['prepend'] = self._prepend
+ elif self._inserts:
+ d['inserts'] = self._inserts
+ return d
+
def __repr__(self):
if self._value is not None:
return "<%s value=%r>" % (self.__class__.__name__, self._value)
else:
return "<%s %r>" % (self.__class__.__name__, self.to_dict())
-
-
-def _is_section_key(key):
- """Is a Config key a section name (does it start with a capital)?"""
- if key and key[0].upper()==key[0] and not key.startswith('_'):
- return True
- else:
- return False
-
-
-class Config(dict):
+
+
+def _is_section_key(key):
+ """Is a Config key a section name (does it start with a capital)?"""
+ if key and key[0].upper()==key[0] and not key.startswith('_'):
+ return True
+ else:
+ return False
+
+
+class Config(dict):
"""An attribute-based dict that can do smart merges.
-
+
Accessing a field on a config object for the first time populates the key
with either a nested Config object for keys starting with capitals
or :class:`.LazyConfigValue` for lowercase keys,
@@ -231,147 +231,147 @@ class Config(dict):
"""
- def __init__(self, *args, **kwds):
- dict.__init__(self, *args, **kwds)
- self._ensure_subconfig()
-
- def _ensure_subconfig(self):
- """ensure that sub-dicts that should be Config objects are
-
- casts dicts that are under section keys to Config objects,
- which is necessary for constructing Config objects from dict literals.
- """
- for key in self:
- obj = self[key]
- if _is_section_key(key) \
- and isinstance(obj, dict) \
- and not isinstance(obj, Config):
- setattr(self, key, Config(obj))
-
- def _merge(self, other):
- """deprecated alias, use Config.merge()"""
- self.merge(other)
-
- def merge(self, other):
- """merge another config object into this one"""
- to_update = {}
+ def __init__(self, *args, **kwds):
+ dict.__init__(self, *args, **kwds)
+ self._ensure_subconfig()
+
+ def _ensure_subconfig(self):
+ """ensure that sub-dicts that should be Config objects are
+
+ casts dicts that are under section keys to Config objects,
+ which is necessary for constructing Config objects from dict literals.
+ """
+ for key in self:
+ obj = self[key]
+ if _is_section_key(key) \
+ and isinstance(obj, dict) \
+ and not isinstance(obj, Config):
+ setattr(self, key, Config(obj))
+
+ def _merge(self, other):
+ """deprecated alias, use Config.merge()"""
+ self.merge(other)
+
+ def merge(self, other):
+ """merge another config object into this one"""
+ to_update = {}
for k, v in other.items():
- if k not in self:
- to_update[k] = v
- else: # I have this key
- if isinstance(v, Config) and isinstance(self[k], Config):
- # Recursively merge common sub Configs
- self[k].merge(v)
+ if k not in self:
+ to_update[k] = v
+ else: # I have this key
+ if isinstance(v, Config) and isinstance(self[k], Config):
+ # Recursively merge common sub Configs
+ self[k].merge(v)
elif isinstance(v, LazyConfigValue):
self[k] = v.merge_into(self[k])
- else:
- # Plain updates for non-Configs
- to_update[k] = v
-
- self.update(to_update)
-
- def collisions(self, other):
- """Check for collisions between two config objects.
-
- Returns a dict of the form {"Class": {"trait": "collision message"}}`,
- indicating which values have been ignored.
-
- An empty dict indicates no collisions.
- """
- collisions = {}
- for section in self:
- if section not in other:
- continue
- mine = self[section]
- theirs = other[section]
- for key in mine:
- if key in theirs and mine[key] != theirs[key]:
- collisions.setdefault(section, {})
- collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
- return collisions
-
- def __contains__(self, key):
- # allow nested contains of the form `"Section.key" in config`
- if '.' in key:
- first, remainder = key.split('.', 1)
- if first not in self:
- return False
- return remainder in self[first]
-
- return super(Config, self).__contains__(key)
-
- # .has_key is deprecated for dictionaries.
- has_key = __contains__
-
- def _has_section(self, key):
- return _is_section_key(key) and key in self
-
- def copy(self):
- return type(self)(dict.copy(self))
-
- def __copy__(self):
- return self.copy()
-
- def __deepcopy__(self, memo):
- new_config = type(self)()
- for key, value in self.items():
- if isinstance(value, (Config, LazyConfigValue)):
- # deep copy config objects
- value = copy.deepcopy(value, memo)
- elif type(value) in {dict, list, set, tuple}:
- # shallow copy plain container traits
- value = copy.copy(value)
- new_config[key] = value
- return new_config
-
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- if _is_section_key(key):
- c = Config()
- dict.__setitem__(self, key, c)
- return c
- elif not key.startswith('_'):
- # undefined, create lazy value, used for container methods
- v = LazyConfigValue()
- dict.__setitem__(self, key, v)
- return v
- else:
- raise KeyError
-
- def __setitem__(self, key, value):
- if _is_section_key(key):
- if not isinstance(value, Config):
- raise ValueError('values whose keys begin with an uppercase '
- 'char must be Config instances: %r, %r' % (key, value))
- dict.__setitem__(self, key, value)
-
- def __getattr__(self, key):
- if key.startswith('__'):
- return dict.__getattr__(self, key)
- try:
- return self.__getitem__(key)
- except KeyError as e:
- raise AttributeError(e)
-
- def __setattr__(self, key, value):
- if key.startswith('__'):
- return dict.__setattr__(self, key, value)
- try:
- self.__setitem__(key, value)
- except KeyError as e:
- raise AttributeError(e)
-
- def __delattr__(self, key):
- if key.startswith('__'):
- return dict.__delattr__(self, key)
- try:
- dict.__delitem__(self, key)
- except KeyError as e:
- raise AttributeError(e)
-
-
+ else:
+ # Plain updates for non-Configs
+ to_update[k] = v
+
+ self.update(to_update)
+
+ def collisions(self, other):
+ """Check for collisions between two config objects.
+
+ Returns a dict of the form {"Class": {"trait": "collision message"}},
+ indicating which values have been ignored.
+
+ An empty dict indicates no collisions.
+ """
+ collisions = {}
+ for section in self:
+ if section not in other:
+ continue
+ mine = self[section]
+ theirs = other[section]
+ for key in mine:
+ if key in theirs and mine[key] != theirs[key]:
+ collisions.setdefault(section, {})
+ collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
+ return collisions
+
+ def __contains__(self, key):
+ # allow nested contains of the form `"Section.key" in config`
+ if '.' in key:
+ first, remainder = key.split('.', 1)
+ if first not in self:
+ return False
+ return remainder in self[first]
+
+ return super(Config, self).__contains__(key)
+
+ # .has_key is deprecated for dictionaries.
+ has_key = __contains__
+
+ def _has_section(self, key):
+ return _is_section_key(key) and key in self
+
+ def copy(self):
+ return type(self)(dict.copy(self))
+
+ def __copy__(self):
+ return self.copy()
+
+ def __deepcopy__(self, memo):
+ new_config = type(self)()
+ for key, value in self.items():
+ if isinstance(value, (Config, LazyConfigValue)):
+ # deep copy config objects
+ value = copy.deepcopy(value, memo)
+ elif type(value) in {dict, list, set, tuple}:
+ # shallow copy plain container traits
+ value = copy.copy(value)
+ new_config[key] = value
+ return new_config
+
+ def __getitem__(self, key):
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ if _is_section_key(key):
+ c = Config()
+ dict.__setitem__(self, key, c)
+ return c
+ elif not key.startswith('_'):
+ # undefined, create lazy value, used for container methods
+ v = LazyConfigValue()
+ dict.__setitem__(self, key, v)
+ return v
+ else:
+ raise KeyError
+
+ def __setitem__(self, key, value):
+ if _is_section_key(key):
+ if not isinstance(value, Config):
+ raise ValueError('values whose keys begin with an uppercase '
+ 'char must be Config instances: %r, %r' % (key, value))
+ dict.__setitem__(self, key, value)
+
+ def __getattr__(self, key):
+ if key.startswith('__'):
+ return dict.__getattr__(self, key)
+ try:
+ return self.__getitem__(key)
+ except KeyError as e:
+ raise AttributeError(e)
+
+ def __setattr__(self, key, value):
+ if key.startswith('__'):
+ return dict.__setattr__(self, key, value)
+ try:
+ self.__setitem__(key, value)
+ except KeyError as e:
+ raise AttributeError(e)
+
+ def __delattr__(self, key):
+ if key.startswith('__'):
+ return dict.__delattr__(self, key)
+ try:
+ dict.__delitem__(self, key)
+ except KeyError as e:
+ raise AttributeError(e)
+
+
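
A sketch of the auto-sectioning and recursive merge described above (section and key names illustrative):

    from traitlets.config import Config

    c1 = Config()
    c1.Foo.bar = 1            # capitalized key -> nested Config section
    c2 = Config()
    c2.Foo.baz = 2
    c1.merge(c2)              # recursively merges the common 'Foo' section
    assert c1.Foo.bar == 1 and c1.Foo.baz == 2
    assert 'Foo.baz' in c1    # dotted lookup via __contains__
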
class DeferredConfig:
"""Class for deferred-evaluation of config from CLI"""
pass
@@ -455,95 +455,95 @@ class DeferredConfigList(list, DeferredConfig):
return '%s(%s)' % (self.__class__.__name__, self._super_repr())
-#-----------------------------------------------------------------------------
-# Config loading classes
-#-----------------------------------------------------------------------------
-
-
-class ConfigLoader(object):
- """A object for loading configurations from just about anywhere.
-
- The resulting configuration is packaged as a :class:`Config`.
-
- Notes
- -----
- A :class:`ConfigLoader` does one thing: load a config from a source
- (file, command line arguments) and returns the data as a :class:`Config` object.
- There are lots of things that :class:`ConfigLoader` does not do. It does
- not implement complex logic for finding config files. It does not handle
- default values or merge multiple configs. These things need to be
- handled elsewhere.
- """
-
- def _log_default(self):
- from traitlets.log import get_logger
- return get_logger()
-
- def __init__(self, log=None):
- """A base class for config loaders.
-
- log : instance of :class:`logging.Logger` to use.
+#-----------------------------------------------------------------------------
+# Config loading classes
+#-----------------------------------------------------------------------------
+
+
+class ConfigLoader(object):
+ """A object for loading configurations from just about anywhere.
+
+ The resulting configuration is packaged as a :class:`Config`.
+
+ Notes
+ -----
+ A :class:`ConfigLoader` does one thing: load a config from a source
+ (file, command line arguments) and return the data as a :class:`Config` object.
+ There are lots of things that :class:`ConfigLoader` does not do. It does
+ not implement complex logic for finding config files. It does not handle
+ default values or merge multiple configs. These things need to be
+ handled elsewhere.
+ """
+
+ def _log_default(self):
+ from traitlets.log import get_logger
+ return get_logger()
+
+ def __init__(self, log=None):
+ """A base class for config loaders.
+
+ log : instance of :class:`logging.Logger` to use.
By default logger of :meth:`traitlets.config.application.Application.instance()`
- will be used
-
- Examples
- --------
- >>> cl = ConfigLoader()
- >>> config = cl.load_config()
- >>> config
- {}
- """
- self.clear()
- if log is None:
- self.log = self._log_default()
- self.log.debug('Using default logger')
- else:
- self.log = log
-
- def clear(self):
- self.config = Config()
-
- def load_config(self):
- """Load a config from somewhere, return a :class:`Config` instance.
-
- Usually, this will cause self.config to be set and then returned.
- However, in most cases, :meth:`ConfigLoader.clear` should be called
- to erase any previous state.
- """
- self.clear()
- return self.config
-
-
-class FileConfigLoader(ConfigLoader):
- """A base class for file based configurations.
-
- As we add more file based config loaders, the common logic should go
- here.
- """
-
- def __init__(self, filename, path=None, **kw):
- """Build a config loader for a filename and path.
-
- Parameters
- ----------
- filename : str
- The file name of the config file.
- path : str, list, tuple
- The path to search for the config file on, or a sequence of
- paths to try in order.
- """
- super(FileConfigLoader, self).__init__(**kw)
- self.filename = filename
- self.path = path
- self.full_filename = ''
-
- def _find_file(self):
- """Try to find the file by searching the paths."""
- self.full_filename = filefind(self.filename, self.path)
-
-class JSONFileConfigLoader(FileConfigLoader):
+ will be used
+
+ Examples
+ --------
+ >>> cl = ConfigLoader()
+ >>> config = cl.load_config()
+ >>> config
+ {}
+ """
+ self.clear()
+ if log is None:
+ self.log = self._log_default()
+ self.log.debug('Using default logger')
+ else:
+ self.log = log
+
+ def clear(self):
+ self.config = Config()
+
+ def load_config(self):
+ """Load a config from somewhere, return a :class:`Config` instance.
+
+ Usually, this will cause self.config to be set and then returned.
+ However, in most cases, :meth:`ConfigLoader.clear` should be called
+ to erase any previous state.
+ """
+ self.clear()
+ return self.config
+
+
+class FileConfigLoader(ConfigLoader):
+ """A base class for file based configurations.
+
+ As we add more file based config loaders, the common logic should go
+ here.
+ """
+
+ def __init__(self, filename, path=None, **kw):
+ """Build a config loader for a filename and path.
+
+ Parameters
+ ----------
+ filename : str
+ The file name of the config file.
+ path : str, list, tuple
+ The path to search for the config file on, or a sequence of
+ paths to try in order.
+ """
+ super(FileConfigLoader, self).__init__(**kw)
+ self.filename = filename
+ self.path = path
+ self.full_filename = ''
+
+ def _find_file(self):
+ """Try to find the file by searching the paths."""
+ self.full_filename = filefind(self.filename, self.path)
+
+class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config
-
+
Can also act as a context manager that rewrites the configuration file to disk on exit.
Example::
@@ -553,36 +553,36 @@ class JSONFileConfigLoader(FileConfigLoader):
"""
- def load_config(self):
- """Load the config from a file and return it as a Config object."""
- self.clear()
- try:
- self._find_file()
- except IOError as e:
- raise ConfigFileNotFound(str(e))
- dct = self._read_file_as_dict()
- self.config = self._convert_to_config(dct)
- return self.config
-
- def _read_file_as_dict(self):
- with open(self.full_filename) as f:
- return json.load(f)
-
- def _convert_to_config(self, dictionary):
- if 'version' in dictionary:
- version = dictionary.pop('version')
- else:
- version = 1
-
- if version == 1:
- return Config(dictionary)
- else:
- raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
-
+ def load_config(self):
+ """Load the config from a file and return it as a Config object."""
+ self.clear()
+ try:
+ self._find_file()
+ except IOError as e:
+ raise ConfigFileNotFound(str(e))
+ dct = self._read_file_as_dict()
+ self.config = self._convert_to_config(dct)
+ return self.config
+
+ def _read_file_as_dict(self):
+ with open(self.full_filename) as f:
+ return json.load(f)
+
+ def _convert_to_config(self, dictionary):
+ if 'version' in dictionary:
+ version = dictionary.pop('version')
+ else:
+ version = 1
+
+ if version == 1:
+ return Config(dictionary)
+ else:
+ raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
+
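
A sketch of the usage implied by the class docstring (file name illustrative; the write-back on exit is per the docstring above):

    from traitlets.config.loader import JSONFileConfigLoader

    loader = JSONFileConfigLoader('myapp.json', path='.')
    config = loader.load_config()        # raises ConfigFileNotFound if missing

    with JSONFileConfigLoader('myapp.json', path='.') as cfg:
        cfg.MyApp.log_level = 10         # rewritten to disk on exit
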
def __enter__(self):
self.load_config()
return self.config
-
+
def __exit__(self, exc_type, exc_value, traceback):
"""
Exit the context manager but do not handle any errors.
@@ -597,68 +597,68 @@ class JSONFileConfigLoader(FileConfigLoader):
-class PyFileConfigLoader(FileConfigLoader):
- """A config loader for pure python files.
-
- This is responsible for locating a Python config file by filename and
- path, then executing it to construct a Config object.
- """
-
- def load_config(self):
- """Load the config from a file and return it as a Config object."""
- self.clear()
- try:
- self._find_file()
- except IOError as e:
- raise ConfigFileNotFound(str(e))
- self._read_file_as_dict()
- return self.config
-
- def load_subconfig(self, fname, path=None):
- """Injected into config file namespace as load_subconfig"""
- if path is None:
- path = self.path
-
- loader = self.__class__(fname, path)
- try:
- sub_config = loader.load_config()
- except ConfigFileNotFound:
- # Pass silently if the sub config is not there,
- # treat it as an empty config file.
- pass
- else:
- self.config.merge(sub_config)
-
- def _read_file_as_dict(self):
- """Load the config file into self.config, with recursive loading."""
- def get_config():
- """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
- return self.config
-
- namespace = dict(
- c=self.config,
- load_subconfig=self.load_subconfig,
- get_config=get_config,
- __file__=self.full_filename,
- )
+class PyFileConfigLoader(FileConfigLoader):
+ """A config loader for pure python files.
+
+ This is responsible for locating a Python config file by filename and
+ path, then executing it to construct a Config object.
+ """
+
+ def load_config(self):
+ """Load the config from a file and return it as a Config object."""
+ self.clear()
+ try:
+ self._find_file()
+ except IOError as e:
+ raise ConfigFileNotFound(str(e))
+ self._read_file_as_dict()
+ return self.config
+
+ def load_subconfig(self, fname, path=None):
+ """Injected into config file namespace as load_subconfig"""
+ if path is None:
+ path = self.path
+
+ loader = self.__class__(fname, path)
+ try:
+ sub_config = loader.load_config()
+ except ConfigFileNotFound:
+ # Pass silently if the sub config is not there,
+ # treat it as an empty config file.
+ pass
+ else:
+ self.config.merge(sub_config)
+
+ def _read_file_as_dict(self):
+ """Load the config file into self.config, with recursive loading."""
+ def get_config():
+ """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
+ return self.config
+
+ namespace = dict(
+ c=self.config,
+ load_subconfig=self.load_subconfig,
+ get_config=get_config,
+ __file__=self.full_filename,
+ )
conf_filename = self.full_filename
with open(conf_filename, 'rb') as f:
exec(compile(f.read(), conf_filename, 'exec'), namespace, namespace)
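
A sketch of the namespace a Python config file sees, per _read_file_as_dict above (file names illustrative):

    from traitlets.config.loader import PyFileConfigLoader

    # Hypothetical myapp_config.py:
    #     c = get_config()
    #     c.MyApp.log_level = 10
    #     load_subconfig('base_config.py')   # merged if present, skipped if not

    loader = PyFileConfigLoader('myapp_config.py', path='.')
    config = loader.load_config()
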
-
-
-class CommandLineConfigLoader(ConfigLoader):
- """A config loader for command line arguments.
-
- As we add more command line based loaders, the common logic should go
- here.
- """
-
+
+
+class CommandLineConfigLoader(ConfigLoader):
+ """A config loader for command line arguments.
+
+ As we add more command line based loaders, the common logic should go
+ here.
+ """
+
def _exec_config_str(self, lhs, rhs, trait=None):
- """execute self.config.<lhs> = <rhs>
+ """execute self.config.<lhs> = <rhs>
- * expands ~ with expanduser
+ * expands ~ with expanduser
* interprets value with trait if available
- """
+ """
value = rhs
if isinstance(value, DeferredConfig):
if trait:
@@ -672,41 +672,41 @@ class CommandLineConfigLoader(ConfigLoader):
value = trait.from_string(value)
else:
value = DeferredConfigString(value)
-
+
*path, key = lhs.split(".")
section = self.config
for part in path:
section = section[part]
section[key] = value
return
-
- def _load_flag(self, cfg):
- """update self.config from a flag, which can be a dict or Config"""
- if isinstance(cfg, (dict, Config)):
- # don't clobber whole config sections, update
- # each section from config:
+
+ def _load_flag(self, cfg):
+ """update self.config from a flag, which can be a dict or Config"""
+ if isinstance(cfg, (dict, Config)):
+ # don't clobber whole config sections, update
+ # each section from config:
for sec, c in cfg.items():
- self.config[sec].update(c)
- else:
- raise TypeError("Invalid flag: %r" % cfg)
-
+ self.config[sec].update(c)
+ else:
+ raise TypeError("Invalid flag: %r" % cfg)
+
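
A sketch of what _load_flag does with a flag dict such as the ones produced by boolean_flag earlier in this diff (section and trait names illustrative):

    from traitlets.config.loader import CommandLineConfigLoader

    loader = CommandLineConfigLoader()
    loader._load_flag({'Application': {'log_level': 10}})
    assert loader.config.Application.log_level == 10   # section updated, not clobbered
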
# match --Class.trait keys for argparse
# matches:
# --Class.trait
# --x
# -x
-
+
class_trait_opt_pattern = re.compile(r'^\-?\-[A-Za-z][\w]*(\.[\w]+)*$')
-
+
_DOT_REPLACEMENT = "__DOT__"
_DASH_REPLACEMENT = "__DASH__"
-
-
+
+
class _KVAction(argparse.Action):
"""Custom argparse action for handling --Class.trait=x
-
+
Always appends parsed values to a DeferredConfigList, so repeated options accumulate.
- """
+ """
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, str):
values = [values]
@@ -718,11 +718,11 @@ class _KVAction(argparse.Action):
items = DeferredConfigList(items)
items.extend(values)
setattr(namespace, self.dest, items)
-
-
+
+
class _DefaultOptionDict(dict):
"""Like the default options dict
-
+
but acts as if all --Class.trait options are predefined
"""
def _add_kv_action(self, key):
@@ -732,31 +732,31 @@ class _DefaultOptionDict(dict):
# use metavar for display purposes
metavar=key.lstrip("-"),
)
-
+
def __contains__(self, key):
if '=' in key:
return False
if super().__contains__(key):
return True
-
+
if key.startswith("-") and class_trait_opt_pattern.match(key):
self._add_kv_action(key)
return True
return False
-
+
def __getitem__(self, key):
if key in self:
return super().__getitem__(key)
else:
raise KeyError(key)
-
+
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
-
-
+
+
class _KVArgParser(argparse.ArgumentParser):
"""subclass of ArgumentParser where any --Class.trait option is implicitly defined"""
def parse_known_args(self, args=None, namespace=None):
@@ -766,23 +766,23 @@ class _KVArgParser(argparse.ArgumentParser):
container._option_string_actions = _DefaultOptionDict(
container._option_string_actions)
return super().parse_known_args(args, namespace)
-
-
-class ArgParseConfigLoader(CommandLineConfigLoader):
- """A loader that uses the argparse module to load from the command line."""
-
+
+
+class ArgParseConfigLoader(CommandLineConfigLoader):
+ """A loader that uses the argparse module to load from the command line."""
+
parser_class = ArgumentParser
def __init__(self, argv=None, aliases=None, flags=None, log=None, classes=(),
*parser_args, **parser_kw):
- """Create a config loader for use with argparse.
-
- Parameters
- ----------
+ """Create a config loader for use with argparse.
+
+ Parameters
+ ----------
classes : optional, list
The classes to scan for *container* config-traits and decide
for their "multiplicity" when adding them as *argparse* arguments.
- argv : optional, list
+ argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
*parser_args : tuple
@@ -797,39 +797,39 @@ class ArgParseConfigLoader(CommandLineConfigLoader):
Dict of flags to full traitlets names for CLI parsing
log
Passed to `ConfigLoader`
-
- Returns
- -------
- config : Config
- The resulting Config object.
- """
- super(CommandLineConfigLoader, self).__init__(log=log)
- self.clear()
- if argv is None:
- argv = sys.argv[1:]
- self.argv = argv
- self.aliases = aliases or {}
- self.flags = flags or {}
+
+ Returns
+ -------
+ config : Config
+ The resulting Config object.
+ """
+ super(CommandLineConfigLoader, self).__init__(log=log)
+ self.clear()
+ if argv is None:
+ argv = sys.argv[1:]
+ self.argv = argv
+ self.aliases = aliases or {}
+ self.flags = flags or {}
self.classes = classes
-
- self.parser_args = parser_args
- self.version = parser_kw.pop("version", None)
- kwargs = dict(argument_default=argparse.SUPPRESS)
- kwargs.update(parser_kw)
- self.parser_kw = kwargs
-
+
+ self.parser_args = parser_args
+ self.version = parser_kw.pop("version", None)
+ kwargs = dict(argument_default=argparse.SUPPRESS)
+ kwargs.update(parser_kw)
+ self.parser_kw = kwargs
+
def load_config(self, argv=None, aliases=None, flags=_deprecated, classes=None):
- """Parse command line arguments and return as a Config object.
-
- Parameters
- ----------
+ """Parse command line arguments and return as a Config object.
+
+ Parameters
+ ----------
argv : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used.
flags
Deprecated in traitlets 5.0, instantiate the config loader with the flags.
-
+
"""
if flags is not _deprecated:
@@ -840,35 +840,35 @@ class ArgParseConfigLoader(CommandLineConfigLoader):
stacklevel=2,
)
- self.clear()
- if argv is None:
- argv = self.argv
+ self.clear()
+ if argv is None:
+ argv = self.argv
if aliases is not None:
self.aliases = aliases
if classes is not None:
self.classes = classes
self._create_parser()
- self._parse_args(argv)
- self._convert_to_config()
- return self.config
-
- def get_extra_args(self):
- if hasattr(self, 'extra_args'):
- return self.extra_args
- else:
- return []
-
+ self._parse_args(argv)
+ self._convert_to_config()
+ return self.config
+
+ def get_extra_args(self):
+ if hasattr(self, 'extra_args'):
+ return self.extra_args
+ else:
+ return []
+
def _create_parser(self):
self.parser = self.parser_class(*self.parser_args, **self.parser_kw)
self._add_arguments(self.aliases, self.flags, self.classes)
-
+
def _add_arguments(self, aliases, flags, classes):
- raise NotImplementedError("subclasses must implement _add_arguments")
-
- def _parse_args(self, args):
- """self.parser->self.parsed_data"""
+ raise NotImplementedError("subclasses must implement _add_arguments")
+
+ def _parse_args(self, args):
+ """self.parser->self.parsed_data"""
uargs = [cast_unicode(a) for a in args]
-
+
unpacked_aliases = {}
if self.aliases:
unpacked_aliases = {}
@@ -908,15 +908,15 @@ class ArgParseConfigLoader(CommandLineConfigLoader):
self.parsed_data = self.parser.parse_args(to_parse)
self.extra_args = extra_args
- def _convert_to_config(self):
- """self.parsed_data->self.config"""
+ def _convert_to_config(self):
+ """self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).items():
*path, key = k.split(".")
section = self.config
for p in path:
section = section[p]
setattr(section, key, v)
-
+
class _FlagAction(argparse.Action):
"""ArgParse action to handle a flag"""
@@ -937,9 +937,9 @@ class _FlagAction(argparse.Action):
setattr(namespace, self.alias, values)
-class KVArgParseConfigLoader(ArgParseConfigLoader):
- """A config loader that loads aliases and flags with argparse,
-
+class KVArgParseConfigLoader(ArgParseConfigLoader):
+ """A config loader that loads aliases and flags with argparse,
+
     as well as arbitrary --Class.trait value arguments.
"""
@@ -947,10 +947,10 @@ class KVArgParseConfigLoader(ArgParseConfigLoader):
def _add_arguments(self, aliases, flags, classes):
alias_flags = {}
- paa = self.parser.add_argument
+ paa = self.parser.add_argument
self.parser.set_defaults(_flags=[])
paa("extra_args", nargs="*")
-
+
## An index of all container traits collected::
#
# { <traitname>: (<trait>, <argparse-kwds>) }
@@ -1010,10 +1010,10 @@ class KVArgParseConfigLoader(ArgParseConfigLoader):
keys = ('-' + key, '--' + key) if len(key) == 1 else ('--'+ key,)
paa(*keys, **argparse_kwds)
- def _convert_to_config(self):
- """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
+ def _convert_to_config(self):
+ """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
extra_args = self.extra_args
-
+
for lhs, rhs in vars(self.parsed_data).items():
if lhs == "extra_args":
self.extra_args = ["-" if a == _DASH_REPLACEMENT else a for a in rhs] + extra_args
@@ -1021,7 +1021,7 @@ class KVArgParseConfigLoader(ArgParseConfigLoader):
elif lhs == '_flags':
# _flags will be handled later
continue
-
+
lhs = lhs.replace(_DOT_REPLACEMENT, ".")
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
@@ -1048,12 +1048,12 @@ class KVArgParseConfigLoader(ArgParseConfigLoader):
raise ArgumentError(f"Error loading argument {lhs}={rhs}, {e}")
for subc in self.parsed_data._flags:
- self._load_flag(subc)
-
-
+ self._load_flag(subc)
+
+
class KeyValueConfigLoader(KVArgParseConfigLoader):
"""Deprecated in traitlets 5.0
-
+
Use KVArgParseConfigLoader
"""
def __init__(self, *args, **kwargs):
@@ -1066,25 +1066,25 @@ class KeyValueConfigLoader(KVArgParseConfigLoader):
super().__init__(*args, **kwargs)
-def load_pyconfig_files(config_files, path):
- """Load multiple Python config files, merging each of them in turn.
-
- Parameters
+def load_pyconfig_files(config_files, path):
+ """Load multiple Python config files, merging each of them in turn.
+
+ Parameters
----------
- config_files : list of str
-        List of config file names to load and merge into the config.
- path : unicode
- The full path to the location of the config files.
- """
- config = Config()
- for cf in config_files:
- loader = PyFileConfigLoader(cf, path=path)
- try:
- next_config = loader.load_config()
- except ConfigFileNotFound:
- pass
- except:
- raise
- else:
- config.merge(next_config)
- return config
+ config_files : list of str
+        List of config file names to load and merge into the config.
+ path : unicode
+ The full path to the location of the config files.
+ """
+ config = Config()
+ for cf in config_files:
+ loader = PyFileConfigLoader(cf, path=path)
+ try:
+ next_config = loader.load_config()
+ except ConfigFileNotFound:
+ pass
+ except:
+ raise
+ else:
+ config.merge(next_config)
+ return config
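A short usage sketch (hypothetical file names and path); because each file's Config is merged over the previous result, later files win on conflicts:

    config = load_pyconfig_files(
        ['base_config.py', 'site_config.py'],  # merged in this order
        '/etc/myapp',
    )
    # Missing files are skipped via ConfigFileNotFound; any other error propagates.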
diff --git a/contrib/python/traitlets/py3/traitlets/config/manager.py b/contrib/python/traitlets/py3/traitlets/config/manager.py
index 041477b1b9..164053261e 100644
--- a/contrib/python/traitlets/py3/traitlets/config/manager.py
+++ b/contrib/python/traitlets/py3/traitlets/config/manager.py
@@ -1,84 +1,84 @@
-"""Manager to read and modify config data in JSON files.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-import errno
-import io
-import json
-import os
-
-from traitlets.config import LoggingConfigurable
-from traitlets.traitlets import Unicode
-
-
-def recursive_update(target, new):
- """Recursively update one dictionary using another.
-
- None values will delete their keys.
- """
- for k, v in new.items():
- if isinstance(v, dict):
- if k not in target:
- target[k] = {}
- recursive_update(target[k], v)
- if not target[k]:
- # Prune empty subdicts
- del target[k]
-
- elif v is None:
- target.pop(k, None)
-
- else:
- target[k] = v
-
-
-class BaseJSONConfigManager(LoggingConfigurable):
- """General JSON config manager
-
-    Deals with persisting/storing config in a JSON file
- """
-
- config_dir = Unicode('.')
-
- def ensure_config_dir_exists(self):
- try:
- os.makedirs(self.config_dir, 0o755)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
- def file_name(self, section_name):
- return os.path.join(self.config_dir, section_name+'.json')
-
- def get(self, section_name):
- """Retrieve the config data for the specified section.
-
- Returns the data as a dictionary, or an empty dictionary if the file
- doesn't exist.
- """
- filename = self.file_name(section_name)
- if os.path.isfile(filename):
- with io.open(filename, encoding='utf-8') as f:
- return json.load(f)
- else:
- return {}
-
- def set(self, section_name, data):
- """Store the given config data.
- """
- filename = self.file_name(section_name)
- self.ensure_config_dir_exists()
-
+"""Manager to read and modify config data in JSON files.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+import errno
+import io
+import json
+import os
+
+from traitlets.config import LoggingConfigurable
+from traitlets.traitlets import Unicode
+
+
+def recursive_update(target, new):
+ """Recursively update one dictionary using another.
+
+ None values will delete their keys.
+ """
+ for k, v in new.items():
+ if isinstance(v, dict):
+ if k not in target:
+ target[k] = {}
+ recursive_update(target[k], v)
+ if not target[k]:
+ # Prune empty subdicts
+ del target[k]
+
+ elif v is None:
+ target.pop(k, None)
+
+ else:
+ target[k] = v
+
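An illustrative round-trip of the semantics above (nested dicts merge, None deletes a key, emptied sub-dicts are pruned):

    target = {'a': {'x': 1, 'y': 2}, 'b': 3}
    recursive_update(target, {'a': {'x': None}, 'b': None, 'c': 4})
    # target is now {'a': {'y': 2}, 'c': 4}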
+
+class BaseJSONConfigManager(LoggingConfigurable):
+ """General JSON config manager
+
+    Deals with persisting/storing config in a JSON file
+ """
+
+ config_dir = Unicode('.')
+
+ def ensure_config_dir_exists(self):
+ try:
+ os.makedirs(self.config_dir, 0o755)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def file_name(self, section_name):
+ return os.path.join(self.config_dir, section_name+'.json')
+
+ def get(self, section_name):
+ """Retrieve the config data for the specified section.
+
+ Returns the data as a dictionary, or an empty dictionary if the file
+ doesn't exist.
+ """
+ filename = self.file_name(section_name)
+ if os.path.isfile(filename):
+ with io.open(filename, encoding='utf-8') as f:
+ return json.load(f)
+ else:
+ return {}
+
+ def set(self, section_name, data):
+ """Store the given config data.
+ """
+ filename = self.file_name(section_name)
+ self.ensure_config_dir_exists()
+
f = open(filename, 'w', encoding='utf-8')
- with f:
- json.dump(data, f, indent=2)
-
- def update(self, section_name, new_data):
- """Modify the config section by recursively updating it with new_data.
-
- Returns the modified config data as a dictionary.
- """
- data = self.get(section_name)
- recursive_update(data, new_data)
- self.set(section_name, data)
- return data
+ with f:
+ json.dump(data, f, indent=2)
+
+ def update(self, section_name, new_data):
+ """Modify the config section by recursively updating it with new_data.
+
+ Returns the modified config data as a dictionary.
+ """
+ data = self.get(section_name)
+ recursive_update(data, new_data)
+ self.set(section_name, data)
+ return data
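A usage sketch (hypothetical section name and directory): set() writes <section>.json, and update() is a get/merge/set round-trip:

    manager = BaseJSONConfigManager(config_dir='/tmp/myapp_config')
    manager.set('notebook', {'theme': 'dark'})
    merged = manager.update('notebook', {'fontsize': 12})
    # merged == {'theme': 'dark', 'fontsize': 12}, persisted in notebook.json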
diff --git a/contrib/python/traitlets/py3/traitlets/log.py b/contrib/python/traitlets/py3/traitlets/log.py
index 559735bd1a..af86b325f5 100644
--- a/contrib/python/traitlets/py3/traitlets/log.py
+++ b/contrib/python/traitlets/py3/traitlets/log.py
@@ -1,27 +1,27 @@
-"""Grab the global logger instance."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import logging
-
-_logger = None
-
-def get_logger():
- """Grab the global logger instance.
+"""Grab the global logger instance."""
- If a global Application is instantiated, grab its logger.
- Otherwise, grab the root logger.
- """
- global _logger
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
- if _logger is None:
- from .config import Application
- if Application.initialized():
- _logger = Application.instance().log
- else:
+import logging
+
+_logger = None
+
+def get_logger():
+ """Grab the global logger instance.
+
+ If a global Application is instantiated, grab its logger.
+ Otherwise, grab the root logger.
+ """
+ global _logger
+
+ if _logger is None:
+ from .config import Application
+ if Application.initialized():
+ _logger = Application.instance().log
+ else:
_logger = logging.getLogger('traitlets')
# Add a NullHandler to silence warnings about not being
# initialized, per best practice for libraries.
_logger.addHandler(logging.NullHandler())
- return _logger
+ return _logger
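Library code is expected to grab this shared logger instead of creating its own, so an Application's logger is picked up automatically when one exists:

    log = get_logger()
    log.debug('configuration loaded')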
diff --git a/contrib/python/traitlets/py3/traitlets/traitlets.py b/contrib/python/traitlets/py3/traitlets/traitlets.py
index 1a278992a3..6bdf7414d3 100644
--- a/contrib/python/traitlets/py3/traitlets/traitlets.py
+++ b/contrib/python/traitlets/py3/traitlets/traitlets.py
@@ -1,62 +1,62 @@
-"""
-A lightweight Traits like module.
-
-This is designed to provide a lightweight, simple, pure Python version of
-many of the capabilities of enthought.traits. This includes:
-
-* Validation
-* Type specification with defaults
-* Static and dynamic notification
-* Basic predefined types
-* An API that is similar to enthought.traits
-
-We don't support:
-
-* Delegation
-* Automatic GUI generation
-* A full set of trait types. Most importantly, we don't provide container
- traits (list, dict, tuple) that can trigger notifications if their
- contents change.
-* API compatibility with enthought.traits
-
-There are also some important differences in our design:
-
-* enthought.traits does not validate default values. We do.
-
-We choose to create this module because we need these capabilities, but
-we need them to be pure Python so they work in all Python implementations,
-including Jython and IronPython.
-
-Inheritance diagram:
-
-.. inheritance-diagram:: traitlets.traitlets
- :parts: 3
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-#
-# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
-# also under the terms of the Modified BSD License.
-
+"""
+A lightweight Traits like module.
+
+This is designed to provide a lightweight, simple, pure Python version of
+many of the capabilities of enthought.traits. This includes:
+
+* Validation
+* Type specification with defaults
+* Static and dynamic notification
+* Basic predefined types
+* An API that is similar to enthought.traits
+
+We don't support:
+
+* Delegation
+* Automatic GUI generation
+* A full set of trait types. Most importantly, we don't provide container
+ traits (list, dict, tuple) that can trigger notifications if their
+ contents change.
+* API compatibility with enthought.traits
+
+There are also some important differences in our design:
+
+* enthought.traits does not validate default values. We do.
+
+We choose to create this module because we need these capabilities, but
+we need them to be pure Python so they work in all Python implementations,
+including Jython and IronPython.
+
+Inheritance diagram:
+
+.. inheritance-diagram:: traitlets.traitlets
+ :parts: 3
+"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+#
+# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
+# also under the terms of the Modified BSD License.
+
from ast import literal_eval
-import contextlib
-import inspect
+import contextlib
+import inspect
import os
-import re
-import sys
-import types
+import re
+import sys
+import types
import enum
-from warnings import warn, warn_explicit
-
-from .utils.getargspec import getargspec
-from .utils.importstring import import_item
-from .utils.sentinel import Sentinel
+from warnings import warn, warn_explicit
+
+from .utils.getargspec import getargspec
+from .utils.importstring import import_item
+from .utils.sentinel import Sentinel
from .utils.bunch import Bunch
from .utils.descriptions import describe, class_of, add_article, repr_type
-
-SequenceTypes = (list, tuple, set, frozenset)
-
+
+SequenceTypes = (list, tuple, set, frozenset)
+
 # backward compatibility, used to distinguish between Python 2 and 3.
ClassTypes = (type,)
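A minimal sketch (with a hypothetical class) of the capabilities the module docstring lists, i.e. typed traits with static and dynamic defaults plus validation on assignment:

    from traitlets import HasTraits, Int, Unicode, default

    class Worker(HasTraits):
        retries = Int(3)
        name = Unicode()

        @default('name')
        def _name_default(self):
            return 'worker-%d' % self.retries

    w = Worker()
    w.retries           # 3 (static default)
    w.name              # 'worker-3' (dynamic default)
    # w.retries = 'many' would raise a TraitError (validation)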
@@ -85,34 +85,34 @@ __all__ = [
# any TraitType subclass (that doesn't start with _) will be added automatically
-#-----------------------------------------------------------------------------
-# Basic classes
-#-----------------------------------------------------------------------------
-
-
-Undefined = Sentinel('Undefined', 'traitlets',
-'''
-Used in Traitlets to specify that no defaults are set in kwargs
-'''
-)
-
-All = Sentinel('All', 'traitlets',
-'''
-Used in Traitlets to listen to all types of notification or to notifications
-from all trait attributes.
-'''
-)
-
-# Deprecated alias
-NoDefaultSpecified = Undefined
-
-class TraitError(Exception):
- pass
-
-#-----------------------------------------------------------------------------
-# Utilities
-#-----------------------------------------------------------------------------
-
+#-----------------------------------------------------------------------------
+# Basic classes
+#-----------------------------------------------------------------------------
+
+
+Undefined = Sentinel('Undefined', 'traitlets',
+'''
+Used in Traitlets to specify that no defaults are set in kwargs
+'''
+)
+
+All = Sentinel('All', 'traitlets',
+'''
+Used in Traitlets to listen to all types of notification or to notifications
+from all trait attributes.
+'''
+)
+
+# Deprecated alias
+NoDefaultSpecified = Undefined
+
+class TraitError(Exception):
+ pass
+
+#-----------------------------------------------------------------------------
+# Utilities
+#-----------------------------------------------------------------------------
+
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
def isidentifier(s):
@@ -134,332 +134,332 @@ def _should_warn(key):
else:
return False
-def _deprecated_method(method, cls, method_name, msg):
- """Show deprecation warning about a magic method definition.
-
-    Uses warn_explicit to bind the warning to the method definition instead of
-    the triggering code, which isn't relevant.
- """
+def _deprecated_method(method, cls, method_name, msg):
+ """Show deprecation warning about a magic method definition.
+
+    Uses warn_explicit to bind the warning to the method definition instead of
+    the triggering code, which isn't relevant.
+ """
warn_msg = "{classname}.{method_name} is deprecated in traitlets 4.1: {msg}".format(
- classname=cls.__name__, method_name=method_name, msg=msg
- )
-
- for parent in inspect.getmro(cls):
- if method_name in parent.__dict__:
- cls = parent
- break
+ classname=cls.__name__, method_name=method_name, msg=msg
+ )
+
+ for parent in inspect.getmro(cls):
+ if method_name in parent.__dict__:
+ cls = parent
+ break
# limit deprecation messages to once per package
package_name = cls.__module__.split('.', 1)[0]
key = (package_name, msg)
if not _should_warn(key):
return
- try:
- fname = inspect.getsourcefile(method) or "<unknown>"
- lineno = inspect.getsourcelines(method)[1] or 0
+ try:
+ fname = inspect.getsourcefile(method) or "<unknown>"
+ lineno = inspect.getsourcelines(method)[1] or 0
except (OSError, TypeError) as e:
- # Failed to inspect for some reason
- warn(warn_msg + ('\n(inspection failed) %s' % e), DeprecationWarning)
- else:
- warn_explicit(warn_msg, DeprecationWarning, fname, lineno)
-
+ # Failed to inspect for some reason
+ warn(warn_msg + ('\n(inspection failed) %s' % e), DeprecationWarning)
+ else:
+ warn_explicit(warn_msg, DeprecationWarning, fname, lineno)
+
def _safe_literal_eval(s):
"""Safely evaluate an expression
-
+
Returns original string if eval fails.
-
+
Use only where types are ambiguous.
- """
+ """
try:
return literal_eval(s)
except (NameError, SyntaxError, ValueError):
return s
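Two illustrative calls showing the fallback:

    _safe_literal_eval('[1, 2, 3]')   # -> [1, 2, 3]
    _safe_literal_eval('not-python')  # -> 'not-python' (eval failed, string returned)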
-
-def is_trait(t):
- """ Returns whether the given value is an instance or subclass of TraitType.
- """
- return (isinstance(t, TraitType) or
- (isinstance(t, type) and issubclass(t, TraitType)))
-
-
-def parse_notifier_name(names):
- """Convert the name argument to a list of names.
-
- Examples
- --------
- >>> parse_notifier_name([])
- [All]
+
+def is_trait(t):
+ """ Returns whether the given value is an instance or subclass of TraitType.
+ """
+ return (isinstance(t, TraitType) or
+ (isinstance(t, type) and issubclass(t, TraitType)))
+
+
+def parse_notifier_name(names):
+ """Convert the name argument to a list of names.
+
+ Examples
+ --------
+ >>> parse_notifier_name([])
+ [All]
>>> parse_notifier_name("a")
- ['a']
+ ['a']
>>> parse_notifier_name(["a", "b"])
- ['a', 'b']
- >>> parse_notifier_name(All)
- [All]
- """
+ ['a', 'b']
+ >>> parse_notifier_name(All)
+ [All]
+ """
if names is All or isinstance(names, str):
- return [names]
+ return [names]
else:
- if not names or All in names:
- return [All]
- for n in names:
+ if not names or All in names:
+ return [All]
+ for n in names:
if not isinstance(n, str):
raise TypeError("names must be strings, not %r" % n)
- return names
-
-
-class _SimpleTest:
-    def __init__(self, value): self.value = value
-    def __call__(self, test):
-        return test == self.value
-    def __repr__(self):
-        return "<SimpleTest(%r)>" % self.value
- def __str__(self):
- return self.__repr__()
-
-
-def getmembers(object, predicate=None):
- """A safe version of inspect.getmembers that handles missing attributes.
-
-    This is useful when there are descriptor-based attributes that for
-    some reason raise AttributeError even though they exist. This happens
-    in zope.interface with the __provides__ attribute.
- """
- results = []
- for key in dir(object):
- try:
- value = getattr(object, key)
- except AttributeError:
- pass
- else:
- if not predicate or predicate(value):
- results.append((key, value))
- results.sort()
- return results
-
-def _validate_link(*tuples):
- """Validate arguments for traitlet link functions"""
- for t in tuples:
- if not len(t) == 2:
- raise TypeError("Each linked traitlet must be specified as (HasTraits, 'trait_name'), not %r" % t)
- obj, trait_name = t
- if not isinstance(obj, HasTraits):
- raise TypeError("Each object must be HasTraits, not %r" % type(obj))
-        if trait_name not in obj.traits():
- raise TypeError("%r has no trait %r" % (obj, trait_name))
-
-class link(object):
- """Link traits from different objects together so they remain in sync.
-
- Parameters
- ----------
- source : (object / attribute name) pair
- target : (object / attribute name) pair
+ return names
+
+
+class _SimpleTest:
+    def __init__(self, value): self.value = value
+    def __call__(self, test):
+        return test == self.value
+    def __repr__(self):
+        return "<SimpleTest(%r)>" % self.value
+ def __str__(self):
+ return self.__repr__()
+
+
+def getmembers(object, predicate=None):
+ """A safe version of inspect.getmembers that handles missing attributes.
+
+    This is useful when there are descriptor-based attributes that for
+    some reason raise AttributeError even though they exist. This happens
+    in zope.interface with the __provides__ attribute.
+ """
+ results = []
+ for key in dir(object):
+ try:
+ value = getattr(object, key)
+ except AttributeError:
+ pass
+ else:
+ if not predicate or predicate(value):
+ results.append((key, value))
+ results.sort()
+ return results
+
+def _validate_link(*tuples):
+ """Validate arguments for traitlet link functions"""
+ for t in tuples:
+ if not len(t) == 2:
+ raise TypeError("Each linked traitlet must be specified as (HasTraits, 'trait_name'), not %r" % t)
+ obj, trait_name = t
+ if not isinstance(obj, HasTraits):
+ raise TypeError("Each object must be HasTraits, not %r" % type(obj))
+        if trait_name not in obj.traits():
+ raise TypeError("%r has no trait %r" % (obj, trait_name))
+
+class link(object):
+ """Link traits from different objects together so they remain in sync.
+
+ Parameters
+ ----------
+ source : (object / attribute name) pair
+ target : (object / attribute name) pair
transform: iterable with two callables (optional)
Data transformation between source and target and target and source.
-
- Examples
- --------
+
+ Examples
+ --------
>>> c = link((src, "value"), (tgt, "value"))
- >>> src.value = 5 # updates other objects as well
- """
- updating = False
-
+ >>> src.value = 5 # updates other objects as well
+ """
+ updating = False
+
def __init__(self, source, target, transform=None):
- _validate_link(source, target)
- self.source, self.target = source, target
+ _validate_link(source, target)
+ self.source, self.target = source, target
self._transform, self._transform_inv = (
transform if transform else (lambda x: x,) * 2)
self.link()
def link(self):
- try:
+ try:
setattr(self.target[0], self.target[1],
self._transform(getattr(self.source[0], self.source[1])))
- finally:
+ finally:
self.source[0].observe(self._update_target, names=self.source[1])
self.target[0].observe(self._update_source, names=self.target[1])
-
- @contextlib.contextmanager
- def _busy_updating(self):
- self.updating = True
- try:
- yield
- finally:
- self.updating = False
-
- def _update_target(self, change):
- if self.updating:
- return
- with self._busy_updating():
+
+ @contextlib.contextmanager
+ def _busy_updating(self):
+ self.updating = True
+ try:
+ yield
+ finally:
+ self.updating = False
+
+ def _update_target(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
setattr(self.target[0], self.target[1], self._transform(change.new))
if getattr(self.source[0], self.source[1]) != change.new:
raise TraitError(
"Broken link {}: the source value changed while updating "
"the target.".format(self))
-
- def _update_source(self, change):
- if self.updating:
- return
- with self._busy_updating():
+
+ def _update_source(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
setattr(self.source[0], self.source[1],
self._transform_inv(change.new))
if getattr(self.target[0], self.target[1]) != change.new:
raise TraitError(
"Broken link {}: the target value changed while updating "
"the source.".format(self))
-
- def unlink(self):
- self.source[0].unobserve(self._update_target, names=self.source[1])
- self.target[0].unobserve(self._update_source, names=self.target[1])
-
-
-class directional_link(object):
- """Link the trait of a source object with traits of target objects.
-
- Parameters
- ----------
- source : (object, attribute name) pair
- target : (object, attribute name) pair
- transform: callable (optional)
- Data transformation between source and target.
-
- Examples
- --------
+
+ def unlink(self):
+ self.source[0].unobserve(self._update_target, names=self.source[1])
+ self.target[0].unobserve(self._update_source, names=self.target[1])
+
+
+class directional_link(object):
+ """Link the trait of a source object with traits of target objects.
+
+ Parameters
+ ----------
+ source : (object, attribute name) pair
+ target : (object, attribute name) pair
+ transform: callable (optional)
+ Data transformation between source and target.
+
+ Examples
+ --------
>>> c = directional_link((src, "value"), (tgt, "value"))
- >>> src.value = 5 # updates target objects
- >>> tgt.value = 6 # does not update source object
- """
- updating = False
-
- def __init__(self, source, target, transform=None):
- self._transform = transform if transform else lambda x: x
- _validate_link(source, target)
- self.source, self.target = source, target
+ >>> src.value = 5 # updates target objects
+ >>> tgt.value = 6 # does not update source object
+ """
+ updating = False
+
+ def __init__(self, source, target, transform=None):
+ self._transform = transform if transform else lambda x: x
+ _validate_link(source, target)
+ self.source, self.target = source, target
self.link()
def link(self):
- try:
+ try:
setattr(self.target[0], self.target[1],
self._transform(getattr(self.source[0], self.source[1])))
- finally:
- self.source[0].observe(self._update, names=self.source[1])
-
- @contextlib.contextmanager
- def _busy_updating(self):
- self.updating = True
- try:
- yield
- finally:
- self.updating = False
-
- def _update(self, change):
- if self.updating:
- return
- with self._busy_updating():
- setattr(self.target[0], self.target[1],
+ finally:
+ self.source[0].observe(self._update, names=self.source[1])
+
+ @contextlib.contextmanager
+ def _busy_updating(self):
+ self.updating = True
+ try:
+ yield
+ finally:
+ self.updating = False
+
+ def _update(self, change):
+ if self.updating:
+ return
+ with self._busy_updating():
+ setattr(self.target[0], self.target[1],
self._transform(change.new))
-
- def unlink(self):
- self.source[0].unobserve(self._update, names=self.source[1])
-
-dlink = directional_link
-
-
-#-----------------------------------------------------------------------------
+
+ def unlink(self):
+ self.source[0].unobserve(self._update, names=self.source[1])
+
+dlink = directional_link
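A sketch of the transform hooks (hypothetical trait classes): link() converts in both directions, dlink() only from source to target:

    from traitlets import HasTraits, Float

    class Celsius(HasTraits):
        value = Float()

    class Fahrenheit(HasTraits):
        value = Float()

    c, f = Celsius(), Fahrenheit()
    link((c, 'value'), (f, 'value'),
         transform=(lambda x: x * 9 / 5 + 32, lambda x: (x - 32) * 5 / 9))
    c.value = 100.0   # f.value becomes 212.0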
+
+
+#-----------------------------------------------------------------------------
# Base Descriptor Class
-#-----------------------------------------------------------------------------
-
-
-class BaseDescriptor(object):
- """Base descriptor class
-
- Notes
- -----
+#-----------------------------------------------------------------------------
+
+
+class BaseDescriptor(object):
+ """Base descriptor class
+
+ Notes
+ -----
This implements Python's descriptor protocol.
-
- This class is the base class for all such descriptors. The
- only magic we use is a custom metaclass for the main :class:`HasTraits`
- class that does the following:
-
- 1. Sets the :attr:`name` attribute of every :class:`BaseDescriptor`
- instance in the class dict to the name of the attribute.
- 2. Sets the :attr:`this_class` attribute of every :class:`BaseDescriptor`
- instance in the class dict to the *class* that declared the trait.
- This is used by the :class:`This` trait to allow subclasses to
- accept superclasses for :class:`This` values.
- """
-
- name = None
- this_class = None
-
- def class_init(self, cls, name):
- """Part of the initialization which may depend on the underlying
- HasDescriptors class.
-
- It is typically overloaded for specific types.
-
- This method is called by :meth:`MetaHasDescriptors.__init__`
- passing the class (`cls`) and `name` under which the descriptor
- has been assigned.
- """
- self.this_class = cls
- self.name = name
-
+
+ This class is the base class for all such descriptors. The
+ only magic we use is a custom metaclass for the main :class:`HasTraits`
+ class that does the following:
+
+ 1. Sets the :attr:`name` attribute of every :class:`BaseDescriptor`
+ instance in the class dict to the name of the attribute.
+ 2. Sets the :attr:`this_class` attribute of every :class:`BaseDescriptor`
+ instance in the class dict to the *class* that declared the trait.
+ This is used by the :class:`This` trait to allow subclasses to
+ accept superclasses for :class:`This` values.
+ """
+
+ name = None
+ this_class = None
+
+ def class_init(self, cls, name):
+ """Part of the initialization which may depend on the underlying
+ HasDescriptors class.
+
+ It is typically overloaded for specific types.
+
+ This method is called by :meth:`MetaHasDescriptors.__init__`
+ passing the class (`cls`) and `name` under which the descriptor
+ has been assigned.
+ """
+ self.this_class = cls
+ self.name = name
+
def subclass_init(self, cls):
pass
- def instance_init(self, obj):
- """Part of the initialization which may depend on the underlying
- HasDescriptors instance.
-
- It is typically overloaded for specific types.
-
- This method is called by :meth:`HasTraits.__new__` and in the
- :meth:`BaseDescriptor.instance_init` method of descriptors holding
- other descriptors.
- """
- pass
-
-
-class TraitType(BaseDescriptor):
- """A base class for all trait types.
- """
-
- metadata = {}
- allow_none = False
- read_only = False
- info_text = 'any value'
+ def instance_init(self, obj):
+ """Part of the initialization which may depend on the underlying
+ HasDescriptors instance.
+
+ It is typically overloaded for specific types.
+
+ This method is called by :meth:`HasTraits.__new__` and in the
+ :meth:`BaseDescriptor.instance_init` method of descriptors holding
+ other descriptors.
+ """
+ pass
+
+
+class TraitType(BaseDescriptor):
+ """A base class for all trait types.
+ """
+
+ metadata = {}
+ allow_none = False
+ read_only = False
+ info_text = 'any value'
default_value = Undefined
-
+
def __init__(self, default_value=Undefined, allow_none=False, read_only=None, help=None,
config=None, **kwargs):
- """Declare a traitlet.
-
- If *allow_none* is True, None is a valid value in addition to any
- values that are normally valid. The default is up to the subclass.
- For most trait types, the default value for ``allow_none`` is False.
+ """Declare a traitlet.
+
+ If *allow_none* is True, None is a valid value in addition to any
+ values that are normally valid. The default is up to the subclass.
+ For most trait types, the default value for ``allow_none`` is False.
If *read_only* is True, attempts to directly modify a trait attribute raises a TraitError.
- Extra metadata can be associated with the traitlet using the .tag() convenience method
- or by using the traitlet instance's .metadata dictionary.
- """
- if default_value is not Undefined:
- self.default_value = default_value
+ Extra metadata can be associated with the traitlet using the .tag() convenience method
+ or by using the traitlet instance's .metadata dictionary.
+ """
+ if default_value is not Undefined:
+ self.default_value = default_value
if allow_none:
- self.allow_none = allow_none
- if read_only is not None:
- self.read_only = read_only
- self.help = help if help is not None else ''
-
+ self.allow_none = allow_none
+ if read_only is not None:
+ self.read_only = read_only
+ self.help = help if help is not None else ''
+
if len(kwargs) > 0:
- stacklevel = 1
- f = inspect.currentframe()
- # count supers to determine stacklevel for warning
- while f.f_code.co_name == '__init__':
- stacklevel += 1
- f = f.f_back
+ stacklevel = 1
+ f = inspect.currentframe()
+ # count supers to determine stacklevel for warning
+ while f.f_code.co_name == '__init__':
+ stacklevel += 1
+ f = f.f_back
mod = f.f_globals.get('__name__') or ''
pkg = mod.split('.', 1)[0]
key = tuple(['metadata-tag', pkg] + sorted(kwargs))
@@ -468,21 +468,21 @@ class TraitType(BaseDescriptor):
"With traitlets 4.1, metadata should be set using the .tag() method, "
"e.g., Int().tag(key1='value1', key2='value2')" % (kwargs,),
DeprecationWarning, stacklevel=stacklevel)
- if len(self.metadata) > 0:
- self.metadata = self.metadata.copy()
+ if len(self.metadata) > 0:
+ self.metadata = self.metadata.copy()
self.metadata.update(kwargs)
- else:
+ else:
self.metadata = kwargs
- else:
- self.metadata = self.metadata.copy()
+ else:
+ self.metadata = self.metadata.copy()
if config is not None:
self.metadata['config'] = config
-
- # We add help to the metadata during a deprecation period so that
- # code that looks for the help string there can find it.
- if help is not None:
- self.metadata['help'] = help
-
+
+ # We add help to the metadata during a deprecation period so that
+ # code that looks for the help string there can find it.
+ if help is not None:
+ self.metadata['help'] = help
+
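A concrete sketch of these constructor arguments on a built-in trait type (Int is assumed to be imported from traitlets):

    port = Int(8080, allow_none=True, help='listening port').tag(config=True)
    # port.default_value == 8080; None is accepted because allow_none=True;
    # tag() stores config=True in port.metadata and returns the trait itself.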
def from_string(self, s):
"""Get a value from a config string
@@ -515,28 +515,28 @@ class TraitType(BaseDescriptor):
# Undefined will raise in TraitType.get
return self.default_value
- def get_default_value(self):
- """DEPRECATED: Retrieve the static default value for this trait.
- Use self.default_value instead
- """
+ def get_default_value(self):
+ """DEPRECATED: Retrieve the static default value for this trait.
+ Use self.default_value instead
+ """
warn("get_default_value is deprecated in traitlets 4.0: use the .default_value attribute", DeprecationWarning,
- stacklevel=2)
- return self.default_value
-
- def init_default_value(self, obj):
- """DEPRECATED: Set the static default value for the trait type.
- """
+ stacklevel=2)
+ return self.default_value
+
+ def init_default_value(self, obj):
+ """DEPRECATED: Set the static default value for the trait type.
+ """
warn("init_default_value is deprecated in traitlets 4.0, and may be removed in the future", DeprecationWarning,
- stacklevel=2)
- value = self._validate(obj, self.default_value)
- obj._trait_values[self.name] = value
- return value
-
+ stacklevel=2)
+ value = self._validate(obj, self.default_value)
+ obj._trait_values[self.name] = value
+ return value
+
def get(self, obj, cls=None):
- try:
- value = obj._trait_values[self.name]
- except KeyError:
- # Check for a dynamic initializer.
+ try:
+ value = obj._trait_values[self.name]
+ except KeyError:
+ # Check for a dynamic initializer.
default = obj.trait_defaults(self.name)
if default is Undefined:
warn(
@@ -548,93 +548,93 @@ class TraitType(BaseDescriptor):
)
with obj.cross_validation_lock:
value = self._validate(obj, default)
- obj._trait_values[self.name] = value
+ obj._trait_values[self.name] = value
obj._notify_observers(Bunch(
name=self.name,
value=value,
owner=obj,
type='default',
))
- return value
- except Exception:
- # This should never be reached.
- raise TraitError('Unexpected error in TraitType: '
- 'default value not set properly')
- else:
- return value
-
- def __get__(self, obj, cls=None):
- """Get the value of the trait by self.name for the instance.
-
- Default values are instantiated when :meth:`HasTraits.__new__`
- is called. Thus by the time this method gets called either the
- default value or a user defined value (they called :meth:`__set__`)
- is in the :class:`HasTraits` instance.
- """
- if obj is None:
- return self
- else:
- return self.get(obj, cls)
-
- def set(self, obj, value):
- new_value = self._validate(obj, value)
- try:
- old_value = obj._trait_values[self.name]
- except KeyError:
- old_value = self.default_value
-
- obj._trait_values[self.name] = new_value
- try:
- silent = bool(old_value == new_value)
+ return value
+ except Exception:
+ # This should never be reached.
+ raise TraitError('Unexpected error in TraitType: '
+ 'default value not set properly')
+ else:
+ return value
+
+ def __get__(self, obj, cls=None):
+ """Get the value of the trait by self.name for the instance.
+
+ Default values are instantiated when :meth:`HasTraits.__new__`
+ is called. Thus by the time this method gets called either the
+ default value or a user defined value (they called :meth:`__set__`)
+ is in the :class:`HasTraits` instance.
+ """
+ if obj is None:
+ return self
+ else:
+ return self.get(obj, cls)
+
+ def set(self, obj, value):
+ new_value = self._validate(obj, value)
+ try:
+ old_value = obj._trait_values[self.name]
+ except KeyError:
+ old_value = self.default_value
+
+ obj._trait_values[self.name] = new_value
+ try:
+ silent = bool(old_value == new_value)
except Exception:
- # if there is an error in comparing, default to notify
- silent = False
- if silent is not True:
- # we explicitly compare silent to True just in case the equality
- # comparison above returns something other than True/False
- obj._notify_trait(self.name, old_value, new_value)
-
- def __set__(self, obj, value):
- """Set the value of the trait by self.name for the instance.
-
- Values pass through a validation stage where errors are raised when
-        improper types, or types that cannot be coerced, are encountered.
- """
- if self.read_only:
- raise TraitError('The "%s" trait is read-only.' % self.name)
- else:
- self.set(obj, value)
-
- def _validate(self, obj, value):
- if value is None and self.allow_none:
- return value
- if hasattr(self, 'validate'):
- value = self.validate(obj, value)
- if obj._cross_validation_lock is False:
- value = self._cross_validate(obj, value)
- return value
-
- def _cross_validate(self, obj, value):
- if self.name in obj._trait_validators:
+ # if there is an error in comparing, default to notify
+ silent = False
+ if silent is not True:
+ # we explicitly compare silent to True just in case the equality
+ # comparison above returns something other than True/False
+ obj._notify_trait(self.name, old_value, new_value)
+
+ def __set__(self, obj, value):
+ """Set the value of the trait by self.name for the instance.
+
+ Values pass through a validation stage where errors are raised when
+        improper types, or types that cannot be coerced, are encountered.
+ """
+ if self.read_only:
+ raise TraitError('The "%s" trait is read-only.' % self.name)
+ else:
+ self.set(obj, value)
+
+ def _validate(self, obj, value):
+ if value is None and self.allow_none:
+ return value
+ if hasattr(self, 'validate'):
+ value = self.validate(obj, value)
+ if obj._cross_validation_lock is False:
+ value = self._cross_validate(obj, value)
+ return value
+
+ def _cross_validate(self, obj, value):
+ if self.name in obj._trait_validators:
proposal = Bunch({'trait': self, 'value': value, 'owner': obj})
- value = obj._trait_validators[self.name](obj, proposal)
- elif hasattr(obj, '_%s_validate' % self.name):
- meth_name = '_%s_validate' % self.name
- cross_validate = getattr(obj, meth_name)
- _deprecated_method(cross_validate, obj.__class__, meth_name,
- "use @validate decorator instead.")
- value = cross_validate(value, self)
- return value
-
- def __or__(self, other):
- if isinstance(other, Union):
- return Union([self] + other.trait_types)
- else:
- return Union([self, other])
-
- def info(self):
- return self.info_text
-
+ value = obj._trait_validators[self.name](obj, proposal)
+ elif hasattr(obj, '_%s_validate' % self.name):
+ meth_name = '_%s_validate' % self.name
+ cross_validate = getattr(obj, meth_name)
+ _deprecated_method(cross_validate, obj.__class__, meth_name,
+ "use @validate decorator instead.")
+ value = cross_validate(value, self)
+ return value
+
+ def __or__(self, other):
+ if isinstance(other, Union):
+ return Union([self] + other.trait_types)
+ else:
+ return Union([self, other])
+
+ def info(self):
+ return self.info_text
+
def error(self, obj, value, error=None, info=None):
"""Raise a TraitError
@@ -676,7 +676,7 @@ class TraitType(BaseDescriptor):
"expected %s, not %s." % (self.name, chain,
error.args[1], describe("the", error.args[0])),)
raise error
- else:
+ else:
# this trait caused an error
if self.name is None:
# this is not the root trait
@@ -690,196 +690,196 @@ class TraitType(BaseDescriptor):
e = "The '%s' trait expected %s, not %s." % (
self.name, self.info(), describe("the", value))
raise TraitError(e)
-
- def get_metadata(self, key, default=None):
- """DEPRECATED: Get a metadata value.
-
- Use .metadata[key] or .metadata.get(key, default) instead.
- """
- if key == 'help':
- msg = "use the instance .help string directly, like x.help"
- else:
- msg = "use the instance .metadata dictionary directly, like x.metadata[key] or x.metadata.get(key, default)"
+
+ def get_metadata(self, key, default=None):
+ """DEPRECATED: Get a metadata value.
+
+ Use .metadata[key] or .metadata.get(key, default) instead.
+ """
+ if key == 'help':
+ msg = "use the instance .help string directly, like x.help"
+ else:
+ msg = "use the instance .metadata dictionary directly, like x.metadata[key] or x.metadata.get(key, default)"
warn("Deprecated in traitlets 4.1, " + msg, DeprecationWarning, stacklevel=2)
- return self.metadata.get(key, default)
-
- def set_metadata(self, key, value):
- """DEPRECATED: Set a metadata key/value.
-
- Use .metadata[key] = value instead.
- """
- if key == 'help':
- msg = "use the instance .help string directly, like x.help = value"
- else:
- msg = "use the instance .metadata dictionary directly, like x.metadata[key] = value"
+ return self.metadata.get(key, default)
+
+ def set_metadata(self, key, value):
+ """DEPRECATED: Set a metadata key/value.
+
+ Use .metadata[key] = value instead.
+ """
+ if key == 'help':
+ msg = "use the instance .help string directly, like x.help = value"
+ else:
+ msg = "use the instance .metadata dictionary directly, like x.metadata[key] = value"
warn("Deprecated in traitlets 4.1, " + msg, DeprecationWarning, stacklevel=2)
- self.metadata[key] = value
-
- def tag(self, **metadata):
- """Sets metadata and returns self.
-
- This allows convenient metadata tagging when initializing the trait, such as:
-
- >>> Int(0).tag(config=True, sync=True)
- """
+ self.metadata[key] = value
+
+ def tag(self, **metadata):
+ """Sets metadata and returns self.
+
+ This allows convenient metadata tagging when initializing the trait, such as:
+
+ >>> Int(0).tag(config=True, sync=True)
+ """
maybe_constructor_keywords = set(metadata.keys()).intersection({'help','allow_none', 'read_only', 'default_value'})
if maybe_constructor_keywords:
        warn('The following attributes are set using `tag`, but seem to be constructor keyword arguments: %s ' %
maybe_constructor_keywords, UserWarning, stacklevel=2)
- self.metadata.update(metadata)
- return self
-
- def default_value_repr(self):
- return repr(self.default_value)
-
-#-----------------------------------------------------------------------------
-# The HasTraits implementation
-#-----------------------------------------------------------------------------
-
-class _CallbackWrapper(object):
- """An object adapting a on_trait_change callback into an observe callback.
-
- The comparison operator __eq__ is implemented to enable removal of wrapped
- callbacks.
- """
-
- def __init__(self, cb):
- self.cb = cb
- # Bound methods have an additional 'self' argument.
- offset = -1 if isinstance(self.cb, types.MethodType) else 0
- self.nargs = len(getargspec(cb)[0]) + offset
- if (self.nargs > 4):
- raise TraitError('a trait changed callback must have 0-4 arguments.')
-
- def __eq__(self, other):
- # The wrapper is equal to the wrapped element
- if isinstance(other, _CallbackWrapper):
- return self.cb == other.cb
- else:
- return self.cb == other
-
- def __call__(self, change):
- # The wrapper is callable
- if self.nargs == 0:
- self.cb()
- elif self.nargs == 1:
+ self.metadata.update(metadata)
+ return self
+
+ def default_value_repr(self):
+ return repr(self.default_value)
+
+#-----------------------------------------------------------------------------
+# The HasTraits implementation
+#-----------------------------------------------------------------------------
+
+class _CallbackWrapper(object):
+ """An object adapting a on_trait_change callback into an observe callback.
+
+ The comparison operator __eq__ is implemented to enable removal of wrapped
+ callbacks.
+ """
+
+ def __init__(self, cb):
+ self.cb = cb
+ # Bound methods have an additional 'self' argument.
+ offset = -1 if isinstance(self.cb, types.MethodType) else 0
+ self.nargs = len(getargspec(cb)[0]) + offset
+ if (self.nargs > 4):
+ raise TraitError('a trait changed callback must have 0-4 arguments.')
+
+ def __eq__(self, other):
+ # The wrapper is equal to the wrapped element
+ if isinstance(other, _CallbackWrapper):
+ return self.cb == other.cb
+ else:
+ return self.cb == other
+
+ def __call__(self, change):
+ # The wrapper is callable
+ if self.nargs == 0:
+ self.cb()
+ elif self.nargs == 1:
self.cb(change.name)
- elif self.nargs == 2:
+ elif self.nargs == 2:
self.cb(change.name, change.new)
- elif self.nargs == 3:
+ elif self.nargs == 3:
self.cb(change.name, change.old, change.new)
- elif self.nargs == 4:
+ elif self.nargs == 4:
self.cb(change.name, change.old, change.new, change.owner)
-
-def _callback_wrapper(cb):
- if isinstance(cb, _CallbackWrapper):
- return cb
- else:
- return _CallbackWrapper(cb)
-
-
-class MetaHasDescriptors(type):
- """A metaclass for HasDescriptors.
-
- This metaclass makes sure that any TraitType class attributes are
- instantiated and sets their name attribute.
- """
-
- def __new__(mcls, name, bases, classdict):
- """Create the HasDescriptors class."""
+
+def _callback_wrapper(cb):
+ if isinstance(cb, _CallbackWrapper):
+ return cb
+ else:
+ return _CallbackWrapper(cb)
+
+
+class MetaHasDescriptors(type):
+ """A metaclass for HasDescriptors.
+
+ This metaclass makes sure that any TraitType class attributes are
+ instantiated and sets their name attribute.
+ """
+
+ def __new__(mcls, name, bases, classdict):
+ """Create the HasDescriptors class."""
for k, v in classdict.items():
- # ----------------------------------------------------------------
- # Support of deprecated behavior allowing for TraitType types
- # to be used instead of TraitType instances.
- if inspect.isclass(v) and issubclass(v, TraitType):
+ # ----------------------------------------------------------------
+ # Support of deprecated behavior allowing for TraitType types
+ # to be used instead of TraitType instances.
+ if inspect.isclass(v) and issubclass(v, TraitType):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)."
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=2)
- classdict[k] = v()
- # ----------------------------------------------------------------
-
- return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict)
-
- def __init__(cls, name, bases, classdict):
- """Finish initializing the HasDescriptors class."""
- super(MetaHasDescriptors, cls).__init__(name, bases, classdict)
- cls.setup_class(classdict)
-
- def setup_class(cls, classdict):
- """Setup descriptor instance on the class
-
- This sets the :attr:`this_class` and :attr:`name` attributes of each
- BaseDescriptor in the class dict of the newly created ``cls`` before
- calling their :attr:`class_init` method.
- """
+ DeprecationWarning, stacklevel=2)
+ classdict[k] = v()
+ # ----------------------------------------------------------------
+
+ return super(MetaHasDescriptors, mcls).__new__(mcls, name, bases, classdict)
+
+ def __init__(cls, name, bases, classdict):
+ """Finish initializing the HasDescriptors class."""
+ super(MetaHasDescriptors, cls).__init__(name, bases, classdict)
+ cls.setup_class(classdict)
+
+ def setup_class(cls, classdict):
+ """Setup descriptor instance on the class
+
+ This sets the :attr:`this_class` and :attr:`name` attributes of each
+ BaseDescriptor in the class dict of the newly created ``cls`` before
+ calling their :attr:`class_init` method.
+ """
for k, v in classdict.items():
- if isinstance(v, BaseDescriptor):
- v.class_init(cls, k)
-
+ if isinstance(v, BaseDescriptor):
+ v.class_init(cls, k)
+
for k, v in getmembers(cls):
if isinstance(v, BaseDescriptor):
v.subclass_init(cls)
-
-
-class MetaHasTraits(MetaHasDescriptors):
- """A metaclass for HasTraits."""
-
- def setup_class(cls, classdict):
- cls._trait_default_generators = {}
- super(MetaHasTraits, cls).setup_class(classdict)
-
-
+
+
+class MetaHasTraits(MetaHasDescriptors):
+ """A metaclass for HasTraits."""
+
+ def setup_class(cls, classdict):
+ cls._trait_default_generators = {}
+ super(MetaHasTraits, cls).setup_class(classdict)
+
+
def observe(*names, type="change"):
- """A decorator which can be used to observe Traits on a class.
-
+ """A decorator which can be used to observe Traits on a class.
+
The handler passed to the decorator will be called with one ``change``
dict argument. The change dictionary at least holds a 'type' key and a
'name' key, corresponding respectively to the type of notification and the
name of the attribute that triggered the notification.
-
- Other keys may be passed depending on the value of 'type'. In the case
- where type is 'change', we also have the following keys:
- * ``owner`` : the HasTraits instance
- * ``old`` : the old value of the modified trait attribute
- * ``new`` : the new value of the modified trait attribute
- * ``name`` : the name of the modified trait attribute.
-
- Parameters
- ----------
- *names
- The str names of the Traits to observe on the object.
+
+ Other keys may be passed depending on the value of 'type'. In the case
+ where type is 'change', we also have the following keys:
+ * ``owner`` : the HasTraits instance
+ * ``old`` : the old value of the modified trait attribute
+ * ``new`` : the new value of the modified trait attribute
+ * ``name`` : the name of the modified trait attribute.
+
+ Parameters
+ ----------
+ *names
+ The str names of the Traits to observe on the object.
type : str, kwarg-only
The type of event to observe (e.g. 'change')
- """
+ """
if not names:
raise TypeError("Please specify at least one trait name to observe.")
for name in names:
if name is not All and not isinstance(name, str):
raise TypeError("trait names to observe must be strings or All, not %r" % name)
return ObserveHandler(names, type=type)
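A handler sketch (hypothetical class) showing the change dict described above:

    from traitlets import HasTraits, Int, observe

    class Counter(HasTraits):
        count = Int(0)

        @observe('count')
        def _report(self, change):
            # change carries type, name, owner, old, new
            print(change['name'], change['old'], '->', change['new'])

    Counter().count = 1   # prints: count 0 -> 1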
-
-
-def observe_compat(func):
- """Backward-compatibility shim decorator for observers
-
- Use with:
-
- @observe('name')
- @observe_compat
- def _foo_changed(self, change):
- ...
-
- With this, `super()._foo_changed(self, name, old, new)` in subclasses will still work.
- Allows adoption of new observer API without breaking subclasses that override and super.
- """
- def compatible_observer(self, change_or_name, old=Undefined, new=Undefined):
- if isinstance(change_or_name, dict):
- change = change_or_name
- else:
- clsname = self.__class__.__name__
+
+
+def observe_compat(func):
+ """Backward-compatibility shim decorator for observers
+
+ Use with:
+
+ @observe('name')
+ @observe_compat
+ def _foo_changed(self, change):
+ ...
+
+ With this, `super()._foo_changed(self, name, old, new)` in subclasses will still work.
+ Allows adoption of new observer API without breaking subclasses that override and super.
+ """
+ def compatible_observer(self, change_or_name, old=Undefined, new=Undefined):
+ if isinstance(change_or_name, dict):
+ change = change_or_name
+ else:
+ clsname = self.__class__.__name__
warn("A parent of %s._%s_changed has adopted the new (traitlets 4.1) @observe(change) API" % (
- clsname, change_or_name), DeprecationWarning)
+ clsname, change_or_name), DeprecationWarning)
change = Bunch(
type='change',
old=old,
@@ -887,151 +887,151 @@ def observe_compat(func):
name=change_or_name,
owner=self,
)
- return func(self, change)
- return compatible_observer
-
-
-def validate(*names):
- """A decorator to register cross validator of HasTraits object's state
- when a Trait is set.
-
- The handler passed to the decorator must have one ``proposal`` dict argument.
- The proposal dictionary must hold the following keys:
-
- * ``owner`` : the HasTraits instance
- * ``value`` : the proposed value for the modified trait attribute
- * ``trait`` : the TraitType instance associated with the attribute
-
- Parameters
- ----------
+ return func(self, change)
+ return compatible_observer
+
+
+def validate(*names):
+ """A decorator to register cross validator of HasTraits object's state
+ when a Trait is set.
+
+ The handler passed to the decorator must have one ``proposal`` dict argument.
+ The proposal dictionary must hold the following keys:
+
+ * ``owner`` : the HasTraits instance
+ * ``value`` : the proposed value for the modified trait attribute
+ * ``trait`` : the TraitType instance associated with the attribute
+
+ Parameters
+ ----------
*names
- The str names of the Traits to validate.
-
- Notes
- -----
+ The str names of the Traits to validate.
+
+ Notes
+ -----
Since the owner has access to the ``HasTraits`` instance via the 'owner' key,
- the registered cross validator could potentially make changes to attributes
-    of the ``HasTraits`` instance. However, we recommend against doing so. The reason
- is that the cross-validation of attributes may run in arbitrary order when
+ the registered cross validator could potentially make changes to attributes
+    of the ``HasTraits`` instance. However, we recommend against doing so. The reason
+ is that the cross-validation of attributes may run in arbitrary order when
exiting the ``hold_trait_notifications`` context, and such changes may not
- commute.
- """
+ commute.
+ """
if not names:
raise TypeError("Please specify at least one trait name to validate.")
for name in names:
if name is not All and not isinstance(name, str):
raise TypeError("trait names to validate must be strings or All, not %r" % name)
- return ValidateHandler(names)
-
-
-def default(name):
- """ A decorator which assigns a dynamic default for a Trait on a HasTraits object.
-
- Parameters
- ----------
- name
- The str name of the Trait on the object whose default should be generated.
-
- Notes
- -----
- Unlike observers and validators which are properties of the HasTraits
- instance, default value generators are class-level properties.
-
- Besides, default generators are only invoked if they are registered in
- subclasses of `this_type`.
-
- ::
-
- class A(HasTraits):
- bar = Int()
-
- @default('bar')
- def get_bar_default(self):
- return 11
-
- class B(A):
- bar = Float() # This trait ignores the default generator defined in
- # the base class A
-
- class C(B):
-
- @default('bar')
- def some_other_default(self): # This default generator should not be
- return 3.0 # ignored since it is defined in a
- # class derived from B.a.this_class.
- """
+ return ValidateHandler(names)
+
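A cross-validator sketch (hypothetical class): the handler receives the proposal dict and returns the value to use, raising TraitError to reject:

    from traitlets import HasTraits, Int, TraitError, validate

    class Window(HasTraits):
        width = Int(800)

        @validate('width')
        def _check_width(self, proposal):
            if proposal['value'] < 0:
                raise TraitError('width must be non-negative')
            return proposal['value']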
+
+def default(name):
+ """ A decorator which assigns a dynamic default for a Trait on a HasTraits object.
+
+ Parameters
+ ----------
+ name
+ The str name of the Trait on the object whose default should be generated.
+
+ Notes
+ -----
+ Unlike observers and validators which are properties of the HasTraits
+ instance, default value generators are class-level properties.
+
+ Besides, default generators are only invoked if they are registered in
+ subclasses of `this_type`.
+
+ ::
+
+ class A(HasTraits):
+ bar = Int()
+
+ @default('bar')
+ def get_bar_default(self):
+ return 11
+
+ class B(A):
+ bar = Float() # This trait ignores the default generator defined in
+ # the base class A
+
+ class C(B):
+
+ @default('bar')
+ def some_other_default(self): # This default generator should not be
+ return 3.0 # ignored since it is defined in a
+ # class derived from B.a.this_class.
+ """
if not isinstance(name, str):
raise TypeError("Trait name must be a string or All, not %r" % name)
- return DefaultHandler(name)
-
-
-class EventHandler(BaseDescriptor):
-
- def _init_call(self, func):
- self.func = func
- return self
-
- def __call__(self, *args, **kwargs):
+ return DefaultHandler(name)
+
+
+class EventHandler(BaseDescriptor):
+
+ def _init_call(self, func):
+ self.func = func
+ return self
+
+ def __call__(self, *args, **kwargs):
"""Pass `*args` and `**kwargs` to the handler's function if it exists."""
- if hasattr(self, 'func'):
- return self.func(*args, **kwargs)
- else:
- return self._init_call(*args, **kwargs)
-
- def __get__(self, inst, cls=None):
- if inst is None:
- return self
- return types.MethodType(self.func, inst)
-
-
-class ObserveHandler(EventHandler):
-
- def __init__(self, names, type):
- self.trait_names = names
- self.type = type
-
- def instance_init(self, inst):
- inst.observe(self, self.trait_names, type=self.type)
-
-
-class ValidateHandler(EventHandler):
-
- def __init__(self, names):
- self.trait_names = names
-
- def instance_init(self, inst):
- inst._register_validator(self, self.trait_names)
-
-
-class DefaultHandler(EventHandler):
-
- def __init__(self, name):
- self.trait_name = name
-
- def class_init(self, cls, name):
+ if hasattr(self, 'func'):
+ return self.func(*args, **kwargs)
+ else:
+ return self._init_call(*args, **kwargs)
+
+ def __get__(self, inst, cls=None):
+ if inst is None:
+ return self
+ return types.MethodType(self.func, inst)
+
+
+class ObserveHandler(EventHandler):
+
+ def __init__(self, names, type):
+ self.trait_names = names
+ self.type = type
+
+ def instance_init(self, inst):
+ inst.observe(self, self.trait_names, type=self.type)
+
+
+class ValidateHandler(EventHandler):
+
+ def __init__(self, names):
+ self.trait_names = names
+
+ def instance_init(self, inst):
+ inst._register_validator(self, self.trait_names)
+
+
+class DefaultHandler(EventHandler):
+
+ def __init__(self, name):
+ self.trait_name = name
+
+ def class_init(self, cls, name):
super().class_init(cls, name)
- cls._trait_default_generators[self.trait_name] = self
-
-
+ cls._trait_default_generators[self.trait_name] = self
+
+
class HasDescriptors(metaclass=MetaHasDescriptors):
- """The base class for all classes that have descriptors.
- """
-
+ """The base class for all classes that have descriptors.
+ """
+
def __new__(*args, **kwargs):
# Pass cls as args[0] to allow "cls" as keyword argument
cls = args[0]
args = args[1:]
- # This is needed because object.__new__ only accepts
- # the cls argument.
- new_meth = super(HasDescriptors, cls).__new__
- if new_meth is object.__new__:
- inst = new_meth(cls)
- else:
+ # This is needed because object.__new__ only accepts
+ # the cls argument.
+ new_meth = super(HasDescriptors, cls).__new__
+ if new_meth is object.__new__:
+ inst = new_meth(cls)
+ else:
inst = new_meth(cls, *args, **kwargs)
inst.setup_instance(*args, **kwargs)
- return inst
-
+ return inst
+
def setup_instance(*args, **kwargs):
"""
This is called **before** self.__init__ is called.
@@ -1041,39 +1041,39 @@ class HasDescriptors(metaclass=MetaHasDescriptors):
args = args[1:]
self._cross_validation_lock = False
- cls = self.__class__
- for key in dir(cls):
- # Some descriptors raise AttributeError like zope.interface's
- # __provides__ attributes even though they exist. This causes
- # AttributeErrors even though they are listed in dir(cls).
- try:
- value = getattr(cls, key)
- except AttributeError:
- pass
- else:
- if isinstance(value, BaseDescriptor):
- value.instance_init(self)
-
-
+ cls = self.__class__
+ for key in dir(cls):
+ # Some descriptors raise AttributeError like zope.interface's
+ # __provides__ attributes even though they exist. This causes
+ # AttributeErrors even though they are listed in dir(cls).
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ pass
+ else:
+ if isinstance(value, BaseDescriptor):
+ value.instance_init(self)
+
+
class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
-
+
def setup_instance(*args, **kwargs):
# Pass self as args[0] to allow "self" as keyword argument
self = args[0]
args = args[1:]
- self._trait_values = {}
- self._trait_notifiers = {}
- self._trait_validators = {}
+ self._trait_values = {}
+ self._trait_notifiers = {}
+ self._trait_validators = {}
super(HasTraits, self).setup_instance(*args, **kwargs)
-
+
def __init__(self, *args, **kwargs):
- # Allow trait values to be set using keyword arguments.
- # We need to use setattr for this to trigger validation and
- # notifications.
+ # Allow trait values to be set using keyword arguments.
+ # We need to use setattr for this to trigger validation and
+ # notifications.
super_args = args
super_kwargs = {}
- with self.hold_trait_notifications():
+ with self.hold_trait_notifications():
for key, value in kwargs.items():
if self.has_trait(key):
setattr(self, key, value)
@@ -1099,38 +1099,38 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
DeprecationWarning,
stacklevel=2,
)
-
- def __getstate__(self):
- d = self.__dict__.copy()
- # event handlers stored on an instance are
- # expected to be reinstantiated during a
- # recall of instance_init during __setstate__
- d['_trait_notifiers'] = {}
- d['_trait_validators'] = {}
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # event handlers stored on an instance are
+ # expected to be reinstantiated during a
+ # recall of instance_init during __setstate__
+ d['_trait_notifiers'] = {}
+ d['_trait_validators'] = {}
d['_trait_values'] = self._trait_values.copy()
d['_cross_validation_lock'] = False # FIXME: raise if cloning locked!
- return d
-
- def __setstate__(self, state):
- self.__dict__ = state.copy()
-
- # event handlers are reassigned to self
- cls = self.__class__
- for key in dir(cls):
- # Some descriptors raise AttributeError like zope.interface's
- # __provides__ attributes even though they exist. This causes
- # AttributeErrors even though they are listed in dir(cls).
- try:
- value = getattr(cls, key)
- except AttributeError:
- pass
- else:
- if isinstance(value, EventHandler):
- value.instance_init(self)
-
+ return d
+
+ def __setstate__(self, state):
+ self.__dict__ = state.copy()
+
+ # event handlers are reassigned to self
+ cls = self.__class__
+ for key in dir(cls):
+ # Some descriptors raise AttributeError like zope.interface's
+ # __provides__ attributes even though they exist. This causes
+ # AttributeErrors even though they are listed in dir(cls).
+ try:
+ value = getattr(cls, key)
+ except AttributeError:
+ pass
+ else:
+ if isinstance(value, EventHandler):
+ value.instance_init(self)
+
@property
- @contextlib.contextmanager
+ @contextlib.contextmanager
def cross_validation_lock(self):
"""
A contextmanager for running a block with our cross validation lock set
@@ -1150,72 +1150,72 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
self._cross_validation_lock = False
@contextlib.contextmanager
- def hold_trait_notifications(self):
- """Context manager for bundling trait change notifications and cross
- validation.
-
- Use this when doing multiple trait assignments (init, config), to avoid
- race conditions in trait notifiers requesting other trait values.
- All trait notifications will fire after all values have been assigned.
- """
+ def hold_trait_notifications(self):
+ """Context manager for bundling trait change notifications and cross
+ validation.
+
+ Use this when doing multiple trait assignments (init, config), to avoid
+ race conditions in trait notifiers requesting other trait values.
+ All trait notifications will fire after all values have been assigned.
+ """
if self._cross_validation_lock:
- yield
- return
- else:
- cache = {}
- notify_change = self.notify_change
-
- def compress(past_changes, change):
- """Merges the provided change with the last if possible."""
- if past_changes is None:
- return [change]
- else:
+ yield
+ return
+ else:
+ cache = {}
+ notify_change = self.notify_change
+
+ def compress(past_changes, change):
+ """Merges the provided change with the last if possible."""
+ if past_changes is None:
+ return [change]
+ else:
if past_changes[-1]['type'] == 'change' and change.type == 'change':
past_changes[-1]['new'] = change.new
- else:
- # In case of changes other than 'change', append the notification.
- past_changes.append(change)
- return past_changes
-
- def hold(change):
+ else:
+ # In case of changes other than 'change', append the notification.
+ past_changes.append(change)
+ return past_changes
+
+ def hold(change):
name = change.name
- cache[name] = compress(cache.get(name), change)
-
- try:
- # Replace notify_change with `hold`, caching and compressing
- # notifications, disable cross validation and yield.
- self.notify_change = hold
- self._cross_validation_lock = True
- yield
- # Cross validate final values when context is released.
- for name in list(cache.keys()):
- trait = getattr(self.__class__, name)
- value = trait._cross_validate(self, getattr(self, name))
+ cache[name] = compress(cache.get(name), change)
+
+ try:
+ # Replace notify_change with `hold`, caching and compressing
+ # notifications, disable cross validation and yield.
+ self.notify_change = hold
+ self._cross_validation_lock = True
+ yield
+ # Cross validate final values when context is released.
+ for name in list(cache.keys()):
+ trait = getattr(self.__class__, name)
+ value = trait._cross_validate(self, getattr(self, name))
self.set_trait(name, value)
- except TraitError as e:
- # Roll back in case of TraitError during final cross validation.
- self.notify_change = lambda x: None
- for name, changes in cache.items():
- for change in changes[::-1]:
- # TODO: Separate into a rollback function per notification type.
+ except TraitError as e:
+ # Roll back in case of TraitError during final cross validation.
+ self.notify_change = lambda x: None
+ for name, changes in cache.items():
+ for change in changes[::-1]:
+ # TODO: Separate into a rollback function per notification type.
if change.type == 'change':
if change.old is not Undefined:
self.set_trait(name, change.old)
- else:
- self._trait_values.pop(name)
- cache = {}
- raise e
- finally:
- self._cross_validation_lock = False
+ else:
+ self._trait_values.pop(name)
+ cache = {}
+ raise e
+ finally:
+ self._cross_validation_lock = False
# Restore method retrieval from class
del self.notify_change
-
- # trigger delayed notifications
- for changes in cache.values():
- for change in changes:
- self.notify_change(change)
-
- def _notify_trait(self, name, old_value, new_value):
+
+ # trigger delayed notifications
+ for changes in cache.values():
+ for change in changes:
+ self.notify_change(change)
+
+ def _notify_trait(self, name, old_value, new_value):
self.notify_change(Bunch(
name=name,
old=old_value,
@@ -1223,8 +1223,8 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
owner=self,
type='change',
))
-
- def notify_change(self, change):
+
+ def notify_change(self, change):
"""Notify observers of a change event"""
return self._notify_observers(change)
@@ -1234,188 +1234,188 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
# cast to bunch if given a dict
event = Bunch(event)
name, type = event.name, event.type
-
- callables = []
- callables.extend(self._trait_notifiers.get(name, {}).get(type, []))
- callables.extend(self._trait_notifiers.get(name, {}).get(All, []))
- callables.extend(self._trait_notifiers.get(All, {}).get(type, []))
- callables.extend(self._trait_notifiers.get(All, {}).get(All, []))
-
- # Now static ones
- magic_name = '_%s_changed' % name
+
+ callables = []
+ callables.extend(self._trait_notifiers.get(name, {}).get(type, []))
+ callables.extend(self._trait_notifiers.get(name, {}).get(All, []))
+ callables.extend(self._trait_notifiers.get(All, {}).get(type, []))
+ callables.extend(self._trait_notifiers.get(All, {}).get(All, []))
+
+ # Now static ones
+ magic_name = '_%s_changed' % name
if event.type == "change" and hasattr(self, magic_name):
- class_value = getattr(self.__class__, magic_name)
- if not isinstance(class_value, ObserveHandler):
- _deprecated_method(class_value, self.__class__, magic_name,
- "use @observe and @unobserve instead.")
- cb = getattr(self, magic_name)
- # Only append the magic method if it was not manually registered
- if cb not in callables:
- callables.append(_callback_wrapper(cb))
-
- # Call them all now
- # Traits catches and logs errors here; we allow them to raise.
- for c in callables:
- # Bound methods have an additional 'self' argument.
-
- if isinstance(c, _CallbackWrapper):
- c = c.__call__
+ class_value = getattr(self.__class__, magic_name)
+ if not isinstance(class_value, ObserveHandler):
+ _deprecated_method(class_value, self.__class__, magic_name,
+ "use @observe and @unobserve instead.")
+ cb = getattr(self, magic_name)
+ # Only append the magic method if it was not manually registered
+ if cb not in callables:
+ callables.append(_callback_wrapper(cb))
+
+ # Call them all now
+ # Traits catches and logs errors here; we allow them to raise.
+ for c in callables:
+ # Bound methods have an additional 'self' argument.
+
+ if isinstance(c, _CallbackWrapper):
+ c = c.__call__
elif isinstance(c, EventHandler) and c.name is not None:
- c = getattr(self, c.name)
+ c = getattr(self, c.name)
c(event)
-
- def _add_notifiers(self, handler, name, type):
- if name not in self._trait_notifiers:
- nlist = []
- self._trait_notifiers[name] = {type: nlist}
- else:
- if type not in self._trait_notifiers[name]:
- nlist = []
- self._trait_notifiers[name][type] = nlist
- else:
- nlist = self._trait_notifiers[name][type]
- if handler not in nlist:
- nlist.append(handler)
-
- def _remove_notifiers(self, handler, name, type):
- try:
- if handler is None:
- del self._trait_notifiers[name][type]
- else:
- self._trait_notifiers[name][type].remove(handler)
- except KeyError:
- pass
-
- def on_trait_change(self, handler=None, name=None, remove=False):
- """DEPRECATED: Setup a handler to be called when a trait changes.
-
- This is used to setup dynamic notifications of trait changes.
-
- Static handlers can be created by creating methods on a HasTraits
- subclass with the naming convention '_[traitname]_changed'. Thus,
- to create static handler for the trait 'a', create the method
- _a_changed(self, name, old, new) (fewer arguments can be used, see
- below).
-
- If `remove` is True and `handler` is not specified, all change
- handlers for the specified name are uninstalled.
-
- Parameters
- ----------
- handler : callable, None
- A callable that is called when a trait changes. Its
- signature can be handler(), handler(name), handler(name, new),
- handler(name, old, new), or handler(name, old, new, self).
- name : list, str, None
- If None, the handler will apply to all traits. If a list
- of str, handler will apply to all names in the list. If a
- str, the handler will apply just to that name.
- remove : bool
- If False (the default), then install the handler. If True
- then uninstall it.
- """
+
+ def _add_notifiers(self, handler, name, type):
+ if name not in self._trait_notifiers:
+ nlist = []
+ self._trait_notifiers[name] = {type: nlist}
+ else:
+ if type not in self._trait_notifiers[name]:
+ nlist = []
+ self._trait_notifiers[name][type] = nlist
+ else:
+ nlist = self._trait_notifiers[name][type]
+ if handler not in nlist:
+ nlist.append(handler)
+
+ def _remove_notifiers(self, handler, name, type):
+ try:
+ if handler is None:
+ del self._trait_notifiers[name][type]
+ else:
+ self._trait_notifiers[name][type].remove(handler)
+ except KeyError:
+ pass
+
+ def on_trait_change(self, handler=None, name=None, remove=False):
+ """DEPRECATED: Setup a handler to be called when a trait changes.
+
+ This is used to setup dynamic notifications of trait changes.
+
+ Static handlers can be created by creating methods on a HasTraits
+ subclass with the naming convention '_[traitname]_changed'. Thus,
+ to create static handler for the trait 'a', create the method
+ _a_changed(self, name, old, new) (fewer arguments can be used, see
+ below).
+
+ If `remove` is True and `handler` is not specified, all change
+ handlers for the specified name are uninstalled.
+
+ Parameters
+ ----------
+ handler : callable, None
+ A callable that is called when a trait changes. Its
+ signature can be handler(), handler(name), handler(name, new),
+ handler(name, old, new), or handler(name, old, new, self).
+ name : list, str, None
+ If None, the handler will apply to all traits. If a list
+ of str, handler will apply to all names in the list. If a
+ str, the handler will apply just to that name.
+ remove : bool
+ If False (the default), then install the handler. If True
+ then uninstall it.
+ """
warn("on_trait_change is deprecated in traitlets 4.1: use observe instead",
- DeprecationWarning, stacklevel=2)
- if name is None:
- name = All
- if remove:
- self.unobserve(_callback_wrapper(handler), names=name)
- else:
- self.observe(_callback_wrapper(handler), names=name)
-
- def observe(self, handler, names=All, type='change'):
- """Setup a handler to be called when a trait changes.
-
- This is used to setup dynamic notifications of trait changes.
-
- Parameters
- ----------
- handler : callable
- A callable that is called when a trait changes. Its
+ DeprecationWarning, stacklevel=2)
+ if name is None:
+ name = All
+ if remove:
+ self.unobserve(_callback_wrapper(handler), names=name)
+ else:
+ self.observe(_callback_wrapper(handler), names=name)
+
+ def observe(self, handler, names=All, type='change'):
+ """Setup a handler to be called when a trait changes.
+
+ This is used to setup dynamic notifications of trait changes.
+
+ Parameters
+ ----------
+ handler : callable
+ A callable that is called when a trait changes. Its
signature should be ``handler(change)``, where ``change`` is a
dictionary. The change dictionary at least holds a 'type' key.
- * ``type``: the type of notification.
- Other keys may be passed depending on the value of 'type'. In the
- case where type is 'change', we also have the following keys:
- * ``owner`` : the HasTraits instance
- * ``old`` : the old value of the modified trait attribute
- * ``new`` : the new value of the modified trait attribute
- * ``name`` : the name of the modified trait attribute.
- names : list, str, All
- If names is All, the handler will apply to all traits. If a list
- of str, handler will apply to all names in the list. If a
- str, the handler will apply just to that name.
- type : str, All (default: 'change')
- The type of notification to filter by. If equal to All, then all
- notifications are passed to the observe handler.
- """
- names = parse_notifier_name(names)
- for n in names:
- self._add_notifiers(handler, n, type)
-
- def unobserve(self, handler, names=All, type='change'):
- """Remove a trait change handler.
-
+ * ``type``: the type of notification.
+ Other keys may be passed depending on the value of 'type'. In the
+ case where type is 'change', we also have the following keys:
+ * ``owner`` : the HasTraits instance
+ * ``old`` : the old value of the modified trait attribute
+ * ``new`` : the new value of the modified trait attribute
+ * ``name`` : the name of the modified trait attribute.
+ names : list, str, All
+ If names is All, the handler will apply to all traits. If a list
+ of str, handler will apply to all names in the list. If a
+ str, the handler will apply just to that name.
+ type : str, All (default: 'change')
+ The type of notification to filter by. If equal to All, then all
+ notifications are passed to the observe handler.
+ """
+ names = parse_notifier_name(names)
+ for n in names:
+ self._add_notifiers(handler, n, type)
+
+ def unobserve(self, handler, names=All, type='change'):
+ """Remove a trait change handler.
+
This is used to unregister handlers to trait change notifications.
-
- Parameters
- ----------
- handler : callable
- The callable called when a trait attribute changes.
- names : list, str, All (default: All)
- The names of the traits for which the specified handler should be
- uninstalled. If names is All, the specified handler is uninstalled
- from the list of notifiers corresponding to all changes.
- type : str or All (default: 'change')
- The type of notification to filter by. If All, the specified handler
- is uninstalled from the list of notifiers corresponding to all types.
- """
- names = parse_notifier_name(names)
- for n in names:
- self._remove_notifiers(handler, n, type)
-
- def unobserve_all(self, name=All):
- """Remove trait change handlers of any type for the specified name.
- If name is not specified, removes all trait notifiers."""
- if name is All:
- self._trait_notifiers = {}
- else:
- try:
- del self._trait_notifiers[name]
- except KeyError:
- pass
-
- def _register_validator(self, handler, names):
+
+ Parameters
+ ----------
+ handler : callable
+ The callable called when a trait attribute changes.
+ names : list, str, All (default: All)
+ The names of the traits for which the specified handler should be
+ uninstalled. If names is All, the specified handler is uninstalled
+ from the list of notifiers corresponding to all changes.
+ type : str or All (default: 'change')
+ The type of notification to filter by. If All, the specified handler
+ is uninstalled from the list of notifiers corresponding to all types.
+ """
+ names = parse_notifier_name(names)
+ for n in names:
+ self._remove_notifiers(handler, n, type)
+
+ def unobserve_all(self, name=All):
+ """Remove trait change handlers of any type for the specified name.
+ If name is not specified, removes all trait notifiers."""
+ if name is All:
+ self._trait_notifiers = {}
+ else:
+ try:
+ del self._trait_notifiers[name]
+ except KeyError:
+ pass
+
+ def _register_validator(self, handler, names):
"""Setup a handler to be called when a trait should be cross validated.
-
- This is used to set up dynamic notifications for cross-validation.
-
- If a validator is already registered for any of the provided names, a
+
+ This is used to set up dynamic notifications for cross-validation.
+
+ If a validator is already registered for any of the provided names, a
TraitError is raised and no new validator is registered.
-
- Parameters
- ----------
- handler : callable
- A callable that is called when the given trait is cross-validated.
+
+ Parameters
+ ----------
+ handler : callable
+ A callable that is called when the given trait is cross-validated.
Its signature is handler(proposal), where proposal is a Bunch (dictionary with attribute access)
with the following attributes/keys:
- * ``owner`` : the HasTraits instance
- * ``value`` : the proposed value for the modified trait attribute
- * ``trait`` : the TraitType instance associated with the attribute
- names : List of strings
- The names of the traits that should be cross-validated
- """
- for name in names:
- magic_name = '_%s_validate' % name
- if hasattr(self, magic_name):
- class_value = getattr(self.__class__, magic_name)
- if not isinstance(class_value, ValidateHandler):
+ * ``owner`` : the HasTraits instance
+ * ``value`` : the proposed value for the modified trait attribute
+ * ``trait`` : the TraitType instance associated with the attribute
+ names : List of strings
+ The names of the traits that should be cross-validated
+ """
+ for name in names:
+ magic_name = '_%s_validate' % name
+ if hasattr(self, magic_name):
+ class_value = getattr(self.__class__, magic_name)
+ if not isinstance(class_value, ValidateHandler):
_deprecated_method(class_value, self.__class__, magic_name,
- "use @validate decorator instead.")
- for name in names:
- self._trait_validators[name] = handler
-
+ "use @validate decorator instead.")
+ for name in names:
+ self._trait_validators[name] = handler
+
def add_traits(self, **traits):
"""Dynamically add trait attributes to the HasTraits instance."""
cls = self.__class__
@@ -1437,63 +1437,63 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
else:
getattr(cls, name).set(self, value)
- @classmethod
- def class_trait_names(cls, **metadata):
- """Get a list of all the names of this class' traits.
-
- This method is just like the :meth:`trait_names` method,
- but is unbound.
- """
+ @classmethod
+ def class_trait_names(cls, **metadata):
+ """Get a list of all the names of this class' traits.
+
+ This method is just like the :meth:`trait_names` method,
+ but is unbound.
+ """
return list(cls.class_traits(**metadata))
-
- @classmethod
- def class_traits(cls, **metadata):
- """Get a ``dict`` of all the traits of this class. The dictionary
- is keyed on the name and the values are the TraitType objects.
-
- This method is just like the :meth:`traits` method, but is unbound.
-
- The TraitTypes returned don't know anything about the values
- that the various HasTraits instances are holding.
-
- The metadata kwargs allow functions to be passed in which
- filter traits based on metadata values. The functions should
- take a single value as an argument and return a boolean. If
- any function returns False, then the trait is not included in
- the output. If a metadata key doesn't exist, None will be passed
- to the function.
- """
- traits = dict([memb for memb in getmembers(cls) if
- isinstance(memb[1], TraitType)])
-
- if len(metadata) == 0:
- return traits
-
- result = {}
- for name, trait in traits.items():
- for meta_name, meta_eval in metadata.items():
+
+ @classmethod
+ def class_traits(cls, **metadata):
+ """Get a ``dict`` of all the traits of this class. The dictionary
+ is keyed on the name and the values are the TraitType objects.
+
+ This method is just like the :meth:`traits` method, but is unbound.
+
+ The TraitTypes returned don't know anything about the values
+ that the various HasTraits instances are holding.
+
+ The metadata kwargs allow functions to be passed in which
+ filter traits based on metadata values. The functions should
+ take a single value as an argument and return a boolean. If
+ any function returns False, then the trait is not included in
+ the output. If a metadata key doesn't exist, None will be passed
+ to the function.
+ """
+ traits = dict([memb for memb in getmembers(cls) if
+ isinstance(memb[1], TraitType)])
+
+ if len(metadata) == 0:
+ return traits
+
+ result = {}
+ for name, trait in traits.items():
+ for meta_name, meta_eval in metadata.items():
if not callable(meta_eval):
- meta_eval = _SimpleTest(meta_eval)
- if not meta_eval(trait.metadata.get(meta_name, None)):
- break
- else:
- result[name] = trait
-
- return result
-
- @classmethod
- def class_own_traits(cls, **metadata):
- """Get a dict of all the traitlets defined on this class, not a parent.
-
- Works like `class_traits`, except for excluding traits from parents.
- """
- sup = super(cls, cls)
- return {n: t for (n, t) in cls.class_traits(**metadata).items()
- if getattr(sup, n, None) is not t}
-
- def has_trait(self, name):
- """Returns True if the object has a trait with the specified name."""
- return isinstance(getattr(self.__class__, name, None), TraitType)
+ meta_eval = _SimpleTest(meta_eval)
+ if not meta_eval(trait.metadata.get(meta_name, None)):
+ break
+ else:
+ result[name] = trait
+
+ return result
+
+ @classmethod
+ def class_own_traits(cls, **metadata):
+ """Get a dict of all the traitlets defined on this class, not a parent.
+
+ Works like `class_traits`, except for excluding traits from parents.
+ """
+ sup = super(cls, cls)
+ return {n: t for (n, t) in cls.class_traits(**metadata).items()
+ if getattr(sup, n, None) is not t}
+
+ def has_trait(self, name):
+ """Returns True if the object has a trait with the specified name."""
+ return isinstance(getattr(self.__class__, name, None), TraitType)
def trait_has_value(self, name):
"""Returns True if the specified trait has a value.
@@ -1587,65 +1587,65 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
defaults[n] = self._get_trait_default_generator(n)(self)
return defaults
- def trait_names(self, **metadata):
- """Get a list of all the names of this class' traits."""
+ def trait_names(self, **metadata):
+ """Get a list of all the names of this class' traits."""
return list(self.traits(**metadata))
-
- def traits(self, **metadata):
- """Get a ``dict`` of all the traits of this class. The dictionary
- is keyed on the name and the values are the TraitType objects.
-
- The TraitTypes returned don't know anything about the values
- that the various HasTraits instances are holding.
-
- The metadata kwargs allow functions to be passed in which
- filter traits based on metadata values. The functions should
- take a single value as an argument and return a boolean. If
- any function returns False, then the trait is not included in
- the output. If a metadata key doesn't exist, None will be passed
- to the function.
- """
- traits = dict([memb for memb in getmembers(self.__class__) if
- isinstance(memb[1], TraitType)])
-
- if len(metadata) == 0:
- return traits
-
- result = {}
- for name, trait in traits.items():
- for meta_name, meta_eval in metadata.items():
+
+ def traits(self, **metadata):
+ """Get a ``dict`` of all the traits of this class. The dictionary
+ is keyed on the name and the values are the TraitType objects.
+
+ The TraitTypes returned don't know anything about the values
+ that the various HasTraits instances are holding.
+
+ The metadata kwargs allow functions to be passed in which
+ filter traits based on metadata values. The functions should
+ take a single value as an argument and return a boolean. If
+ any function returns False, then the trait is not included in
+ the output. If a metadata key doesn't exist, None will be passed
+ to the function.
+ """
+ traits = dict([memb for memb in getmembers(self.__class__) if
+ isinstance(memb[1], TraitType)])
+
+ if len(metadata) == 0:
+ return traits
+
+ result = {}
+ for name, trait in traits.items():
+ for meta_name, meta_eval in metadata.items():
if not callable(meta_eval):
- meta_eval = _SimpleTest(meta_eval)
- if not meta_eval(trait.metadata.get(meta_name, None)):
- break
- else:
- result[name] = trait
-
- return result
-
- def trait_metadata(self, traitname, key, default=None):
- """Get metadata values for trait by key."""
- try:
- trait = getattr(self.__class__, traitname)
- except AttributeError:
- raise TraitError("Class %s does not have a trait named %s" %
- (self.__class__.__name__, traitname))
+ meta_eval = _SimpleTest(meta_eval)
+ if not meta_eval(trait.metadata.get(meta_name, None)):
+ break
+ else:
+ result[name] = trait
+
+ return result
+
+ def trait_metadata(self, traitname, key, default=None):
+ """Get metadata values for trait by key."""
+ try:
+ trait = getattr(self.__class__, traitname)
+ except AttributeError:
+ raise TraitError("Class %s does not have a trait named %s" %
+ (self.__class__.__name__, traitname))
metadata_name = '_' + traitname + '_metadata'
if hasattr(self, metadata_name) and key in getattr(self, metadata_name):
return getattr(self, metadata_name).get(key, default)
- else:
- return trait.metadata.get(key, default)
-
+ else:
+ return trait.metadata.get(key, default)
+
@classmethod
def class_own_trait_events(cls, name):
"""Get a dict of all event handlers defined on this class, not a parent.
-
+
Works like ``event_handlers``, except for excluding handlers defined on parents.
"""
sup = super(cls, cls)
return {n: e for (n, e) in cls.events(name).items()
if getattr(sup, n, None) is not e}
-
+
@classmethod
def trait_events(cls, name=None):
"""Get a ``dict`` of all the event handlers of this class.
@@ -1672,288 +1672,288 @@ class HasTraits(HasDescriptors, metaclass=MetaHasTraits):
events[k] = v
return events
-#-----------------------------------------------------------------------------
-# Actual TraitTypes implementations/subclasses
-#-----------------------------------------------------------------------------
-
-#-----------------------------------------------------------------------------
-# TraitTypes subclasses for handling classes and instances of classes
-#-----------------------------------------------------------------------------
-
-
-class ClassBasedTraitType(TraitType):
- """
- A trait with error reporting and string -> type resolution for Type,
- Instance and This.
- """
-
- def _resolve_string(self, string):
- """
- Resolve a string supplied for a type into an actual object.
- """
- return import_item(string)
-
-
-class Type(ClassBasedTraitType):
- """A trait whose value must be a subclass of a specified class."""
-
+#-----------------------------------------------------------------------------
+# Actual TraitTypes implementations/subclasses
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+# TraitTypes subclasses for handling classes and instances of classes
+#-----------------------------------------------------------------------------
+
+
+class ClassBasedTraitType(TraitType):
+ """
+ A trait with error reporting and string -> type resolution for Type,
+ Instance and This.
+ """
+
+ def _resolve_string(self, string):
+ """
+ Resolve a string supplied for a type into an actual object.
+ """
+ return import_item(string)
+
+
+class Type(ClassBasedTraitType):
+ """A trait whose value must be a subclass of a specified class."""
+
def __init__(self, default_value=Undefined, klass=None, **kwargs):
- """Construct a Type trait
-
- A Type trait specifies that its values must be subclasses of
- a particular class.
-
- If only ``default_value`` is given, it is used for the ``klass`` as
- well. If neither are given, both default to ``object``.
-
- Parameters
- ----------
- default_value : class, str or None
- The default value must be a subclass of klass. If a str,
- the str must be a fully specified class name, like 'foo.bar.Bah'.
- The string is resolved into a real class when the parent
- :class:`HasTraits` class is instantiated.
- klass : class, str [ default object ]
- Values of this trait must be a subclass of klass. The klass
- may be specified in a string like: 'foo.bar.MyClass'.
- The string is resolved into a real class when the parent
- :class:`HasTraits` class is instantiated.
- allow_none : bool [ default False ]
- Indicates whether None is allowed as an assignable value.
+ """Construct a Type trait
+
+ A Type trait specifies that its values must be subclasses of
+ a particular class.
+
+ If only ``default_value`` is given, it is used for the ``klass`` as
+ well. If neither are given, both default to ``object``.
+
+ Parameters
+ ----------
+ default_value : class, str or None
+ The default value must be a subclass of klass. If a str,
+ the str must be a fully specified class name, like 'foo.bar.Bah'.
+ The string is resolved into a real class when the parent
+ :class:`HasTraits` class is instantiated.
+ klass : class, str [ default object ]
+ Values of this trait must be a subclass of klass. The klass
+ may be specified in a string like: 'foo.bar.MyClass'.
+ The string is resolved into a real class when the parent
+ :class:`HasTraits` class is instantiated.
+ allow_none : bool [ default False ]
+ Indicates whether None is allowed as an assignable value.
**kwargs
extra kwargs passed to `ClassBasedTraitType`
- """
- if default_value is Undefined:
- new_default_value = object if (klass is None) else klass
- else:
- new_default_value = default_value
-
- if klass is None:
- if (default_value is None) or (default_value is Undefined):
- klass = object
- else:
- klass = default_value
-
+ """
+ if default_value is Undefined:
+ new_default_value = object if (klass is None) else klass
+ else:
+ new_default_value = default_value
+
+ if klass is None:
+ if (default_value is None) or (default_value is Undefined):
+ klass = object
+ else:
+ klass = default_value
+
if not (inspect.isclass(klass) or isinstance(klass, str)):
- raise TraitError("A Type trait must specify a class.")
-
- self.klass = klass
-
+ raise TraitError("A Type trait must specify a class.")
+
+ self.klass = klass
+
super().__init__(new_default_value, **kwargs)
-
- def validate(self, obj, value):
- """Validates that the value is a valid object instance."""
+
+ def validate(self, obj, value):
+ """Validates that the value is a valid object instance."""
if isinstance(value, str):
- try:
- value = self._resolve_string(value)
- except ImportError:
- raise TraitError("The '%s' trait of %s instance must be a type, but "
- "%r could not be imported" % (self.name, obj, value))
- try:
- if issubclass(value, self.klass):
- return value
+ try:
+ value = self._resolve_string(value)
+ except ImportError:
+ raise TraitError("The '%s' trait of %s instance must be a type, but "
+ "%r could not be imported" % (self.name, obj, value))
+ try:
+ if issubclass(value, self.klass):
+ return value
except Exception:
- pass
-
- self.error(obj, value)
-
- def info(self):
- """ Returns a description of the trait."""
+ pass
+
+ self.error(obj, value)
+
+ def info(self):
+ """ Returns a description of the trait."""
if isinstance(self.klass, str):
- klass = self.klass
- else:
+ klass = self.klass
+ else:
klass = self.klass.__module__ + '.' + self.klass.__name__
- result = "a subclass of '%s'" % klass
- if self.allow_none:
- return result + ' or None'
- return result
-
- def instance_init(self, obj):
- self._resolve_classes()
+ result = "a subclass of '%s'" % klass
+ if self.allow_none:
+ return result + ' or None'
+ return result
+
+ def instance_init(self, obj):
+ self._resolve_classes()
super().instance_init(obj)
-
- def _resolve_classes(self):
+
+ def _resolve_classes(self):
if isinstance(self.klass, str):
- self.klass = self._resolve_string(self.klass)
+ self.klass = self._resolve_string(self.klass)
if isinstance(self.default_value, str):
- self.default_value = self._resolve_string(self.default_value)
-
- def default_value_repr(self):
- value = self.default_value
+ self.default_value = self._resolve_string(self.default_value)
+
+ def default_value_repr(self):
+ value = self.default_value
if isinstance(value, str):
- return repr(value)
- else:
+ return repr(value)
+ else:
return repr(f'{value.__module__}.{value.__name__}')
-
-
-class Instance(ClassBasedTraitType):
- """A trait whose value must be an instance of a specified class.
-
- The value can also be an instance of a subclass of the specified class.
-
- Subclasses can declare default classes by overriding the klass attribute
- """
-
- klass = None
-
+
+
+class Instance(ClassBasedTraitType):
+ """A trait whose value must be an instance of a specified class.
+
+ The value can also be an instance of a subclass of the specified class.
+
+ Subclasses can declare default classes by overriding the klass attribute
+ """
+
+ klass = None
+
def __init__(self, klass=None, args=None, kw=None, **kwargs):
- """Construct an Instance trait.
-
- This trait allows values that are instances of a particular
- class or its subclasses. Our implementation is quite different
- from that of enthought.traits as we don't allow instances to be used
- for klass and we handle the ``args`` and ``kw`` arguments differently.
-
- Parameters
- ----------
- klass : class, str
- The class that forms the basis for the trait. Class names
- can also be specified as strings, like 'foo.bar.Bar'.
- args : tuple
- Positional arguments for generating the default value.
- kw : dict
- Keyword arguments for generating the default value.
- allow_none : bool [ default False ]
- Indicates whether None is allowed as a value.
+ """Construct an Instance trait.
+
+ This trait allows values that are instances of a particular
+ class or its subclasses. Our implementation is quite different
+ from that of enthought.traits as we don't allow instances to be used
+ for klass and we handle the ``args`` and ``kw`` arguments differently.
+
+ Parameters
+ ----------
+ klass : class, str
+ The class that forms the basis for the trait. Class names
+ can also be specified as strings, like 'foo.bar.Bar'.
+ args : tuple
+ Positional arguments for generating the default value.
+ kw : dict
+ Keyword arguments for generating the default value.
+ allow_none : bool [ default False ]
+ Indicates whether None is allowed as a value.
**kwargs
Extra kwargs passed to `ClassBasedTraitType`
-
- Notes
- -----
- If both ``args`` and ``kw`` are None, then the default value is None.
- If ``args`` is a tuple and ``kw`` is a dict, then the default is
- created as ``klass(*args, **kw)``. If exactly one of ``args`` or ``kw`` is
- None, the None is replaced by ``()`` or ``{}``, respectively.
- """
- if klass is None:
- klass = self.klass
+
+ Notes
+ -----
+ If both ``args`` and ``kw`` are None, then the default value is None.
+ If ``args`` is a tuple and ``kw`` is a dict, then the default is
+ created as ``klass(*args, **kw)``. If exactly one of ``args`` or ``kw`` is
+ None, the None is replaced by ``()`` or ``{}``, respectively.
+ """
+ if klass is None:
+ klass = self.klass
if (klass is not None) and (inspect.isclass(klass) or isinstance(klass, str)):
- self.klass = klass
- else:
- raise TraitError('The klass attribute must be a class,'
- ' not: %r' % klass)
-
- if (kw is not None) and not isinstance(kw, dict):
- raise TraitError("The 'kw' argument must be a dict or None.")
- if (args is not None) and not isinstance(args, tuple):
- raise TraitError("The 'args' argument must be a tuple or None.")
-
- self.default_args = args
- self.default_kwargs = kw
-
+ self.klass = klass
+ else:
+ raise TraitError('The klass attribute must be a class,'
+ ' not: %r' % klass)
+
+ if (kw is not None) and not isinstance(kw, dict):
+ raise TraitError("The 'kw' argument must be a dict or None.")
+ if (args is not None) and not isinstance(args, tuple):
+ raise TraitError("The 'args' argument must be a tuple or None.")
+
+ self.default_args = args
+ self.default_kwargs = kw
+
super(Instance, self).__init__(**kwargs)
-
- def validate(self, obj, value):
- if isinstance(value, self.klass):
- return value
- else:
- self.error(obj, value)
-
- def info(self):
+
+ def validate(self, obj, value):
+ if isinstance(value, self.klass):
+ return value
+ else:
+ self.error(obj, value)
+
+ def info(self):
if isinstance(self.klass, str):
result = add_article(self.klass)
- else:
+ else:
result = describe("a", self.klass)
- if self.allow_none:
+ if self.allow_none:
result += ' or None'
- return result
-
- def instance_init(self, obj):
- self._resolve_classes()
+ return result
+
+ def instance_init(self, obj):
+ self._resolve_classes()
super().instance_init(obj)
-
- def _resolve_classes(self):
+
+ def _resolve_classes(self):
if isinstance(self.klass, str):
- self.klass = self._resolve_string(self.klass)
-
- def make_dynamic_default(self):
- if (self.default_args is None) and (self.default_kwargs is None):
- return None
- return self.klass(*(self.default_args or ()),
- **(self.default_kwargs or {}))
-
- def default_value_repr(self):
- return repr(self.make_dynamic_default())
-
+ self.klass = self._resolve_string(self.klass)
+
+ def make_dynamic_default(self):
+ if (self.default_args is None) and (self.default_kwargs is None):
+ return None
+ return self.klass(*(self.default_args or ()),
+ **(self.default_kwargs or {}))
+
+ def default_value_repr(self):
+ return repr(self.make_dynamic_default())
+
def from_string(self, s):
return _safe_literal_eval(s)
-
-
-class ForwardDeclaredMixin(object):
- """
- Mixin for forward-declared versions of Instance and Type.
- """
- def _resolve_string(self, string):
- """
- Find the specified class name by looking for it in the module in which
- our this_class attribute was defined.
- """
- modname = self.this_class.__module__
- return import_item('.'.join([modname, string]))
-
-
-class ForwardDeclaredType(ForwardDeclaredMixin, Type):
- """
- Forward-declared version of Type.
- """
- pass
-
-
-class ForwardDeclaredInstance(ForwardDeclaredMixin, Instance):
- """
- Forward-declared version of Instance.
- """
- pass
-
-
-class This(ClassBasedTraitType):
- """A trait for instances of the class containing this trait.
-
- Because of how and when class bodies are executed, the ``This``
- trait can only have a default value of None. Because of this, and
- because we always validate default values, ``allow_none`` is *always* true.
- """
-
- info_text = 'an instance of the same type as the receiver or None'
-
+
+
+class ForwardDeclaredMixin(object):
+ """
+ Mixin for forward-declared versions of Instance and Type.
+ """
+ def _resolve_string(self, string):
+ """
+ Find the specified class name by looking for it in the module in which
+ our this_class attribute was defined.
+ """
+ modname = self.this_class.__module__
+ return import_item('.'.join([modname, string]))
+
+
+class ForwardDeclaredType(ForwardDeclaredMixin, Type):
+ """
+ Forward-declared version of Type.
+ """
+ pass
+
+
+class ForwardDeclaredInstance(ForwardDeclaredMixin, Instance):
+ """
+ Forward-declared version of Instance.
+ """
+ pass
+
+
+class This(ClassBasedTraitType):
+ """A trait for instances of the class containing this trait.
+
+ Because of how and when class bodies are executed, the ``This``
+ trait can only have a default value of None. Because of this, and
+ because we always validate default values, ``allow_none`` is *always* true.
+ """
+
+ info_text = 'an instance of the same type as the receiver or None'
+
def __init__(self, **kwargs):
super(This, self).__init__(None, **kwargs)
-
- def validate(self, obj, value):
- # What if value is a superclass of obj.__class__? This is
- # complicated if it was the superclass that defined the This
- # trait.
- if isinstance(value, self.this_class) or (value is None):
- return value
- else:
- self.error(obj, value)
-
-
-class Union(TraitType):
- """A trait type representing a Union type."""
-
+
+ def validate(self, obj, value):
+ # What if value is a superclass of obj.__class__? This is
+ # complicated if it was the superclass that defined the This
+ # trait.
+ if isinstance(value, self.this_class) or (value is None):
+ return value
+ else:
+ self.error(obj, value)
+
+
+class Union(TraitType):
+ """A trait type representing a Union type."""
+
def __init__(self, trait_types, **kwargs):
- """Construct a Union trait.
-
- This trait allows values that are allowed by at least one of the
- specified trait types. A Union traitlet cannot have metadata on
- its own, besides the metadata of the listed types.
-
- Parameters
- ----------
+ """Construct a Union trait.
+
+ This trait allows values that are allowed by at least one of the
+ specified trait types. A Union traitlet cannot have metadata on
+ its own, besides the metadata of the listed types.
+
+ Parameters
+ ----------
trait_types : sequence
- The list of trait types of length at least 1.
-
- Notes
- -----
- Union([Float(), Bool(), Int()]) attempts to validate the provided values
- with the validation function of Float, then Bool, and finally Int.
- """
+ The list of trait types of length at least 1.
+
+ Notes
+ -----
+ Union([Float(), Bool(), Int()]) attempts to validate the provided values
+ with the validation function of Float, then Bool, and finally Int.
+ """
self.trait_types = list(trait_types)
self.info_text = " or ".join([tt.info() for tt in self.trait_types])
super(Union, self).__init__(**kwargs)
-
+
def default(self, obj=None):
default = super(Union, self).default(obj)
for t in self.trait_types:
@@ -1963,48 +1963,48 @@ class Union(TraitType):
break
return default
- def class_init(self, cls, name):
+ def class_init(self, cls, name):
for trait_type in reversed(self.trait_types):
- trait_type.class_init(cls, None)
- super(Union, self).class_init(cls, name)
-
- def instance_init(self, obj):
+ trait_type.class_init(cls, None)
+ super(Union, self).class_init(cls, name)
+
+ def instance_init(self, obj):
for trait_type in reversed(self.trait_types):
- trait_type.instance_init(obj)
- super(Union, self).instance_init(obj)
-
- def validate(self, obj, value):
+ trait_type.instance_init(obj)
+ super(Union, self).instance_init(obj)
+
+ def validate(self, obj, value):
with obj.cross_validation_lock:
- for trait_type in self.trait_types:
- try:
- v = trait_type._validate(obj, value)
+ for trait_type in self.trait_types:
+ try:
+ v = trait_type._validate(obj, value)
# In the case of an element trait, the name is None
if self.name is not None:
setattr(obj, '_' + self.name + '_metadata', trait_type.metadata)
- return v
- except TraitError:
- continue
- self.error(obj, value)
-
- def __or__(self, other):
- if isinstance(other, Union):
- return Union(self.trait_types + other.trait_types)
- else:
- return Union(self.trait_types + [other])
-
-
-#-----------------------------------------------------------------------------
-# Basic TraitTypes implementations/subclasses
-#-----------------------------------------------------------------------------
-
-
-class Any(TraitType):
- """A trait which allows any value."""
- default_value = None
+ return v
+ except TraitError:
+ continue
+ self.error(obj, value)
+
+ def __or__(self, other):
+ if isinstance(other, Union):
+ return Union(self.trait_types + other.trait_types)
+ else:
+ return Union(self.trait_types + [other])
+
+
+#-----------------------------------------------------------------------------
+# Basic TraitTypes implementations/subclasses
+#-----------------------------------------------------------------------------
+
+
+class Any(TraitType):
+ """A trait which allows any value."""
+ default_value = None
allow_none = True
- info_text = 'any value'
-
-
+ info_text = 'any value'
+
+
def _validate_bounds(trait, obj, value):
"""
Validate that a number to be applied to a trait is between bounds.
@@ -2029,122 +2029,122 @@ def _validate_bounds(trait, obj, value):
return value
-class Int(TraitType):
- """An int trait."""
-
- default_value = 0
- info_text = 'an int'
-
+class Int(TraitType):
+ """An int trait."""
+
+ default_value = 0
+ info_text = 'an int'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
- self.min = kwargs.pop('min', None)
- self.max = kwargs.pop('max', None)
- super(Int, self).__init__(default_value=default_value,
- allow_none=allow_none, **kwargs)
-
- def validate(self, obj, value):
- if not isinstance(value, int):
- self.error(obj, value)
+ self.min = kwargs.pop('min', None)
+ self.max = kwargs.pop('max', None)
+ super(Int, self).__init__(default_value=default_value,
+ allow_none=allow_none, **kwargs)
+
+ def validate(self, obj, value):
+ if not isinstance(value, int):
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
return int(s)
-
-class CInt(Int):
- """A casting version of the int trait."""
-
- def validate(self, obj, value):
- try:
+
+class CInt(Int):
+ """A casting version of the int trait."""
+
+ def validate(self, obj, value):
+ try:
value = int(value)
except Exception:
- self.error(obj, value)
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
+
Long, CLong = Int, CInt
Integer = Int
-
-
-class Float(TraitType):
- """A float trait."""
-
- default_value = 0.0
- info_text = 'a float'
-
+
+
+class Float(TraitType):
+ """A float trait."""
+
+ default_value = 0.0
+ info_text = 'a float'
+
def __init__(self, default_value=Undefined, allow_none=False, **kwargs):
- self.min = kwargs.pop('min', -float('inf'))
- self.max = kwargs.pop('max', float('inf'))
+ self.min = kwargs.pop('min', -float('inf'))
+ self.max = kwargs.pop('max', float('inf'))
super(Float, self).__init__(default_value=default_value,
- allow_none=allow_none, **kwargs)
-
- def validate(self, obj, value):
- if isinstance(value, int):
- value = float(value)
- if not isinstance(value, float):
- self.error(obj, value)
+ allow_none=allow_none, **kwargs)
+
+ def validate(self, obj, value):
+ if isinstance(value, int):
+ value = float(value)
+ if not isinstance(value, float):
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
return float(s)
-
-class CFloat(Float):
- """A casting version of the float trait."""
-
- def validate(self, obj, value):
- try:
+
+class CFloat(Float):
+ """A casting version of the float trait."""
+
+ def validate(self, obj, value):
+ try:
value = float(value)
except Exception:
- self.error(obj, value)
+ self.error(obj, value)
return _validate_bounds(self, obj, value)
-
-
-class Complex(TraitType):
- """A trait for complex numbers."""
-
- default_value = 0.0 + 0.0j
- info_text = 'a complex number'
-
- def validate(self, obj, value):
- if isinstance(value, complex):
- return value
- if isinstance(value, (float, int)):
- return complex(value)
- self.error(obj, value)
-
+
+
+class Complex(TraitType):
+ """A trait for complex numbers."""
+
+ default_value = 0.0 + 0.0j
+ info_text = 'a complex number'
+
+ def validate(self, obj, value):
+ if isinstance(value, complex):
+ return value
+ if isinstance(value, (float, int)):
+ return complex(value)
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
return complex(s)
-
-
-class CComplex(Complex):
- """A casting version of the complex number trait."""
-
- def validate (self, obj, value):
- try:
- return complex(value)
+
+
+class CComplex(Complex):
+ """A casting version of the complex number trait."""
+
+ def validate (self, obj, value):
+ try:
+ return complex(value)
except Exception:
- self.error(obj, value)
-
-# We should always be explicit about whether we're using bytes or unicode, both
-# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
-# we don't have a Str type.
-class Bytes(TraitType):
- """A trait for byte strings."""
-
- default_value = b''
- info_text = 'a bytes object'
-
- def validate(self, obj, value):
- if isinstance(value, bytes):
- return value
- self.error(obj, value)
-
+ self.error(obj, value)
+
+# We should always be explicit about whether we're using bytes or unicode, both
+# for Python 3 conversion and for reliable unicode behaviour on Python 2. So
+# we don't have a Str type.
+class Bytes(TraitType):
+ """A trait for byte strings."""
+
+ default_value = b''
+ info_text = 'a bytes object'
+
+ def validate(self, obj, value):
+ if isinstance(value, bytes):
+ return value
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == "None":
return None
@@ -2160,35 +2160,35 @@ class Bytes(TraitType):
FutureWarning)
break
return s.encode("utf8")
-
-
-class CBytes(Bytes):
- """A casting version of the byte string trait."""
-
- def validate(self, obj, value):
- try:
- return bytes(value)
+
+
+class CBytes(Bytes):
+ """A casting version of the byte string trait."""
+
+ def validate(self, obj, value):
+ try:
+ return bytes(value)
except Exception:
- self.error(obj, value)
-
-
-class Unicode(TraitType):
- """A trait for unicode strings."""
-
+ self.error(obj, value)
+
+
+class Unicode(TraitType):
+ """A trait for unicode strings."""
+
default_value = ''
- info_text = 'a unicode string'
-
- def validate(self, obj, value):
+ info_text = 'a unicode string'
+
+ def validate(self, obj, value):
if isinstance(value, str):
- return value
- if isinstance(value, bytes):
- try:
- return value.decode('ascii', 'strict')
- except UnicodeDecodeError:
- msg = "Could not decode {!r} for unicode trait '{}' of {} instance."
- raise TraitError(msg.format(value, self.name, class_of(obj)))
- self.error(obj, value)
-
+ return value
+ if isinstance(value, bytes):
+ try:
+ return value.decode('ascii', 'strict')
+ except UnicodeDecodeError:
+ msg = "Could not decode {!r} for unicode trait '{}' of {} instance."
+ raise TraitError(msg.format(value, self.name, class_of(obj)))
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
@@ -2204,66 +2204,66 @@ class Unicode(TraitType):
"You can use %r instead of %r if you require traitlets >=5." % (s, old_s),
FutureWarning)
return s
-
-class CUnicode(Unicode):
- """A casting version of the unicode trait."""
-
- def validate(self, obj, value):
- try:
+
+class CUnicode(Unicode):
+ """A casting version of the unicode trait."""
+
+ def validate(self, obj, value):
+ try:
return str(value)
except Exception:
- self.error(obj, value)
-
-
-class ObjectName(TraitType):
- """A string holding a valid object name in this version of Python.
-
- This does not check that the name exists in any scope."""
- info_text = "a valid object identifier in Python"
-
+ self.error(obj, value)
+
+
+class ObjectName(TraitType):
+ """A string holding a valid object name in this version of Python.
+
+ This does not check that the name exists in any scope."""
+ info_text = "a valid object identifier in Python"
+
coerce_str = staticmethod(lambda _, s: s)
-
- def validate(self, obj, value):
- value = self.coerce_str(obj, value)
-
+
+ def validate(self, obj, value):
+ value = self.coerce_str(obj, value)
+
if isinstance(value, str) and isidentifier(value):
- return value
- self.error(obj, value)
-
+ return value
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
return s
-class DottedObjectName(ObjectName):
- """A string holding a valid dotted object name in Python, such as A.b3._c"""
- def validate(self, obj, value):
- value = self.coerce_str(obj, value)
-
+class DottedObjectName(ObjectName):
+ """A string holding a valid dotted object name in Python, such as A.b3._c"""
+ def validate(self, obj, value):
+ value = self.coerce_str(obj, value)
+
if isinstance(value, str) and all(isidentifier(a)
for a in value.split('.')):
- return value
- self.error(obj, value)
-
-
-class Bool(TraitType):
- """A boolean (True, False) trait."""
-
- default_value = False
- info_text = 'a boolean'
-
- def validate(self, obj, value):
- if isinstance(value, bool):
- return value
+ return value
+ self.error(obj, value)
+
+
+class Bool(TraitType):
+ """A boolean (True, False) trait."""
+
+ default_value = False
+ info_text = 'a boolean'
+
+ def validate(self, obj, value):
+ if isinstance(value, bool):
+ return value
elif isinstance(value, int):
if value == 1:
return True
elif value == 0:
return False
- self.error(obj, value)
-
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
@@ -2274,32 +2274,32 @@ class Bool(TraitType):
return False
else:
raise ValueError("%r is not 1, 0, true, or false")
-
-
-class CBool(Bool):
- """A casting version of the boolean trait."""
-
- def validate(self, obj, value):
- try:
- return bool(value)
+
+
+class CBool(Bool):
+ """A casting version of the boolean trait."""
+
+ def validate(self, obj, value):
+ try:
+ return bool(value)
except Exception:
- self.error(obj, value)
-
-
-class Enum(TraitType):
- """An enum whose value must be in a given sequence."""
-
+ self.error(obj, value)
+
+
+class Enum(TraitType):
+ """An enum whose value must be in a given sequence."""
+
def __init__(self, values, default_value=Undefined, **kwargs):
- self.values = values
+ self.values = values
if kwargs.get('allow_none', False) and default_value is Undefined:
- default_value = None
+ default_value = None
super(Enum, self).__init__(default_value, **kwargs)
-
- def validate(self, obj, value):
- if value in self.values:
- return value
- self.error(obj, value)
-
+
+ def validate(self, obj, value):
+ if value in self.values:
+ return value
+ self.error(obj, value)
+
def _choices_str(self, as_rst=False):
""" Returns a description of the trait choices (not none)."""
choices = self.values
@@ -2310,12 +2310,12 @@ class Enum(TraitType):
return choices
def _info(self, as_rst=False):
- """ Returns a description of the trait."""
+ """ Returns a description of the trait."""
none = (' or %s' % ('`None`' if as_rst else 'None')
if self.allow_none else
'')
return 'any of %s%s' % (self._choices_str(as_rst), none)
-
+
def info(self):
return self._info(as_rst=False)
@@ -2329,21 +2329,21 @@ class Enum(TraitType):
return _safe_literal_eval(s)
-class CaselessStrEnum(Enum):
- """An enum of strings where the case should be ignored."""
+class CaselessStrEnum(Enum):
+ """An enum of strings where the case should be ignored."""
def __init__(self, values, default_value=Undefined, **kwargs):
super().__init__(values, default_value=default_value, **kwargs)
- def validate(self, obj, value):
+ def validate(self, obj, value):
if not isinstance(value, str):
- self.error(obj, value)
-
- for v in self.values:
- if v.lower() == value.lower():
- return v
- self.error(obj, value)
-
+ self.error(obj, value)
+
+ for v in self.values:
+ if v.lower() == value.lower():
+ return v
+ self.error(obj, value)
+
def _info(self, as_rst=False):
""" Returns a description of the trait."""
none = (' or %s' % ('`None`' if as_rst else 'None')
@@ -2408,52 +2408,52 @@ class FuzzyEnum(Enum):
return self._info(as_rst=True)
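CaselessStrEnum compares case-insensitively but, notably, stores the canonical spelling from self.values rather than what the caller passed. For example, under the same assumptions as above:

from traitlets import CaselessStrEnum, HasTraits

class Term(HasTraits):
    color = CaselessStrEnum(("Red", "Green"), default_value="Red")

t = Term()
t.color = "GREEN"          # matches "Green" case-insensitively...
assert t.color == "Green"  # ...and the canonical casing is stored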
-class Container(Instance):
- """An instance of a container (list, set, etc.)
-
- To be subclassed by overriding klass.
- """
+class Container(Instance):
+ """An instance of a container (list, set, etc.)
+
+ To be subclassed by overriding klass.
+ """
- klass = None
- _cast_types = ()
- _valid_defaults = SequenceTypes
- _trait = None
+ klass = None
+ _cast_types = ()
+ _valid_defaults = SequenceTypes
+ _trait = None
_literal_from_string_pairs = ("[]", "()")
-
+
def __init__(self, trait=None, default_value=Undefined, **kwargs):
- """Create a container trait type from a list, set, or tuple.
-
- The default value is created by doing ``List(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = List([1, 2, 3])``
-
- Parameters
- ----------
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container. If unspecified,
- types are not checked.
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
- allow_none : bool [ default False ]
- Whether to allow the value to be None
+ """Create a container trait type from a list, set, or tuple.
+
+ The default value is created by doing ``List(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = List([1, 2, 3])``
+
+ Parameters
+ ----------
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container. If unspecified,
+ types are not checked.
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+ allow_none : bool [ default False ]
+ Whether to allow the value to be None
**kwargs : any
-            further keys for extensions to the Trait (e.g. config)
-
-        """
-        # allow List([values]):
+            further keys for extensions to the Trait (e.g. config)
+        """
+
+        # allow List([values]):
if trait is not None and default_value is Undefined and not is_trait(trait):
- default_value = trait
- trait = None
-
+ default_value = trait
+ trait = None
+
if default_value is None and not kwargs.get("allow_none", False):
# improve backward-compatibility for possible subclasses
# specifying default_value=None as default,
@@ -2468,68 +2468,68 @@ class Container(Instance):
default_value = Undefined
if default_value is Undefined:
- args = ()
+ args = ()
elif default_value is None:
# default_value back on kwargs for super() to handle
args = ()
kwargs["default_value"] = None
- elif isinstance(default_value, self._valid_defaults):
- args = (default_value,)
- else:
+ elif isinstance(default_value, self._valid_defaults):
+ args = (default_value,)
+ else:
raise TypeError(
"default value of %s was %s" % (self.__class__.__name__, default_value)
)
-
- if is_trait(trait):
- if isinstance(trait, type):
+
+ if is_trait(trait):
+ if isinstance(trait, type):
warn(
"Traits should be given as instances, not types (for example, `Int()`, not `Int`)."
" Passing types is deprecated in traitlets 4.1.",
DeprecationWarning,
stacklevel=3,
)
- self._trait = trait() if isinstance(trait, type) else trait
- elif trait is not None:
+ self._trait = trait() if isinstance(trait, type) else trait
+ elif trait is not None:
raise TypeError(
"`trait` must be a Trait or None, got %s" % repr_type(trait)
)
-
+
super(Container, self).__init__(klass=self.klass, args=args, **kwargs)
-
- def validate(self, obj, value):
- if isinstance(value, self._cast_types):
- value = self.klass(value)
- value = super(Container, self).validate(obj, value)
- if value is None:
- return value
-
- value = self.validate_elements(obj, value)
-
- return value
-
- def validate_elements(self, obj, value):
- validated = []
- if self._trait is None or isinstance(self._trait, Any):
- return value
- for v in value:
- try:
- v = self._trait._validate(obj, v)
+
+ def validate(self, obj, value):
+ if isinstance(value, self._cast_types):
+ value = self.klass(value)
+ value = super(Container, self).validate(obj, value)
+ if value is None:
+ return value
+
+ value = self.validate_elements(obj, value)
+
+ return value
+
+ def validate_elements(self, obj, value):
+ validated = []
+ if self._trait is None or isinstance(self._trait, Any):
+ return value
+ for v in value:
+ try:
+ v = self._trait._validate(obj, v)
except TraitError as error:
self.error(obj, v, error)
- else:
- validated.append(v)
- return self.klass(validated)
-
- def class_init(self, cls, name):
- if isinstance(self._trait, TraitType):
- self._trait.class_init(cls, None)
- super(Container, self).class_init(cls, name)
-
- def instance_init(self, obj):
- if isinstance(self._trait, TraitType):
- self._trait.instance_init(obj)
- super(Container, self).instance_init(obj)
-
+ else:
+ validated.append(v)
+ return self.klass(validated)
+
+ def class_init(self, cls, name):
+ if isinstance(self._trait, TraitType):
+ self._trait.class_init(cls, None)
+ super(Container, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ if isinstance(self._trait, TraitType):
+ self._trait.instance_init(obj)
+ super(Container, self).instance_init(obj)
+
def from_string(self, s):
"""Load value from a single string"""
if not isinstance(s, str):
@@ -2539,7 +2539,7 @@ class Container(Instance):
except Exception:
test = None
return self.validate(None, test)
-
+
def from_string_list(self, s_list):
"""Return the value from a list of config strings
@@ -2588,11 +2588,11 @@ class Container(Instance):
return s
-class List(Container):
- """An instance of a Python list."""
- klass = list
- _cast_types = (tuple,)
-
+class List(Container):
+ """An instance of a Python list."""
+ klass = list
+ _cast_types = (tuple,)
+
def __init__(
self,
trait=None,
@@ -2601,64 +2601,64 @@ class List(Container):
maxlen=sys.maxsize,
**kwargs,
):
- """Create a List trait type from a list, set, or tuple.
-
- The default value is created by doing ``list(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = List([1, 2, 3])``
-
- Parameters
- ----------
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container.
- If unspecified, types are not checked.
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
- minlen : Int [ default 0 ]
- The minimum length of the input list
- maxlen : Int [ default sys.maxsize ]
- The maximum length of the input list
- """
- self._minlen = minlen
- self._maxlen = maxlen
- super(List, self).__init__(trait=trait, default_value=default_value,
+ """Create a List trait type from a list, set, or tuple.
+
+ The default value is created by doing ``list(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = List([1, 2, 3])``
+
+ Parameters
+ ----------
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container.
+ If unspecified, types are not checked.
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+ minlen : Int [ default 0 ]
+ The minimum length of the input list
+ maxlen : Int [ default sys.maxsize ]
+ The maximum length of the input list
+ """
+ self._minlen = minlen
+ self._maxlen = maxlen
+ super(List, self).__init__(trait=trait, default_value=default_value,
**kwargs)
-
- def length_error(self, obj, value):
- e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
- % (self.name, class_of(obj), self._minlen, self._maxlen, value)
- raise TraitError(e)
-
- def validate_elements(self, obj, value):
- length = len(value)
- if length < self._minlen or length > self._maxlen:
- self.length_error(obj, value)
-
- return super(List, self).validate_elements(obj, value)
-
+
+ def length_error(self, obj, value):
+ e = "The '%s' trait of %s instance must be of length %i <= L <= %i, but a value of %s was specified." \
+ % (self.name, class_of(obj), self._minlen, self._maxlen, value)
+ raise TraitError(e)
+
+ def validate_elements(self, obj, value):
+ length = len(value)
+ if length < self._minlen or length > self._maxlen:
+ self.length_error(obj, value)
+
+ return super(List, self).validate_elements(obj, value)
+
def set(self, obj, value):
if isinstance(value, str):
return super().set(obj, [value])
else:
return super().set(obj, value)
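List adds the minlen/maxlen bounds on top of Container's element validation, and its _cast_types means tuples are silently converted. A sketch of the restored behavior, assuming the standard traitlets exports:

from traitlets import HasTraits, Int, List, TraitError

class Job(HasTraits):
    retries = List(Int(), [1, 2], minlen=1, maxlen=3)

j = Job()
j.retries = (3, 4)        # tuple is in _cast_types -> cast to list
assert j.retries == [3, 4]
try:
    j.retries = []        # violates minlen=1 -> length_error()
except TraitError:
    pass
try:
    j.retries = [1, "x"]  # element fails the Int() trait
except TraitError:
    pass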
-
-class Set(List):
- """An instance of a Python set."""
- klass = set
- _cast_types = (tuple, list)
-
+
+class Set(List):
+ """An instance of a Python set."""
+ klass = set
+ _cast_types = (tuple, list)
+
_literal_from_string_pairs = ("[]", "()", "{}")
- # Redefine __init__ just to make the docstring more accurate.
+ # Redefine __init__ just to make the docstring more accurate.
def __init__(
self,
trait=None,
@@ -2667,84 +2667,84 @@ class Set(List):
maxlen=sys.maxsize,
**kwargs,
):
- """Create a Set trait type from a list, set, or tuple.
-
- The default value is created by doing ``set(default_value)``,
- which creates a copy of the ``default_value``.
-
- ``trait`` can be specified, which restricts the type of elements
- in the container to that TraitType.
-
- If only one arg is given and it is not a Trait, it is taken as
- ``default_value``:
-
- ``c = Set({1, 2, 3})``
-
- Parameters
- ----------
- trait : TraitType [ optional ]
- the type for restricting the contents of the Container.
- If unspecified, types are not checked.
- default_value : SequenceType [ optional ]
- The default value for the Trait. Must be list/tuple/set, and
- will be cast to the container type.
- minlen : Int [ default 0 ]
- The minimum length of the input list
- maxlen : Int [ default sys.maxsize ]
- The maximum length of the input list
- """
+ """Create a Set trait type from a list, set, or tuple.
+
+ The default value is created by doing ``set(default_value)``,
+ which creates a copy of the ``default_value``.
+
+ ``trait`` can be specified, which restricts the type of elements
+ in the container to that TraitType.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ ``default_value``:
+
+ ``c = Set({1, 2, 3})``
+
+ Parameters
+ ----------
+ trait : TraitType [ optional ]
+ the type for restricting the contents of the Container.
+ If unspecified, types are not checked.
+ default_value : SequenceType [ optional ]
+ The default value for the Trait. Must be list/tuple/set, and
+ will be cast to the container type.
+ minlen : Int [ default 0 ]
+ The minimum length of the input list
+ maxlen : Int [ default sys.maxsize ]
+ The maximum length of the input list
+ """
super(Set, self).__init__(trait, default_value, minlen, maxlen, **kwargs)
-
+
def default_value_repr(self):
# Ensure default value is sorted for a reproducible build
list_repr = repr(sorted(self.make_dynamic_default()))
if list_repr == '[]':
return 'set()'
return '{'+list_repr[1:-1]+'}'
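Set reuses the List machinery but casts through set(), and default_value_repr() sorts the elements so generated source is byte-for-byte reproducible. Briefly:

from traitlets import HasTraits, Set

class Filters(HasTraits):
    tags = Set({"a", "b"})

f = Filters()
f.tags = ["b", "b", "c"]     # lists/tuples are cast via set()
assert f.tags == {"b", "c"}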
-
-class Tuple(Container):
- """An instance of a Python tuple."""
- klass = tuple
- _cast_types = (list,)
-
+class Tuple(Container):
+ """An instance of a Python tuple."""
+
+ klass = tuple
+ _cast_types = (list,)
+
def __init__(self, *traits, **kwargs):
- """Create a tuple from a list, set, or tuple.
-
- Create a fixed-type tuple with Traits:
-
- ``t = Tuple(Int(), Str(), CStr())``
-
-    would be length 3, with Int, Str, CStr for each element.
-
- If only one arg is given and it is not a Trait, it is taken as
- default_value:
-
- ``t = Tuple((1, 2, 3))``
-
- Otherwise, ``default_value`` *must* be specified by keyword.
-
- Parameters
- ----------
+ """Create a tuple from a list, set, or tuple.
+
+ Create a fixed-type tuple with Traits:
+
+ ``t = Tuple(Int(), Str(), CStr())``
+
+    would be length 3, with Int, Str, CStr for each element.
+
+ If only one arg is given and it is not a Trait, it is taken as
+ default_value:
+
+ ``t = Tuple((1, 2, 3))``
+
+ Otherwise, ``default_value`` *must* be specified by keyword.
+
+ Parameters
+ ----------
*traits : TraitTypes [ optional ]
- the types for restricting the contents of the Tuple. If unspecified,
- types are not checked. If specified, then each positional argument
- corresponds to an element of the tuple. Tuples defined with traits
- are of fixed length.
- default_value : SequenceType [ optional ]
- The default value for the Tuple. Must be list/tuple/set, and
- will be cast to a tuple. If ``traits`` are specified,
- ``default_value`` must conform to the shape and type they specify.
+ the types for restricting the contents of the Tuple. If unspecified,
+ types are not checked. If specified, then each positional argument
+ corresponds to an element of the tuple. Tuples defined with traits
+ are of fixed length.
+ default_value : SequenceType [ optional ]
+ The default value for the Tuple. Must be list/tuple/set, and
+ will be cast to a tuple. If ``traits`` are specified,
+ ``default_value`` must conform to the shape and type they specify.
**kwargs
Other kwargs passed to `Container`
- """
+ """
default_value = kwargs.pop("default_value", Undefined)
- # allow Tuple((values,)):
- if len(traits) == 1 and default_value is Undefined and not is_trait(traits[0]):
- default_value = traits[0]
- traits = ()
-
+ # allow Tuple((values,)):
+ if len(traits) == 1 and default_value is Undefined and not is_trait(traits[0]):
+ default_value = traits[0]
+ traits = ()
+
if default_value is None and not kwargs.get("allow_none", False):
# improve backward-compatibility for possible subclasses
# specifying default_value=None as default,
@@ -2758,22 +2758,22 @@ class Tuple(Container):
)
default_value = Undefined
- if default_value is Undefined:
- args = ()
+ if default_value is Undefined:
+ args = ()
elif default_value is None:
# default_value back on kwargs for super() to handle
args = ()
kwargs["default_value"] = None
- elif isinstance(default_value, self._valid_defaults):
- args = (default_value,)
- else:
+ elif isinstance(default_value, self._valid_defaults):
+ args = (default_value,)
+ else:
raise TypeError(
"default value of %s was %s" % (self.__class__.__name__, default_value)
)
-
- self._traits = []
- for trait in traits:
- if isinstance(trait, type):
+
+ self._traits = []
+ for trait in traits:
+ if isinstance(trait, type):
warn(
"Traits should be given as instances, not types (for example, `Int()`, not `Int`)"
" Passing types is deprecated in traitlets 4.1.",
@@ -2782,12 +2782,12 @@ class Tuple(Container):
)
trait = trait()
self._traits.append(trait)
-
+
if self._traits and (default_value is None or default_value is Undefined):
- # don't allow default to be an empty container if length is specified
- args = None
+ # don't allow default to be an empty container if length is specified
+ args = None
super(Container, self).__init__(klass=self.klass, args=args, **kwargs)
-
+
def item_from_string(self, s, index):
"""Cast a single item from a string
@@ -2799,41 +2799,41 @@ class Tuple(Container):
return s
return self._traits[index].from_string(s)
- def validate_elements(self, obj, value):
- if not self._traits:
- # nothing to validate
- return value
- if len(value) != len(self._traits):
- e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
- % (self.name, class_of(obj), len(self._traits), repr_type(value))
- raise TraitError(e)
-
- validated = []
- for t, v in zip(self._traits, value):
- try:
- v = t._validate(obj, v)
+ def validate_elements(self, obj, value):
+ if not self._traits:
+ # nothing to validate
+ return value
+ if len(value) != len(self._traits):
+ e = "The '%s' trait of %s instance requires %i elements, but a value of %s was specified." \
+ % (self.name, class_of(obj), len(self._traits), repr_type(value))
+ raise TraitError(e)
+
+ validated = []
+ for t, v in zip(self._traits, value):
+ try:
+ v = t._validate(obj, v)
except TraitError as error:
self.error(obj, v, error)
- else:
- validated.append(v)
- return tuple(validated)
-
- def class_init(self, cls, name):
- for trait in self._traits:
- if isinstance(trait, TraitType):
- trait.class_init(cls, None)
- super(Container, self).class_init(cls, name)
-
- def instance_init(self, obj):
- for trait in self._traits:
- if isinstance(trait, TraitType):
- trait.instance_init(obj)
- super(Container, self).instance_init(obj)
-
-
-class Dict(Instance):
+ else:
+ validated.append(v)
+ return tuple(validated)
+
+ def class_init(self, cls, name):
+ for trait in self._traits:
+ if isinstance(trait, TraitType):
+ trait.class_init(cls, None)
+ super(Container, self).class_init(cls, name)
+
+ def instance_init(self, obj):
+ for trait in self._traits:
+ if isinstance(trait, TraitType):
+ trait.instance_init(obj)
+ super(Container, self).instance_init(obj)
+
+
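Tuple differs from the other containers in that each positional trait pins one element, so a traited Tuple is fixed-length; validate_elements() enforces the arity. A sketch, same assumptions:

from traitlets import HasTraits, Int, TraitError, Tuple, Unicode

class Point(HasTraits):
    pair = Tuple(Int(), Unicode(), default_value=(0, "origin"))

p = Point()
p.pair = [1, "x"]         # lists are cast to tuple
assert p.pair == (1, "x")
try:
    p.pair = (1, "x", 2)  # wrong arity for a fixed-length Tuple
except TraitError:
    pass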
+class Dict(Instance):
"""An instance of a Python dict.
-
+
One or more traits can be passed to the constructor
to validate the keys and/or values of the dict.
If you need more detailed validation,
@@ -2851,10 +2851,10 @@ class Dict(Instance):
def __init__(self, value_trait=None, per_key_traits=None, key_trait=None, default_value=Undefined,
**kwargs):
"""Create a dict trait type from a Python dict.
-
- The default value is created by doing ``dict(default_value)``,
- which creates a copy of the ``default_value``.
-
+
+ The default value is created by doing ``dict(default_value)``,
+ which creates a copy of the ``default_value``.
+
Parameters
----------
value_trait : TraitType [ optional ]
@@ -2876,15 +2876,15 @@ class Dict(Instance):
--------
>>> d = Dict(Unicode())
a dict whose values must be text
-
+
>>> d2 = Dict(per_key_traits={"n": Integer(), "s": Unicode()})
d2['n'] must be an integer
d2['s'] must be text
-
+
>>> d3 = Dict(value_trait=Integer(), key_trait=Unicode())
d3's keys must be text
d3's values must be integers
- """
+ """
# handle deprecated keywords
trait = kwargs.pop('trait', None)
@@ -2908,40 +2908,40 @@ class Dict(Instance):
stacklevel=2,
)
- # Handling positional arguments
+ # Handling positional arguments
if default_value is Undefined and value_trait is not None:
if not is_trait(value_trait):
default_value = value_trait
value_trait = None
-
+
if key_trait is None and per_key_traits is not None:
if is_trait(per_key_traits):
key_trait = per_key_traits
per_key_traits = None
- # Handling default value
- if default_value is Undefined:
- default_value = {}
- if default_value is None:
- args = None
- elif isinstance(default_value, dict):
- args = (default_value,)
- elif isinstance(default_value, SequenceTypes):
- args = (default_value,)
- else:
- raise TypeError('default value of Dict was %s' % default_value)
-
- # Case where a type of TraitType is provided rather than an instance
+ # Handling default value
+ if default_value is Undefined:
+ default_value = {}
+ if default_value is None:
+ args = None
+ elif isinstance(default_value, dict):
+ args = (default_value,)
+ elif isinstance(default_value, SequenceTypes):
+ args = (default_value,)
+ else:
+ raise TypeError('default value of Dict was %s' % default_value)
+
+ # Case where a type of TraitType is provided rather than an instance
if is_trait(value_trait):
if isinstance(value_trait, type):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)"
" Passing types is deprecated in traitlets 4.1.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=2)
value_trait = value_trait()
self._value_trait = value_trait
elif value_trait is not None:
raise TypeError("`value_trait` must be a Trait or None, got %s" % repr_type(value_trait))
-
+
if is_trait(key_trait):
if isinstance(key_trait, type):
warn("Traits should be given as instances, not types (for example, `Int()`, not `Int`)"
@@ -2951,32 +2951,32 @@ class Dict(Instance):
self._key_trait = key_trait
elif key_trait is not None:
raise TypeError("`key_trait` must be a Trait or None, got %s" % repr_type(key_trait))
-
+
self._per_key_traits = per_key_traits
super(Dict, self).__init__(klass=dict, args=args, **kwargs)
-
+
def element_error(self, obj, element, validator, side='Values'):
e = side + " of the '%s' trait of %s instance must be %s, but a value of %s was specified." \
- % (self.name, class_of(obj), validator.info(), repr_type(element))
- raise TraitError(e)
-
- def validate(self, obj, value):
- value = super(Dict, self).validate(obj, value)
- if value is None:
- return value
- value = self.validate_elements(obj, value)
- return value
-
- def validate_elements(self, obj, value):
+ % (self.name, class_of(obj), validator.info(), repr_type(element))
+ raise TraitError(e)
+
+ def validate(self, obj, value):
+ value = super(Dict, self).validate(obj, value)
+ if value is None:
+ return value
+ value = self.validate_elements(obj, value)
+ return value
+
+ def validate_elements(self, obj, value):
per_key_override = self._per_key_traits or {}
key_trait = self._key_trait
value_trait = self._value_trait
if not (key_trait or value_trait or per_key_override):
-            return value
-        validated = {}
-        for key in value:
+            return value
+        validated = {}
+        for key in value:
v = value[key]
if key_trait:
try:
@@ -2991,28 +2991,28 @@ class Dict(Instance):
self.element_error(obj, v, active_value_trait, 'Values')
validated[key] = v
- return self.klass(validated)
-
- def class_init(self, cls, name):
+ return self.klass(validated)
+
+ def class_init(self, cls, name):
if isinstance(self._value_trait, TraitType):
self._value_trait.class_init(cls, None)
if isinstance(self._key_trait, TraitType):
self._key_trait.class_init(cls, None)
if self._per_key_traits is not None:
for trait in self._per_key_traits.values():
- trait.class_init(cls, None)
- super(Dict, self).class_init(cls, name)
-
- def instance_init(self, obj):
+ trait.class_init(cls, None)
+ super(Dict, self).class_init(cls, name)
+
+ def instance_init(self, obj):
if isinstance(self._value_trait, TraitType):
self._value_trait.instance_init(obj)
if isinstance(self._key_trait, TraitType):
self._key_trait.instance_init(obj)
if self._per_key_traits is not None:
for trait in self._per_key_traits.values():
- trait.instance_init(obj)
- super(Dict, self).instance_init(obj)
-
+ trait.instance_init(obj)
+ super(Dict, self).instance_init(obj)
+
def from_string(self, s):
"""Load value from a single string"""
if not isinstance(s, str):
@@ -3024,7 +3024,7 @@ class Dict(Instance):
if isinstance(test, dict):
return test
raise
-
+
def from_string_list(self, s_list):
"""Return a dict from a list of config strings.
@@ -3086,24 +3086,24 @@ class Dict(Instance):
return {key: value}
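Dict validates keys and values independently, with per_key_traits taking precedence over value_trait for the keys it names. For instance:

from traitlets import Dict, HasTraits, Integer, TraitError, Unicode

class Cfg(HasTraits):
    limits = Dict(value_trait=Integer(), key_trait=Unicode())

c = Cfg()
c.limits = {"cpu": 4}           # key and value validated separately
try:
    c.limits = {"cpu": "lots"}  # value fails Integer() -> element_error()
except TraitError:
    pass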
-class TCPAddress(TraitType):
- """A trait for an (ip, port) tuple.
-
-    This allows for both IPv4 addresses and hostnames.
- """
-
- default_value = ('127.0.0.1', 0)
- info_text = 'an (ip, port) tuple'
-
- def validate(self, obj, value):
- if isinstance(value, tuple):
- if len(value) == 2:
+class TCPAddress(TraitType):
+ """A trait for an (ip, port) tuple.
+
+    This allows for both IPv4 addresses and hostnames.
+ """
+
+ default_value = ('127.0.0.1', 0)
+ info_text = 'an (ip, port) tuple'
+
+ def validate(self, obj, value):
+ if isinstance(value, tuple):
+ if len(value) == 2:
if isinstance(value[0], str) and isinstance(value[1], int):
- port = value[1]
- if port >= 0 and port <= 65535:
- return value
- self.error(obj, value)
-
+ port = value[1]
+ if port >= 0 and port <= 65535:
+ return value
+ self.error(obj, value)
+
def from_string(self, s):
if self.allow_none and s == 'None':
return None
@@ -3114,19 +3114,19 @@ class TCPAddress(TraitType):
return (ip, port)
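TCPAddress.validate accepts exactly a (str, int) 2-tuple with the port in 0..65535; anything else falls through to self.error(). A sketch:

from traitlets import HasTraits, TCPAddress, TraitError

class Server(HasTraits):
    bind = TCPAddress()                # defaults to ('127.0.0.1', 0)

s = Server()
s.bind = ("localhost", 8080)           # (str, int) with port in range
try:
    s.bind = ("localhost", 99999)      # port > 65535 -> self.error()
except TraitError:
    pass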
-class CRegExp(TraitType):
- """A casting compiled regular expression trait.
-
- Accepts both strings and compiled regular expressions. The resulting
- attribute will be a compiled regular expression."""
-
- info_text = 'a regular expression'
-
- def validate(self, obj, value):
- try:
- return re.compile(value)
+class CRegExp(TraitType):
+ """A casting compiled regular expression trait.
+
+ Accepts both strings and compiled regular expressions. The resulting
+ attribute will be a compiled regular expression."""
+
+ info_text = 'a regular expression'
+
+ def validate(self, obj, value):
+ try:
+ return re.compile(value)
except Exception:
- self.error(obj, value)
+ self.error(obj, value)
class UseEnum(TraitType):
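CRegExp simply funnels the assigned value through re.compile(), so both pattern strings and already-compiled patterns are accepted (re.compile passes a compiled pattern through unchanged). Briefly:

import re
from traitlets import CRegExp, HasTraits, TraitError

class Grep(HasTraits):
    pattern = CRegExp(r"\d+")

g = Grep()
g.pattern = r"[a-z]+"       # compiled via re.compile()
assert g.pattern.match("abc")
try:
    g.pattern = "("         # re.compile() raises -> self.error()
except TraitError:
    pass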
diff --git a/contrib/python/traitlets/py3/traitlets/utils/getargspec.py b/contrib/python/traitlets/py3/traitlets/utils/getargspec.py
index 086b21999a..22511437bd 100644
--- a/contrib/python/traitlets/py3/traitlets/utils/getargspec.py
+++ b/contrib/python/traitlets/py3/traitlets/utils/getargspec.py
@@ -1,19 +1,19 @@
-"""
- getargspec excerpted from:
-
- sphinx.util.inspect
- ~~~~~~~~~~~~~~~~~~~
- Helpers for inspecting Python modules.
- :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import inspect
-
-# Unmodified from sphinx below this line
-
+"""
+ getargspec excerpted from:
+
+ sphinx.util.inspect
+ ~~~~~~~~~~~~~~~~~~~
+ Helpers for inspecting Python modules.
+ :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import inspect
+
+# Unmodified from sphinx below this line
+
from functools import partial
-
+
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
@@ -29,12 +29,12 @@ def getargspec(func):
args = args[len(func.args):]
for arg in func.keywords or ():
try:
- i = args.index(arg) - len(args)
- del args[i]
- try:
+ i = args.index(arg) - len(args)
+ del args[i]
+ try:
del defaults[i]
- except IndexError:
- pass
+ except IndexError:
+ pass
except ValueError: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
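The partial-handling above trims the positional arguments already bound by functools.partial and deletes keyword-bound ones. A quick sketch, assuming the vendored helper returns a FullArgSpec-like tuple as the stdlib inspect helpers do:

from functools import partial
from traitlets.utils.getargspec import getargspec

def f(a, b, c=1):
    return a + b + c

spec = getargspec(partial(f, 1))  # 'a' is consumed by the bound arg
assert spec.args == ["b", "c"]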
diff --git a/contrib/python/traitlets/py3/traitlets/utils/importstring.py b/contrib/python/traitlets/py3/traitlets/utils/importstring.py
index 6d9e0986cd..defad8f183 100644
--- a/contrib/python/traitlets/py3/traitlets/utils/importstring.py
+++ b/contrib/python/traitlets/py3/traitlets/utils/importstring.py
@@ -1,38 +1,38 @@
-"""
-A simple utility to import something by its string name.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-
-def import_item(name):
- """Import and return ``bar`` given the string ``foo.bar``.
-
- Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
- executing the code ``from foo import bar``.
-
- Parameters
- ----------
- name : string
+"""
+A simple utility to import something by its string name.
+"""
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+def import_item(name):
+ """Import and return ``bar`` given the string ``foo.bar``.
+
+ Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
+ executing the code ``from foo import bar``.
+
+ Parameters
+ ----------
+ name : string
The fully qualified name of the module/package being imported.
-
- Returns
- -------
- mod : module object
+
+ Returns
+ -------
+ mod : module object
The module that was imported.
- """
+ """
if not isinstance(name, str):
- raise TypeError("import_item accepts strings, not '%s'." % type(name))
- parts = name.rsplit('.', 1)
- if len(parts) == 2:
- # called with 'foo.bar....'
- package, obj = parts
- module = __import__(package, fromlist=[obj])
- try:
- pak = getattr(module, obj)
- except AttributeError:
- raise ImportError('No module named %s' % obj)
- return pak
- else:
- # called with un-dotted string
- return __import__(parts[0])
+ raise TypeError("import_item accepts strings, not '%s'." % type(name))
+ parts = name.rsplit('.', 1)
+ if len(parts) == 2:
+ # called with 'foo.bar....'
+ package, obj = parts
+ module = __import__(package, fromlist=[obj])
+ try:
+ pak = getattr(module, obj)
+ except AttributeError:
+ raise ImportError('No module named %s' % obj)
+ return pak
+ else:
+ # called with un-dotted string
+ return __import__(parts[0])
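import_item is a thin wrapper over __import__: a dotted name behaves like ``from pkg import attr``, an un-dotted name like a plain import. For example:

from traitlets.utils.importstring import import_item

json_mod = import_item("json")     # un-dotted: plain __import__
dumps = import_item("json.dumps")  # dotted: from json import dumps
assert dumps({"a": 1}) == '{"a": 1}'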
diff --git a/contrib/python/traitlets/py3/traitlets/utils/sentinel.py b/contrib/python/traitlets/py3/traitlets/utils/sentinel.py
index de6b3e508f..0760bec8b5 100644
--- a/contrib/python/traitlets/py3/traitlets/utils/sentinel.py
+++ b/contrib/python/traitlets/py3/traitlets/utils/sentinel.py
@@ -1,20 +1,20 @@
-"""Sentinel class for constants with useful reprs"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
+"""Sentinel class for constants with useful reprs"""
-class Sentinel(object):
-
- def __init__(self, name, module, docstring=None):
- self.name = name
- self.module = module
- if docstring:
- self.__doc__ = docstring
-
- def __repr__(self):
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+
+class Sentinel(object):
+
+ def __init__(self, name, module, docstring=None):
+ self.name = name
+ self.module = module
+ if docstring:
+ self.__doc__ = docstring
+
+ def __repr__(self):
return str(self.module) + '.' + self.name
-
+
def __copy__(self):
return self
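Sentinel exists so module-level constants such as Undefined get a readable repr and survive copying as the same object; both __repr__ and __copy__ are shown above. A sketch with a hypothetical module name:

import copy
from traitlets.utils.sentinel import Sentinel

# "myapp.config" and the docstring are illustrative, not from the diff
MISSING = Sentinel("MISSING", "myapp.config", "Marker for an unset option.")
assert repr(MISSING) == "myapp.config.MISSING"
assert copy.copy(MISSING) is MISSING  # __copy__ returns self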
diff --git a/contrib/python/traitlets/py3/ya.make b/contrib/python/traitlets/py3/ya.make
index be00a1bd39..46980f21b3 100644
--- a/contrib/python/traitlets/py3/ya.make
+++ b/contrib/python/traitlets/py3/ya.make
@@ -1,49 +1,49 @@
# Generated by devtools/yamaker (pypi).
PY3_LIBRARY()
-
+
PROVIDES(python_traitlets)
OWNER(borman nslus g:python-contrib)
-
+
VERSION(5.1.1)
LICENSE(BSD-3-Clause)
NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- traitlets/__init__.py
- traitlets/_version.py
- traitlets/config/__init__.py
- traitlets/config/application.py
- traitlets/config/configurable.py
- traitlets/config/loader.py
- traitlets/config/manager.py
+
+PY_SRCS(
+ TOP_LEVEL
+ traitlets/__init__.py
+ traitlets/_version.py
+ traitlets/config/__init__.py
+ traitlets/config/application.py
+ traitlets/config/configurable.py
+ traitlets/config/loader.py
+ traitlets/config/manager.py
traitlets/config/sphinxdoc.py
- traitlets/log.py
+ traitlets/log.py
traitlets/tests/__init__.py
traitlets/tests/_warnings.py
traitlets/tests/utils.py
- traitlets/traitlets.py
- traitlets/utils/__init__.py
+ traitlets/traitlets.py
+ traitlets/utils/__init__.py
traitlets/utils/bunch.py
traitlets/utils/decorators.py
traitlets/utils/descriptions.py
- traitlets/utils/getargspec.py
- traitlets/utils/importstring.py
- traitlets/utils/sentinel.py
+ traitlets/utils/getargspec.py
+ traitlets/utils/importstring.py
+ traitlets/utils/sentinel.py
traitlets/utils/text.py
-)
-
+)
+
RESOURCE_FILES(
PREFIX contrib/python/traitlets/py3/
.dist-info/METADATA
.dist-info/top_level.txt
)
-END()
+END()
RECURSE_FOR_TESTS(
tests
diff --git a/contrib/python/traitlets/ya.make b/contrib/python/traitlets/ya.make
index 934eb39823..3156aae8c5 100644
--- a/contrib/python/traitlets/ya.make
+++ b/contrib/python/traitlets/ya.make
@@ -1,5 +1,5 @@
PY23_LIBRARY()
-
+
LICENSE(Service-Py23-Proxy)
OWNER(g:python-contrib)
@@ -12,7 +12,7 @@ ENDIF()
NO_LINT()
-END()
+END()
RECURSE(
py2
diff --git a/contrib/python/ya.make b/contrib/python/ya.make
index fcbdd423b0..d01ced9f3a 100644
--- a/contrib/python/ya.make
+++ b/contrib/python/ya.make
@@ -1,4 +1,4 @@
-OWNER(g:contrib g:python-contrib)
+OWNER(g:contrib g:python-contrib)
RECURSE(
absl-py
@@ -37,7 +37,7 @@ RECURSE(
aiosmtplib
aiosocksy
aiosqlite
- aiotg
+ aiotg
aiounittest
aiozipkin
aiozk
@@ -149,7 +149,7 @@ RECURSE(
cherrypy-cors
ciso8601
clang
- clang/example
+ clang/example
cli-helpers
click
click-didyoumean
@@ -161,12 +161,12 @@ RECURSE(
cloudpickle
color
colorama
- coloredlogs
+ coloredlogs
colorhash
colorlog
commoncode
commonmark
- ConfigArgParse
+ ConfigArgParse
configobj
configparser
confluent-kafka
@@ -356,7 +356,7 @@ RECURSE(
fakeredis
falcon
falcon-cors
- falcon-multipart
+ falcon-multipart
fallocate
fancycompleter
fastapi
@@ -551,7 +551,7 @@ RECURSE(
jmespath
json-rpc
json2html
- jsondiff
+ jsondiff
jsonfield
jsonobject
jsonpath-rw
@@ -640,7 +640,7 @@ RECURSE(
mpi4py
mpmath
msal
- msgpack
+ msgpack
mujson
multidict
multitasking
@@ -736,7 +736,7 @@ RECURSE(
pexpect
pgcli
PGPy
- pgspecial
+ pgspecial
phonenumbers
pickleshare
pika
@@ -801,8 +801,8 @@ RECURSE(
pycparser
pycrypto
pycryptodome
- pycurl
- pycurl/example
+ pycurl
+ pycurl/example
pydantic
pydash
PyDispatcher
@@ -824,7 +824,7 @@ RECURSE(
pyjavaproperties
PyJWT
pykdtree
- pyketama
+ pyketama
pylev
pylint
pylxd
@@ -840,7 +840,7 @@ RECURSE(
pynetbox
pyodbc
pyOpenSSL
- pyparsing
+ pyparsing
PyPDF2
pyperclip
PyPika
@@ -987,7 +987,7 @@ RECURSE(
semver
Send2Trash
sentinels
- sentry-sdk
+ sentry-sdk
service-identity
setproctitle
setuptools
@@ -1097,7 +1097,7 @@ RECURSE(
typed-ast
typeguard
typer
- typing
+ typing
typing-extensions
typing-inspect
tzlocal
@@ -1119,31 +1119,31 @@ RECURSE(
user-agents
uvicorn
uvloop
- uwsgi
- uwsgi/bin
- uwsgi/examples
+ uwsgi
+ uwsgi/bin
+ uwsgi/examples
uwsgiconf
- validators
+ validators
validr
- vcrpy
+ vcrpy
viberbot
- vine
+ vine
visitor
- voluptuous
+ voluptuous
w3lib
waitress
walrus
Wand
wasabi
- watchdog
+ watchdog
watchgod
- wcwidth
+ wcwidth
webargs
webauthn
webcolors
webencodings
WebOb
- websocket-client
+ websocket-client
websockets
webstruct
WebTest
@@ -1154,31 +1154,31 @@ RECURSE(
whitenoise
whodap
wmctrl
- wrapt
- ws4py
+ wrapt
+ ws4py
wsgi-intercept
- wsgi-profiler
+ wsgi-profiler
wsproto
wtf-peewee
- WTForms
- WTForms-JSON
- wurlitzer
+ WTForms
+ WTForms-JSON
+ wurlitzer
xhtml2pdf
xlrd
- XlsxWriter
+ XlsxWriter
xlutils
- xlwt
+ xlwt
xmlsec
- xmltodict
+ xmltodict
xxhash
yandex-pgmigrate
- yappi
- yarl
+ yappi
+ yarl
yfinance
youtube-dl
yoyo-migrations
yt-dlp
- zake
+ zake
zeep
zero-downtime-migrations
zope.event
@@ -1186,27 +1186,27 @@ RECURSE(
zope.schema
zstandard
)
-
+
IF (OS_WINDOWS)
RECURSE(
win_unicode_console
)
ENDIF()
-IF (OS_DARWIN)
- RECURSE(
- appnope
- )
-ENDIF ()
-
-IF (OS_LINUX)
- RECURSE(
- pyroute2
- )
-
+IF (OS_DARWIN)
+ RECURSE(
+ appnope
+ )
+ENDIF ()
+
+IF (OS_LINUX)
+ RECURSE(
+ pyroute2
+ )
+
IF (OS_SDK != "ubuntu-12")
- RECURSE(
- cysystemd
- )
- ENDIF()
-ENDIF ()
+ RECURSE(
+ cysystemd
+ )
+ ENDIF()
+ENDIF ()